Dataset schema (per record):

    query            string, 9 to 3.4k characters
    document         string, 9 to 87.4k characters
    metadata         dict
    negatives        sequence, 4 to 101 items
    negative_scores  sequence, 4 to 101 items
    document_score   string, 3 to 10 characters
    document_rank    string, 102 distinct values
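Given this schema, records of this kind can be iterated with the Hugging Face datasets library. The sketch below is an assumption about how the dump would be consumed; the dataset path "org/code-retrieval-triplets" is a placeholder, not the real identifier.

from datasets import load_dataset

# Placeholder path; substitute the actual dataset identifier.
ds = load_dataset("org/code-retrieval-triplets", split="train")

for record in ds.select(range(2)):
    query = record["query"]              # natural-language code-search query
    positive = record["document"]        # the code snippet judged relevant
    negatives = record["negatives"]      # 4-101 non-relevant snippets
    scores = record["negative_scores"]   # retrieval scores for the negatives
    print(query, len(negatives), record["document_score"], record["document_rank"])

The records reproduced below follow this column order: query, document, metadata, negatives, negative_scores, document_score, document_rank.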
Show employees one level below
from django.forms.models import model_to_dict
from django.http import JsonResponse


def employees_json_id(request, employee_id):
    # Employee is the project's employee model.
    current_employee = Employee.objects.get(pk=int(employee_id))
    if current_employee.is_manager:
        employee_list = Employee.objects.filter(manager=current_employee)
        employees = list()
        for employee in employee_list:
            employee_dict = model_to_dict(employee)
            employee_dict['first_name'] = employee.user.first_name
            employee_dict['last_name'] = employee.user.last_name
            employee_dict['photo'] = employee.photo.url if employee.photo else ''
            employees.append(employee_dict)
        data = {"employees": employees}
    else:
        return JsonResponse(
            status=400,
            data={"error": "Employee with id={} is not a manager".format(int(employee_id))},
        )
    return JsonResponse(data=data, content_type='application/json', safe=False)
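The view above (and the second one later in this dump) queries an Employee model with user, manager, is_manager and photo attributes. A rough reconstruction of that model is sketched here; the field options and app layout are assumptions, inferred only from the attribute accesses in the views.

from django.contrib.auth.models import User
from django.db import models


class Employee(models.Model):
    # OneToOne link to the auth user supplies first_name / last_name.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Self-referencing reporting line walked by the views.
    manager = models.ForeignKey('self', null=True, blank=True, on_delete=models.SET_NULL)
    is_manager = models.BooleanField(default=False)
    photo = models.ImageField(upload_to='photos/', null=True, blank=True)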
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_level_exp_mouseover(self):\n if self.skill_tree_displaying:\n return\n self.tooltip_focus = self.level_and_exp_rect\n player_panel_renderer.draw_exp_details(self.player_dict['experience'])", "def getEmployeeLevel(self,number:int):\n allLevels = [1,2,3,4]\n if number == 0:\n return allLevels[randint(0,3)]\n else:\n return allLevels", "def display_employee(self):\n print \"[Name: %s] [Salary: %d]\" % (self.name, self.salary)", "def display_label(self) -> str:\n return f\"{self.name} ({len(self.level_doors)} levels)\"", "def _level_info(entity):\n if entity.is_max_level():\n return 'Maxed'\n if entity.max_level is not None:\n return '{entity.level}/{entity.max_level}'.format(entity=entity)\n return entity.level", "def refresh_level_and_exp(self):\n if self.skill_tree_displaying:\n return\n player_panel_renderer.draw_level_and_experience(self.player_dict['level'], self.player_dict['profession'],\n self.player_dict['experience'], refresh=True)", "def getLevel(unique_name):", "def _visualize_helper(self, tree, level):\n tab_level = \" \" * level\n val = tree.value if tree.value is not None else -1\n print(\"%d: %s%s == %f\" % (level, tab_level, tree.attribute_name, val))", "def level(score):\n user_level = \"\"\n if score < 20:\n user_level = \"elementary\"\n elif score < 30:\n user_level = \"intermediate\"\n elif score < 35:\n user_level = \"upper intermediate\"\n else:\n user_level = \"advanced\"\n return user_level", "def color_levelname (self, record):\n record.levelname = '%s%s%s' % \\\n (self.term.bold_red if record.levelno >= 50 else \\\n self.term.bold_red if record.levelno >= 40 else \\\n self.term.bold_yellow if record.levelno >= 30 else \\\n self.term.bold_white if record.levelno >= 20 else \\\n self.term.yellow, record.levelname.title(), self.term.normal)\n return record", "def get_level(self, level):\n return", "def addLevel(self):\n pass", "def showBestStatLevelReached(self) :\n bestLevel = 0\n for level in self.level_history :\n bestLevel = level.level if bestLevel < level.level else bestLevel\n Scenario.messageBestStatLevelReached(bestLevel)", "def showNbLevelLose(self) :\n nbLevelLose = 0\n for level in self.level_history :\n if level.result == 0:\n nbLevelLose += 1\n Scenario.messageGetNbLevelLose(nbLevelLose)", "async def level(self, ctx):\n\n level = await self.get_player_level(ctx.author)\n await ctx.send(f\"{ctx.author.mention}, your level is {level}. 
Use the `-info` command to learn more!\")", "def user_story_07(self):\n for ind in self.individuals.values():\n if ind.age >= 150:\n print(f'US07 - {ind.name} is age {ind.age}, which is over 150 years old, on line {ind._age_line}')", "def get_level(rol):\n\treturn rol.level", "def showWorstGainWon(self) :\n worstGainWon = self.level_history[0].profit\n for level in self.level_history :\n worstGainWon = level.profit if ((worstGainWon > level.profit) and (level.result == 1)) else worstGainWon\n Scenario.messageGetWorstGainWon(worstGainWon)", "def display(self, tree, level = 0):\n\t\tresult = \"\"\n\t\tfor name, node in tree.soon:\n\t\t\tresult += \" \"*level+repr(node)+\"\\n\"\n\t\t\tresult += self.display(tree.getSoon(name),level + 1)\n\t\treturn result", "def generate_longitudinal_level_title(grid, field, level):\n time_str = generate_grid_time_begin(grid).strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n disp = grid.x[\"data\"][level] / 1000.0\n if disp >= 0:\n direction = \"east\"\n else:\n direction = \"west\"\n disp = -disp\n l1 = f\"{generate_grid_name(grid)} {disp:.1f} km {direction} of origin {time_str} \"\n field_name = generate_field_name(grid, field)\n return l1 + \"\\n\" + field_name", "async def level(self, ctx, user: discord.User = None):\n settings = config.load_settings()\n if settings['guilds'][str(ctx.guild.id)][\"leveling\"] is True:\n guild = ctx.guild.id\n if user is None:\n id = ctx.author.id\n name = ctx.author.display_name\n else:\n id = user.id\n name = user.display_name\n xp = config.load_xp()\n exp = 0\n level = 0\n if str(guild) in xp['guilds']:\n if str(id) in xp['guilds'][str(guild)]:\n exp = xp['guilds'][str(guild)][str(id)]['xp']\n level = xp['guilds'][str(guild)][str(id)]['level']\n await ctx.send(name + \" is currently level: \" + str(level) + \" with \" + str(exp) + \" experience!\")\n else:\n await ctx.send(\"leveling is currently disabled on this server!\")", "def LevelUpPlayer(self):\n self.lvl += 1\n self.skillPts += 1\n percent = 0.5\n if self.lvl > 8:\n percent = 0.45 # reduce how much xp is added once higher level\n elif self.lvl > 16:\n percent = 0.4\n elif self.lvl > 25:\n percent = 0.3\n self.xpNeeded = floor(self.xpNeeded + self.xpNeeded * percent)", "def getLevel(self):\n return self.level", "def lineage(self) -> 'lngmod.Level':\n return self._parent", "def level_name(self) -> str:\n return getLevelName(self.level)", "def showprivelages(self):\r\n\t\tprint (\"An administrator has the following abilities: \")\r\n\t\tfor power in self.powers:\r\n\t\t\tprint (\"- \" + power)", "def reveal_occupants(idx, huts):\n msg = \"\"\n print(\"展示小屋内部情况...\")\n for i in range(len(huts)):\n occupant_info = \"<%d:%s>\" % (i + 1, huts[i])\n if i + 1 == idx:\n occupant_info = \"\\033[1m\" + occupant_info + \"\\033[0m\"\n msg += occupant_info + \" \"\n\n print(\"\\t\" + msg)\n print_dotted_line()", "def test_infrastructure_usage_difficulty_level_display_string(self):\n self.assertEquals(str(self.level), \"Medium (Ecorp)\")", "def show_actual_lvl(self, surface, actual_lvl):\n black_color = (0, 0, 0)\n white_color = (255, 255, 255)\n pygame.draw.rect(surface, black_color, (0, 690, 40, 30))\n arial_font = pygame.font.SysFont(\"arial.ttf\", 20)\n lvl_text = arial_font.render(\"lvl:{}\".format(actual_lvl),\n False, white_color)\n surface.blit(lvl_text, [5, 695])", "def display_hours(employee_id):\n\n if not g.user:\n flash(\"Please Login to continue.\", \"danger\")\n return redirect(\"/\")\n \n employee = Employee.query.get_or_404(employee_id)\n\n labels = json.dumps( [\"Completed\", 
\"Required\"])\n data = json.dumps([employee.completed, employee.required])\n \n return render_template(\"users/display_hours.html\", employee = employee, labels = labels, data = data)", "def print_level(self, node , level):\n if node is None and level == 1: \n self.level.append(None)\n elif node != None:\n # set the root level as the base case\n if level == 1: \n self.level.append(node)\n elif level > 1 : \n self.print_level(node.left , level - 1) \n self.print_level(node.right , level - 1) \n return self.level", "def print_level(level, position):\n size = level_size(level)\n index = position_to_index(position, size)\n level = level[:index] + PLAYER + level[1 + index:]\n print(level)", "def print_level(self, node, depth):\n\n if not node:\n return\n\n if depth == 1:\n self.print_count += 1\n print(node.point, self.print_count)\n\n elif depth > 1:\n self.print_level(node.left, depth - 1)\n self.print_level(node.right, depth - 1)", "def level_up(self):\n if self.level < self.max_level:\n self.level = self.level + 1\n self.update_level_buttons()", "def min_edu_level(self) -> int:\n if len(self._history_edus) == 0:\n return 0\n # add one since allocating 0 for 'none'\n return int(max([e.education_lvl for e in self._history_edus])) + 1", "def risk_display(register, draft=False):\n if draft:\n level = register.draft_risk_level\n name = register.get_draft_risk_level_display()\n id_ = \"id_max_draft_risk_\" + str(register.id)\n else:\n level = register.final_risk_level\n name = register.get_final_risk_level_display()\n id_ = \"id_max_final_risk_\" + str(register.id)\n\n if level == Register.LEVEL_VERY_LOW:\n label = 'label-very-low'\n elif level == Register.LEVEL_LOW:\n label = 'label-low'\n elif level == Register.LEVEL_MEDIUM:\n label = 'label-medium'\n elif level == Register.LEVEL_HIGH:\n label = 'label-high'\n else:\n label = 'label-very-high'\n\n return mark_safe(\n '<span id=\"%s\" class=\"label %s\">%s</span>' % (id_, label, name))", "def support(self, level=1):\n if level == 1:\n sup = (2 * self.pivot_point) - self.last_high\n elif level == 2:\n sup = self.pivot_point - (self.last_high - self.last_low)\n elif level == 3:\n sup = self.last_low - 2*(self.last_high - self.pivot_point)\n else:\n raise ValueError('Not a valid level. 
Must be 1, 2, or 3')\n return sup", "def test_infrastructure_maintenance_difficulty_level_display_string(self):\n self.assertEquals(str(self.level), \"Medium (Ecorp)\")", "def show_overview_of_all_employees(self):\n\n print(\"OVERVIEW OF EMPLOYEES\\n\")\n\n employees_ob_list = self.llapi.get_employee_overview()\n \n for employee_ob in employees_ob_list:\n print(employee_ob.print_info_in_line(\"*\"))\n \n print(f\"\\nNAN AIR has {len(employees_ob_list)} employees\")\n\n print(\"\\nB Back\\n\")\n\n action_str = self.choose_action([\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"b\"])\n\n if action_str == \"b\":\n return", "def level(levelValue):\n def _decoration(fcn):\n fcn.level = levelValue\n return fcn\n return _decoration", "def print_level():\n print(\"\")\n\n def show_hide_word(word):\n \"\"\"show/hide finished/unfinished words\"\"\"\n if word not in current_level.finished_words:\n return \"*\" * len(word)\n return word\n\n current_level.layout.print_layout(\n show_hide_word,\n # Print unfinished words first with '*'\n set(current_level.words) - set(current_level.finished_words),\n )\n\n # level state\n print(\"\")\n print(\"Level: %d/%d\" % (current_level_index + 1, len(all_levels)))\n if current_level.bonus_words:\n bonus_words_status = \"Bonus words: %d/%d\" % (\n len(current_level.finished_bonus_words),\n len(current_level.bonus_words)\n )\n bonus_words_status += \" %s\" % \" \".join(\n change_case(word)\n if word in current_level.finished_bonus_words\n else \"*\" * len(word)\n for word in current_level.bonus_words\n )\n print(bonus_words_status)\n\n # characters\n print(\"\")\n print(\"Chars: %s\" % \" \".join(change_case(char) for char in current_level.chars))\n print(\"\")", "def displayLevelOfDetail(*args, levelOfDetail: bool=True, q=True, query=True,\n **kwargs)->Union[None, Any]:\n pass", "def show_employee_menu(self):\n \n action_str = \"\"\n\n while True:\n print(self.LENGTH_STAR * \"*\")\n print(\"EMPLOYEES MENU\\n\")\n print(\"1 Print overview of all employees\")\n print(\"2 Pilots\")\n print(\"3 Cabin Crew\")\n print(\"B Back\\n\")\n\n action_str = self.choose_action([\"1\", \"2\" ,\"3\" ,\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"1\", \"2\", \"3\", \"b\"])\n\n if action_str == \"1\":\n self.show_overview_of_all_employees()\n\n elif action_str == \"2\":\n self.show_pilot_or_crew_menu(self.PILOT)\n\n elif action_str == \"3\":\n self.show_pilot_or_crew_menu(self.CREW)\n\n elif action_str == \"b\":\n return", "def getLevels():", "def add_level(self, level):\n return", "async def leaderboard(self, ctx):\n settings = config.load_settings()\n if settings['guilds'][str(ctx.guild.id)][\"leveling\"] is True:\n guild = ctx.guild.id\n xp = config.load_xp()\n scores = {}\n if str(guild) in xp['guilds']:\n for user in xp['guilds'][str(guild)]:\n scores.update({ctx.guild.get_member(int(user)).display_name: xp['guilds'][str(guild)][user]['xp']})\n sorted_scores = collections.OrderedDict(sorted(scores.items(), key=lambda x: x[1], reverse=True))\n message = discord.Embed(title='Leaderboard', description=ctx.guild.name + \"'s most active users\")\n current_field = 1\n field_limit = 25\n for index, (key, value) in enumerate(sorted_scores.items()):\n if current_field <= field_limit:\n message.add_field(name=str(index+1) + \": \" + key,\n value=\"with: \" + str(value) + \" xp\",\n inline=False)\n current_field += 1\n else:\n break\n await ctx.send('', embed=message)\n else:\n await ctx.send(\"leveling is currently disabled on this 
server!\")", "def draw_level_of_detail(here, there, mlist):\r\n dist = distance(here, there)\r\n\r\n index = bisect.bisect_left(mlist, [dist, None])\r\n model = mlist[min(index, len(mlist) - 1)][1]\r\n model.position(there[0], there[1], there[2])\r\n model.draw()", "def attack_bonus_on_level(self, level):\n raise NotImplementedError", "def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, employee.last_name, employee.grade, payscale.salary))", "def show_score(self):\n self._pause = True # pause the game when you check the score\n score_list = self.get_high_score(self._filename) # get the record\n top = tk.Toplevel() # create a Toplevel\n top.title('Score Board')\n # create a text label for notification\n title = tk.Label(top, text='High Scored Player in This Level', width=70)\n title.pack(side=tk.TOP, ipady=1)\n if score_list is None: # check whether the record is empty\n tk.Label(top, text='No record in this level yet!', width=70).pack(side=tk.TOP, ipady=1)\n else: # if not empty\n for record in score_list: # shows up all the detail\n tk.Label(top, text=record[0] + ' : ' + record[1]).pack(side=tk.TOP, ipady=1)", "def maximum_level(self, question_type):\n\t\treturn 2", "async def level(self, ctx, member: discord.Member = None):\n member = member or ctx.author\n if member.bot:\n return\n await get_user_level(ctx, member)", "def plot_leaf_profile(self, leaf: str | int, picket: int, show: bool = True):\n mlc_meas = Enumerable(self.mlc_meas).single(\n lambda m: leaf in m.full_leaf_nums and m.picket_num == picket\n )\n ax = mlc_meas.plot_detailed_profile()\n ax.set_title(f\"MLC profile Leaf: {leaf}, Picket: {picket}\")\n for lg, rg, m in zip(\n self.pickets[picket].left_guard_separated,\n self.pickets[picket].right_guard_separated,\n mlc_meas.marker_lines,\n ):\n g_val = lg(m.point1.y)\n rg_val = rg(m.point1.y)\n ax.axvline(g_val, color=\"green\", label=\"Guard rail\")\n ax.axvline(rg_val, color=\"green\", label=\"Guard rail\")\n ax.legend()\n if show:\n plt.show()", "def level_up(self):\n pass", "def show_max_age_label(self):\n self.draw_max_age = True", "def __str__(self):\n return f'Employee #{self.employee_id} ({self.job_group.name}, ${self.job_group.wage}) worked {self.hours_worked} hours on {self.date_worked}.'", "def __level(self, *args, **kwargs):\n pass", "def display_label(self) -> str:\n return \"linear (variable levels)\"", "def printLevelOrder(root):\n print(\"---- printing below the level traversal of the tree -----\")\n \n print(\"=========================================================\")", "def export_level(self, levelname=None):\n if levelname is None:\n if self.project is None:\n s = 'no project is opened.'\n print s\n Console.write(s)\n return\n if self.project.level is None:\n s = 'current project has no current level to export.'\n print s\n Console.write(s)\n return\n self.project.level.export()\n else:\n raise NotImplementedError()", "def update_level_buttons(self):\n self.level_buttons[0].set_text(\"Level \" + str(self.level))\n if self.level <= 1:\n self.level_buttons[1].disable()\n else:\n self.level_buttons[1].enable()\n if self.level >= self.max_level:\n self.level_buttons[2].disable()\n else:\n self.level_buttons[2].enable()", "def setLevel(self, level):\n 
self.lvl = level", "def setLevel(newLevel):\n Verbose.__level = max(-1, newLevel)", "def levelorder(root):\n h = height(root)\n for i in range(1, h + 1):\n print_level(root, i)", "def get_level(level_name):\n return LEVELS[level_name.upper()]", "def printLevelOrder(root):\n print(\"---- printing below the level traversal of the tree -----\")\n h = height(root) \n for i in range(1, h+1): \n printGivenLevel(root, i) \n print(\"=========================================================\")", "def showBestGainWon(self) :\n bestGainWon = 0\n for level in self.level_history :\n bestGainWon = level.profit if bestGainWon < level.profit else bestGainWon\n Scenario.messageGetBestGainWon(bestGainWon)", "async def level_up(user, channel):\n server = db[str(user.guild.id)]\n stats = list(server.find({'id': user.id}))\n lvl_start = stats[-1]['level']\n print(lvl_start)\n experience = stats[-1]['experience']\n x = 35\n cnt = 1\n while (x < experience):\n x = 2 * x + 10\n cnt += 1\n\n if experience >= x:\n lvl_end = cnt - 1\n else:\n lvl_end = lvl_start\n print(lvl_end)\n\n if lvl_start < lvl_end:\n new_stats = {\"$set\": {'level': lvl_end}}\n server.update_one(stats[-1], new_stats)\n ls = lvl_end * 150\n server = db[str(user.guild.id)]\n stats = list(server.find({'id': user.id}))\n cred = stats[-1]['credits'] + ls\n new_stats = {\"$set\": {'credits': cred}}\n server.update_one(stats[-1], new_stats)\n embed = discord.Embed(title=f'{user} has leveled up to {lvl_end}.', description=f'You have been given\\\n{ls} tears for your active-ness.\\n\\\nSaving {ls} tears in your vault of tears.', color=discord.Color.teal())\n embed.set_footer(text='😭')\n await channel.send(embed=embed)", "def change_level(self):\r\n error = False\r\n\r\n try:\r\n char_lvl = int(self.__char_lvl.get())\r\n except ValueError:\r\n error = True\r\n\r\n if error or char_lvl <= 0:\r\n self.__skill_points_indicator.configure(\r\n text=\"Level must be a positive whole number\")\r\n for skill_string in self.__skills:\r\n self.skill_up_disable(skill_string)\r\n self.skill_down_disable(skill_string)\r\n\r\n else:\r\n self.reset_all();\r\n self.__skill_points = 10 + 20 * (char_lvl - 1)\r\n self.__skill_points_indicator.configure(\r\n text=\"Available skillpoints: \" + str(\r\n self.__skill_points))\r\n for skill in self.__skills:\r\n self.check_skill_requirements(skill)", "def showAverageNbAttemptsByLevels(self) :\n level_current = 1\n while level_current <= len(self.list_level) :\n self.showAverageNbAttemptsByLevel(level_current)\n level_current += 1", "def print_level(self, list_level, window, begin, wall, end):\n\t\tfor y in range(0,15):\n\t\t\tfor x in range(0,15):\n\t\t\t\tif list_level[y][x] == 'd':\n\t\t\t\t\tposition_x = x * 30\n\t\t\t\t\tposition_y = y * 30\n\t\t\t\t\twindow.blit(begin, (position_x,position_y))\n\t\t\t\telif list_level[y][x] == 'm':\n\t\t\t\t\tposition_x = x * 30\n\t\t\t\t\tposition_y = y * 30\n\t\t\t\t\twindow.blit(wall, (position_x,position_y))\n\t\t\t\telif list_level[y][x] == 'a':\n\t\t\t\t\tposition_x = x * 30\n\t\t\t\t\tposition_y = y * 30\n\t\t\t\t\twindow.blit(end, (position_x,position_y))\n\t\t\t\telse: # it's a 0\n\t\t\t\t\tcontinue", "def check_points_and_level_up(self):\n if self.points > 20 * self.level:\n self.level += 1\n self.refresh_rate = self.refresh_rate * 0.75", "def lvl_algo(next_level):\n total_xp_needed = (next_level * next_level)\n return total_xp_needed", "def setLevelReached(self, level):\n \n if(0 < level and level < 6 and self.__levelReached < level):\n self.__levelReached = level\n 
self.savePlayerInfo()\n return True\n else:\n return False\n print\"level reached: \" + self.__levelReached", "def display_skill_tree(self):\n self.skill_tree_displaying = True\n self.skill_tree.render_skill_tree()", "def set_level(self, x, level):\n return x * 10 ** ((level - self.ref_level) / 20)", "def calc_level(xp, dominion):\n if xp < 3:\n xp_potential = 1\n if xp >= 3 and xp < 6:\n xp_potential = 2\n if xp >= 6 and xp < 12:\n xp_potential = 3\n if xp >= 12 and xp < 24:\n xp_potential = 4\n if xp >= 24 and xp < 48:\n xp_potential = 5\n if xp >= 48 and xp < 72:\n xp_potential = 6\n if xp >= 72 and xp < 96:\n xp_potential = 7\n if xp >= 96 and xp < 130:\n xp_potential = 8\n if xp >= 130 and xp < 170:\n xp_potential = 9\n if xp >= 170:\n xp_potential = 10\n if dominion < 2:\n dom_potential = 1\n if dominion >= 2 and dominion < 4:\n dom_potential = 2\n if dominion >= 4 and dominion < 10:\n dom_potential = 3\n if dominion >= 10 and dominion < 22:\n dom_potential = 4\n if dominion >= 22 and dominion < 38:\n dom_potential = 5\n if dominion >= 38 and dominion < 57:\n dom_potential = 6\n if dominion >= 57 and dominion < 76:\n dom_potential = 7\n if dominion >= 76 and dominion < 95:\n dom_potential = 8\n if dominion >= 95 and dominion < 124:\n dom_potential = 9\n if dominion >= 124:\n dom_potential = 10\n return min(xp_potential, dom_potential)", "def display_hall_of_fame(self) -> None:\n print(\"Hall of fame\")\n for env, dico in self.score_dic.items():\n print(\"Environment :\", env)\n for team, score in sorted(dico.items()):\n print(\"team: \", team, \"mean: \", score[0], \"std: \", score[1])", "def showWorstBetUse(self) :\n worstBetUse = self.level_history[0].bet\n for level in self.level_history :\n worstBetUse = level.bet if worstBetUse > level.bet else worstBetUse\n Scenario.messageGetWorstBetUse(worstBetUse)", "async def info(self, ctx):\n\n level = await self.get_player_level(ctx.author)\n embed = discord.Embed()\n embed.colour = discord.Colour.blurple()\n embed.set_author(name=str(ctx.author), icon_url=ctx.author.avatar_url)\n\n embed.title = f'Your current level : {level}'\n\n embed.add_field(name='Question', value=f'{self.enigmas[level][\"question\"]}')\n\n embed.set_footer(text='I love Ducks')\n\n await ctx.send(embed=embed)", "def checking_level_end(level, inventory, x_player, y_player, hamster_energy, board):\n\n next_level = False\n if level == 1 and board[y_player][x_player] == '⇵':\n print_level_title(4)\n you_win = guess_number_game.main()\n if you_win:\n next_level = True\n\n elif level == 2 and board[y_player][x_player] == '⇵':\n print_level_title(4)\n won = add_numbers_game.main()\n if won:\n next_level = True\n\n elif level == 3 and board[y_player][x_player] == '⇵':\n print_level_title(4)\n win = remember_number_game.main()\n if win:\n next_level = True\n\n elif level == 4 and hamster_energy == 0:\n next_level = True\n return next_level", "def get_food_level(self):\n return self.plant", "def update_fuel_level(self, new_level):\n if new_level <= self.fuel_capacity:\n self.fuel_level = new_level\n else:\n print(\"The tank can't hold that much!\")", "def showLevels(self):\n\n pa = 'EUR_USD GBP_USD AUD_USD USD_CAD USD_CHF NZD_USD'.split(' ')\n gr = 'D H4 H1 M30 M15'.split(' ')\n for i in xrange(len(pa)):\n dfs = p.DataFrame()\n for j in xrange(len(gr)):\n try:\n training = self.viewTraining(pa[i], gr[j])\n df = training[0]\n manifest = training[1]\n dfs = dfs.combine_first(manifest.set_index('timeframe'))\n plot(df.get_values())\n except: \n ''\n try:\n dfs['timeframe'] = 
dfs.index # save the lost field before calling set_index()\n print dfs.set_index('forecast').sort(ascending=False)\n except: ''\n dfp = p.read_csv('/ml.dev/bin/data/oanda/ticks/{0}/{0}-M5.csv'.format(pa[i])).sort(ascending=True).tail(50).ix[:,'closeAsk']\n plot(dfp)\n title('{0} Forecast'.format(pa[i]))\n legend(gr)\n show();\n #break", "def level_down(self):\n if self.level > 1:\n self.level = self.level - 1\n self.update_level_buttons()", "def test_level_up_out_of_limit(self):\n self.sold.experience = 50\n self.sold.level_up()\n self.assertEqual(self.sold.experience, 50)", "def update_fuel_level(self, new_level):\r\n if new_level <= self.fuel_capacity:\r\n self.fuel_level = new_level\r\n else:\r\n print(\"The tank can't hold that much!\")", "async def tolevel(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n if args[0].isdigit():\n level = int(args[0])\n skill = ' '.join(args[1:])\n else:\n level = None\n skill = ' '.join(args)\n out = users.calc_xp_to_level(ctx.user_object, skill, level)\n await ctx.send(out)", "def get_cells_to_hide(self, level):\n level = LEVEL[level]\n bottom = level[BOTTOM]\n top = level[TOP]\n return random.randint(bottom, top)", "def setLevel(self, level):\n self.level = level", "def PaintLevel(self, item, dc, level, y, x_maincol):\r\n\r\n if item.IsHidden():\r\n return y, x_maincol\r\n \r\n # Handle hide root (only level 0)\r\n if self.HasAGWFlag(wx.TR_HIDE_ROOT) and level == 0:\r\n for child in item.GetChildren():\r\n y, x_maincol = self.PaintLevel(child, dc, 1, y, x_maincol)\r\n \r\n # end after expanding root\r\n return y, x_maincol\r\n \r\n # calculate position of vertical lines\r\n x = x_maincol + _MARGIN # start of column\r\n\r\n if self.HasAGWFlag(wx.TR_LINES_AT_ROOT):\r\n x += _LINEATROOT # space for lines at root\r\n \r\n if self.HasButtons():\r\n x += (self._btnWidth-self._btnWidth2) # half button space\r\n else:\r\n x += (self._indent-self._indent/2)\r\n \r\n if self.HasAGWFlag(wx.TR_HIDE_ROOT):\r\n x += self._indent*(level-1) # indent but not level 1\r\n else:\r\n x += self._indent*level # indent according to level\r\n \r\n # set position of vertical line\r\n item.SetX(x)\r\n item.SetY(y)\r\n\r\n h = self.GetLineHeight(item)\r\n y_top = y\r\n y_mid = y_top + (h/2)\r\n y += h\r\n\r\n exposed_x = dc.LogicalToDeviceX(0)\r\n exposed_y = dc.LogicalToDeviceY(y_top)\r\n\r\n # horizontal lines between rows?\r\n draw_row_lines = self.HasAGWFlag(wx.TR_ROW_LINES)\r\n\r\n if self.IsExposed(exposed_x, exposed_y, _MAX_WIDTH, h + draw_row_lines):\r\n if draw_row_lines:\r\n total_width = self._owner.GetHeaderWindow().GetWidth()\r\n # if the background colour is white, choose a\r\n # contrasting colour for the lines\r\n pen = wx.Pen(wx.SystemSettings_GetColour(wx.SYS_COLOUR_3DLIGHT), 1, wx.SOLID)\r\n dc.SetPen((self.GetBackgroundColour() == wx.WHITE and [pen] or [wx.WHITE_PEN])[0])\r\n dc.DrawLine(0, y_top, total_width, y_top)\r\n dc.DrawLine(0, y_top+h, total_width, y_top+h)\r\n \r\n # draw item\r\n self.PaintItem(item, dc)\r\n\r\n # restore DC objects\r\n dc.SetBrush(wx.WHITE_BRUSH)\r\n dc.SetPen(self._dottedPen)\r\n\r\n # clip to the column width\r\n clip_width = self._owner.GetHeaderWindow().GetColumn(self._main_column).GetWidth()\r\n## clipper = wx.DCClipper(dc, x_maincol, y_top, clip_width, 10000)\r\n\r\n if not self.HasAGWFlag(wx.TR_NO_LINES): # connection lines\r\n\r\n # draw the horizontal line here\r\n dc.SetPen(self._dottedPen)\r\n x2 = x - self._indent\r\n if x2 < (x_maincol + _MARGIN):\r\n x2 = x_maincol + _MARGIN\r\n x3 = x + 
(self._btnWidth-self._btnWidth2)\r\n if self.HasButtons():\r\n if item.HasPlus():\r\n dc.DrawLine(x2, y_mid, x - self._btnWidth2, y_mid)\r\n dc.DrawLine(x3, y_mid, x3 + _LINEATROOT, y_mid)\r\n else:\r\n dc.DrawLine(x2, y_mid, x3 + _LINEATROOT, y_mid)\r\n else:\r\n dc.DrawLine(x2, y_mid, x - self._indent/2, y_mid)\r\n \r\n if item.HasPlus() and self.HasButtons(): # should the item show a button?\r\n \r\n if self._imageListButtons:\r\n\r\n # draw the image button here\r\n image = wx.TreeItemIcon_Normal\r\n if item.IsExpanded():\r\n image = wx.TreeItemIcon_Expanded\r\n if item.IsSelected():\r\n image += wx.TreeItemIcon_Selected - wx.TreeItemIcon_Normal\r\n xx = x - self._btnWidth2 + _MARGIN\r\n yy = y_mid - self._btnHeight2\r\n dc.SetClippingRegion(xx, yy, self._btnWidth, self._btnHeight)\r\n self._imageListButtons.Draw(image, dc, xx, yy, wx.IMAGELIST_DRAW_TRANSPARENT)\r\n dc.DestroyClippingRegion()\r\n\r\n elif self.HasAGWFlag(wx.TR_TWIST_BUTTONS):\r\n\r\n # draw the twisty button here\r\n dc.SetPen(wx.BLACK_PEN)\r\n dc.SetBrush(self._hilightBrush)\r\n button = [wx.Point() for j in xrange(3)]\r\n if item.IsExpanded():\r\n button[0].x = x - (self._btnWidth2+1)\r\n button[0].y = y_mid - (self._btnHeight/3)\r\n button[1].x = x + (self._btnWidth2+1)\r\n button[1].y = button[0].y\r\n button[2].x = x\r\n button[2].y = button[0].y + (self._btnHeight2+1)\r\n else:\r\n button[0].x = x - (self._btnWidth/3)\r\n button[0].y = y_mid - (self._btnHeight2+1)\r\n button[1].x = button[0].x\r\n button[1].y = y_mid + (self._btnHeight2+1)\r\n button[2].x = button[0].x + (self._btnWidth2+1)\r\n button[2].y = y_mid\r\n \r\n dc.DrawPolygon(button)\r\n\r\n else: # if (HasAGWFlag(wxTR_HAS_BUTTONS))\r\n\r\n rect = wx.Rect(x-self._btnWidth2, y_mid-self._btnHeight2, self._btnWidth, self._btnHeight)\r\n flag = (item.IsExpanded() and [wx.CONTROL_EXPANDED] or [0])[0]\r\n wx.RendererNative.GetDefault().DrawTreeItemButton(self, dc, rect, flag) \r\n\r\n # restore DC objects\r\n dc.SetBrush(wx.WHITE_BRUSH)\r\n dc.SetPen(self._dottedPen)\r\n dc.SetTextForeground(wx.BLACK)\r\n\r\n if item.IsExpanded():\r\n\r\n # process lower levels\r\n if self._imgWidth > 0:\r\n oldY = y_mid + self._imgHeight2\r\n else:\r\n oldY = y_mid + h/2\r\n \r\n for child in item.GetChildren():\r\n\r\n y, x_maincol = self.PaintLevel(child, dc, level+1, y, x_maincol)\r\n\r\n # draw vertical line\r\n if not self.HasAGWFlag(wx.TR_NO_LINES):\r\n Y1 = child.GetY() + child.GetHeight()/2\r\n dc.DrawLine(x, oldY, x, Y1)\r\n\r\n return y, x_maincol", "def showUserStats(self) :\n self.getAllStats()\n self.getNbTotalLevelsPlayed()\n Scenario.messageAllStats(self.level_history[0].created_at)\n self.showBestStats()\n self.showWorstStats()\n self.showAverageStats()", "def atten_employee(list_emp, name):\r\n with open(\"attendance_log.txt\", \"w\") as attendance_by_emp:\r\n attendance_by_emp.seek(0)\r\n attendance_by_emp.write(\"Employee Attendance Report:\\n\")\r\n for worker in list_emp:\r\n if worker.name == name:\r\n attendance_by_emp.write(\"%s-\\n\" % worker.name)\r\n for date in worker.attendance:\r\n attendance_by_emp.write(\"\\t\" + date + '\\n')\r\n print(\"Report issued!\\n\")\r\n return\r\n print(\"%s is not in employee log\\n\" % name)\r\n return", "def ability_bonus_on_level(self, level):\n raise NotImplementedError", "async def get_xp(level, command):\n if command == \"profile\":\n return 250 * level\n return int((2 * 350) * (2 ** (level - 2))) # 350 is base value (level 1)", "def do_hire(self):\n return f\"{self} is hiring employees\"", "def 
generate_latitudinal_level_title(grid, field, level):\n time_str = generate_grid_time_begin(grid).isoformat() + \"Z\"\n disp = grid.y[\"data\"][level] / 1000.0\n if disp >= 0:\n direction = \"north\"\n else:\n direction = \"south\"\n disp = -disp\n l1 = f\"{generate_grid_name(grid)} {disp:.1f} km {direction} of origin {time_str} \"\n field_name = generate_field_name(grid, field)\n return l1 + \"\\n\" + field_name", "def __change_level(self, level):\n self.level = level", "def measureUnfoldedLevel(ds, verbose = False):\n points = getIndexedTraces(ds)\n from sklearn.cluster import KMeans\n x = points[points[:,0] > 150, 1].reshape((-1,1))\n # remove outliers \n std = np.std(x)\n mean = np.mean(x)\n x = x[x > mean - 4*std].reshape((-1,1)) \n # ML clustering\n kmeans = KMeans(n_clusters=3, random_state=0).fit(x)\n x_cluster = kmeans.predict(x)\n means = [ np.mean(x[x_cluster == i]) for i in range(3)]\n means = sorted(means) \n level_one = means[1]\n if np.abs(level_one) > 0.35 or np.abs(level_one) < 0.1:\n print(\"Warning! Unfolded level detector in unexpected range: \",leven_one)\n if verbose: #feedback\n pyplot.figure()\n pyplot.hist2d(points[:,0], points[:,1], \n bins=(70*2, 50*2),\n range = [[0, 700], [-0.45, 0.05]],\n cmax = 100000/4 # clip max\n )\n pyplot.plot([0,700], [level_one]*2, 'r--')\n return level_one", "def GroundExcelAddLevelElite(builder, LevelElite):\n return AddLevelElite(builder, LevelElite)", "def printStatus(level,text):\n\tglobal t0\n\tpre = \"[{0:>7.2f}] \".format(time.time()-t0)\n\tfor x in range(0,level):\n\t\tpre += \"-\"\n\tpre += \"> \"\n\tprint(pre+text)" ]
[ "0.6092373", "0.60603535", "0.5886119", "0.5763377", "0.57354516", "0.5616083", "0.5597218", "0.556029", "0.55143595", "0.55038935", "0.5442811", "0.54350245", "0.5397879", "0.53973335", "0.5397157", "0.53344226", "0.53100413", "0.529323", "0.52915984", "0.52886397", "0.52444255", "0.5239765", "0.52354604", "0.5207865", "0.5183789", "0.518338", "0.5167245", "0.5164364", "0.5153924", "0.5141755", "0.51384", "0.5135465", "0.51134694", "0.51099676", "0.50865644", "0.506971", "0.5064515", "0.50567317", "0.50326836", "0.5031097", "0.50264305", "0.5004881", "0.4997766", "0.4981559", "0.49780947", "0.49723625", "0.49615857", "0.49563786", "0.49506706", "0.4946968", "0.4925963", "0.49244952", "0.49206445", "0.49083695", "0.48988605", "0.48967403", "0.48897102", "0.4882761", "0.48682612", "0.48677742", "0.48509327", "0.4845108", "0.4836622", "0.48336956", "0.4832337", "0.48264357", "0.4825804", "0.4820447", "0.4814536", "0.48066112", "0.48053098", "0.48045838", "0.48044062", "0.48029172", "0.48004475", "0.47950673", "0.4793008", "0.47886214", "0.4782075", "0.4780444", "0.47773933", "0.4775036", "0.47683343", "0.47590843", "0.4753108", "0.47460052", "0.4742064", "0.47384545", "0.4702726", "0.47021747", "0.4701808", "0.47016206", "0.4691323", "0.46888193", "0.4686598", "0.4682308", "0.46818313", "0.46797454", "0.4672184", "0.46678546", "0.46594787" ]
0.0
-1
Get all employee managers as JSON
from django.forms.models import model_to_dict
from django.http import JsonResponse


def employees_manager(request):
    # current_employee = Employee.objects.get(user__pk=request.user.pk)
    manager_list = Employee.objects.filter(manager=request.user.employee_user, is_manager=True)
    employee = Employee.objects.get(pk=request.user.employee_user.id)
    employee_dict = model_to_dict(employee)
    employee_dict['first_name'] = employee.user.first_name
    employee_dict['last_name'] = employee.user.last_name
    employee_dict['photo'] = employee.photo.url if employee.photo else ''
    print(employee_dict)  # debug output
    if len(manager_list) > 0:
        result_list = list(manager_list)
        all_managers_list = found_all_managers(manager_list, result_list)
    else:
        # No managers found: return only the current employee's data.
        data = {"employee_managers": employee_dict}
        return JsonResponse(data=data, content_type='application/json', safe=False)
    employees = list()
    for manager in all_managers_list:
        manager_dict = model_to_dict(manager)
        manager_dict['first_name'] = manager.user.first_name
        manager_dict['last_name'] = manager.user.last_name
        manager_dict['photo'] = manager.photo.url if manager.photo else ''
        employees.append(manager_dict)
    employees.append(employee_dict)
    data = {"employee_managers": employees}
    return JsonResponse(data=data, content_type='application/json', safe=False)
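The view relies on a found_all_managers helper that is not included in this record. A plausible implementation, offered purely as an assumption, expands the initial manager queryset level by level along the manager relation and accumulates every manager it finds.

# Hypothetical helper assumed by the view above; not part of the original record.
def found_all_managers(manager_list, result_list):
    # Find managers one level further along the `manager` relation.
    next_level = Employee.objects.filter(manager__in=manager_list, is_manager=True)
    if next_level.exists():
        result_list.extend(next_level)
        return found_all_managers(next_level, result_list)
    return result_list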
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_manager_employees(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n manager_employees = Employee.objects.filter(manager=current_employee, development_plan_type=None).all()\n if manager_employees:\n emp_list=[]\n for emp in manager_employees:\n emp_data={}\n emp_data[\"id\"] = emp.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"manager_id\"] = emp.manager.id\n # emp_data[\"status_questions\"] = emp.status_questions\n # employee_role = EmployeeRole.objects.filter(employee=emp).all()\n # name_role_list = []\n # for obj in employee_role:\n # name_role_list.append(obj.role.name)\n # emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n data = {\"employees:\": emp_list}\n return JsonResponse(status=201, data=data)\n else:\n return JsonResponse(\"The user with id={} isn't a manager for any user\".format(current_employee.user.id),\n status=404)", "def employees_json(request):\n # current_employee = Employee.objects.get(user__pk=request.user.pk)\n employee_list = Employee.objects.filter(manager=request.user.employee_user)\n employees = list()\n for employee in employee_list:\n manager_dict = model_to_dict(employee)\n manager_dict['first_name'] = employee.user.first_name\n manager_dict['last_name'] = employee.user.last_name\n employees.append(manager_dict)\n data = {\"employees\": employees}\n return JsonResponse(data=data, content_type='application/json', safe=False)", "def employees_json_id(request, employee_id):\n curent_employee = Employee.objects.get(pk=int(employee_id))\n if curent_employee.is_manager:\n employee_list = Employee.objects.filter(manager=curent_employee)\n employees = list()\n for employee in employee_list:\n manager_dict = model_to_dict(employee)\n manager_dict['first_name'] = employee.user.first_name\n manager_dict['last_name'] = employee.user.last_name\n manager_dict['photo'] = employee.photo.url if employee.photo else ''\n employees.append(manager_dict)\n data = {\"employees\": employees}\n else:\n return JsonResponse(status=400, data={\"error\": \"Employee with id={} not is_manager\".format(int(employee_id))})\n return JsonResponse(data=data, content_type='application/json', safe=False)", "def get(self):\n employees = self.service.get_employees(strategy=selectinload)\n return self.schema.dump(employees, many=True), 200", "def get_employees_directory(self):\n response = requests.get(self._base_url + \"employees/directory\",\n auth=(self._api_key, \"pass\"),\n headers={'Accept': 'application/json'})\n if response.status_code != 200:\n response.raise_for_status()\n emps_json = json.loads(response.text)['employees']\n return {int(e['id']): Employee(e['displayName'],\n e['firstName'],\n e['lastName'],\n e['nickname']) for e in emps_json}", "def get(self):\n resultado = EmployeeModel.query.all()\n return resultado", "def employees(employee_id=None):\n\tif not employee_id:\n\t\temployee_data = _serialize_list(Employee.query.all())\n\telse:\n\t\temployee_data = _serialize_model(Employee.query.filter_by(id=employee_id).first())\n\n\tresp = jsonify(employee_data)\n\treturn resp", "def get_employees(self):\n return self.employees", "def employee_list(request):\n response_data = []\n for emp in Employee.objects.all().values(\n 'id', 'first_name', 'last_name', 'age', 'address', 'city',\n 'state', 'country'):\n response_data.append(emp)\n return JsonResponse(response_data, safe=False)", "def getEmployees(self):\n return 
self.employees", "def list(self, request):\n employee = self.controller.retrieve_all_employees()\n serializer = data_serializers.PresentEmployeeDataSerializer(employee, many=True)\n return Response(serializer.data)", "def list(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def get_managers():\n return {'managers': get_users('managers')}", "def get(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def employees(self) -> object:\n return self._employees", "def show_all_employees(self):\n try:\n employees = self.admin_repository.show_all_employees()\n if employees:\n for employee in employees:\n print(\"Employee Id : {}\".format(employee[0]))\n print(\"Name : {}\".format(employee[1]))\n print(\"Email : {}\".format(employee[2]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def list_employees(order_by=\"id\"):\n ret = {}\n status, result = _query(action=\"employees\", command=\"directory\")\n root = ET.fromstring(result)\n for cat in root:\n if cat.tag != \"employees\":\n continue\n for item in cat:\n emp_id = next(iter(item.values()))\n emp_ret = {\"id\": emp_id}\n for details in item:\n emp_ret[next(iter(details.values()))] = details.text\n ret[emp_ret[order_by]] = emp_ret\n return ret", "def get_mentee_list():\n # Get db object and users table\n db = get_db()\n users = db.users\n \n # Search database for mentees\n cursor = users.find({\"role\": \"Mentee\"})\n \n context = {'mentees': []}\n \n for document in cursor:\n temp = document\n del temp['_id']\n context['mentees'].append(temp)\n \n context['url'] = \"/api/v1/mentees/\"\n return flask.jsonify(**context)", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def get_employee(employee_id):\n\n employee = Employee.objects.get_or_404(id=employee_id)\n\n return jsonify({\n 'employee': employee\n })", "def get_employees(self):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('select * from employee')\n\n employees = list()\n for row in cursor:\n employee = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])\n employees.append(employee)\n return employees", "def show_all_amenities():\n\n amenities = storage.all(Amenity).values()\n new_list = []\n for amenity in amenities:\n new_list.append(amenity.to_dict())\n return jsonify(new_list)", "def get_amenities():\n amenities = []\n for amenity in storage.all(Amenity).values():\n amenities.append(amenity.to_dict())\n return jsonify(amenities)", "def get_amenities():\n list_amenities = []\n for amenity in storage.all('Amenity').values():\n list_amenities.append(amenity.to_dict())\n return jsonify(list_amenities)", "def get_all_user_development_plans_for_manager(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if not current_employee.isEnsoUser() and current_employee.is_manager:\n raise 
PermissionDenied()\n actions = employee.action_set.all()\n if not int(employee_id) in [obj.id for obj in Employee.objects.filter(manager=current_employee).all()]:\n raise PermissionDenied(\"Employee with id={} is not assigned to you.\".format(employee_id), 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return JsonResponse(status=201, data=data)", "def all_amenities():\n amenities_list = []\n for amenity in storage.all(Amenity).values():\n amenities_list.append(amenity.to_dict())\n return jsonify(amenities_list)", "def return_amenities():\n amenities = list(storage.all(Amenity).values())\n amenity_list = []\n for amenity in amenities:\n amenity_list.append(amenity.to_dict())\n return jsonify(amenity_list)", "def get(self, request):\n employee = EmployeeDetail.objects.all()\n response = {\n 'payment_methods': EmployeeSerializer(\n employee,\n many=True\n ).data\n }\n return Response(response)", "def show_all_employees():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n \n \n ## right now this is ALL users... 
\n \n return render_template(\"employee_display.html\", employees = employees)", "def list(self, request):\n teams = self.controller.retrieve_all_teams_employees()\n serializer = data_serializers.PresentTeamEmployeeDataSerializer(teams, many=True)\n return Response(serializer.data)", "def list_all_amenities():\n data = storage.all('Amenity')\n amenities = [v.to_dict() for k, v in data.items()]\n return jsonify(amenities)", "def get_employees(self, active_only):\n cursor = self.dbconnect.get_cursor()\n\n if active_only:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee WHERE is_active = TRUE')\n else:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee')\n\n employees = list()\n for row in cursor:\n obj = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n employees.append(obj)\n return employees", "def manager_active_list(self):\n _, body = self.request('/v1.1/managers/active', 'GET')\n return body", "def get_persons(self):\n response = self.do_request('/management/persons/export/json/')\n if response:\n return response.json()", "def get_amenities():\n amenities_dict_list = [amenity.to_dict() for amenity in\n storage.all(\"Amenity\").values()]\n return jsonify(amenities_dict_list)", "def test_api_can_get_all_employees(self):\n res = self.client().get(service_url_emp)\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))\n self.assertIn('name2', str(res.data))", "def _get_employee_info() -> List[List[str]]:\n return [\n ['100', 'Dave', 'Team Leader'],\n ['101', 'Ram', 'Developer'],\n ['102', 'Raj', 'Developer'],\n ['103', 'Rahul', 'Tester'],\n ]", "def get_emp_list(self):\n\t\tcondition= ''\n\t\temp_list=[]\n\t\tif self.is_for_all==0:\n\t\t\tif not self.selected_employees:\n\t\t\t\tfrappe.throw(_(\"No employees for the mentioned criteria\"))\n\t\t\t#emp_list = [cstr(d.employee) for d in self.selected_employees]\n\t\t\temp_list = frappe.db.sql_list(\"\"\"\n\t\t\t\tselect\n\t\t\t\t\temployee from `tabAttendance Salary Tool Employee`\n\t\t\t\twhere\n\t\t\t\t\tparent = '%(parent)s' \n\t\t\t\"\"\"%{\"parent\": self.name})\n\t\t\tcondition+= \"\"\" and t1.employee IN %(employees)s \"\"\"\n\t\tif self.is_open_period==0:\n\t\t\tif not self.start_date or not self.end_date:\n\t\t\t\tfrappe.throw(_(\"Satart Date and End Date are Mandatories\"))\n\t\t\tcondition= \"\"\" and attendance_date >= %(start_date)s and attendance_date <= %(end_date)s\"\"\"\n\t\temp_list = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.employee as employee, count(*) as attendance_days\n\t\t\tfrom\n\t\t\t\t`tabAttendance` t1\n\t\t\twhere\n\t\t\t\tt1.attendance_salary_tool is null\n\t\t\t\tand t1.docstatus = 1 and t1.status='Present'\n\t\t\t\t{condition} group by t1.employee order by t1.employee asc\n\t\t\"\"\".format(condition=condition),{\"employees\": tuple(emp_list),\"start_date\": self.start_date,\"end_date\": self.end_date}, as_dict=True)\n\t\treturn emp_list", "def employee_get(emp_id):\n try:\n emp = Employee.objects.get(id=emp_id)\n except Employee.DoesNotExist:\n return JsonResponse({\n 'status': False,\n 'message': 'Employee does not exists in database'\n }, status=404)\n _data = {\n 'id': emp.id,\n 'first_name': emp.first_name,\n 'last_name': emp.last_name,\n 'age': emp.age,\n 'city': emp.city.name,\n 'state': emp.state.name,\n 
'country': emp.country.name\n }\n return JsonResponse(_data, safe=False)", "def manager_info(self, manager):\n _, body = self.request('/v1.1/managers/active/%s' % manager, 'GET')\n return body", "def amenity_ret():\n ame_list = []\n all_objs = storage.all(\"Amenity\")\n for obj in all_objs.values():\n ame_list.append(obj.to_dict())\n return jsonify(ame_list)", "def amenity_get_all():\n am_list = []\n am_obj = storage.all(\"Amenity\")\n for obj in am_obj.values():\n am_list.append(obj.to_json())\n\n return jsonify(am_list)", "def get_persons(self):\n response = self.do_request('/misc/user/export/json')\n if response:\n return response.json()", "def current_employee(self, request: Request) -> Response:\n serializer = self.get_serializer_class()\n serializer = serializer(request.user, context={'request': request})\n return Response(serializer.data)", "def get_admins(self):\n from Employee import Employee\n admins = list()\n cursorRoles = self.dbconnect.get_cursor()\n cursorRoles.execute('select * from employeeRoles where role=\\'admin\\'')\n for row in cursorRoles:\n admins.append(self.get_employee(row[0]))\n return admins", "def get_all_data():\n return jsonify(service.get_all_data())", "def show_employee(emp_id, fields=None):\n ret = {}\n if fields is None:\n fields = \",\".join(\n (\n \"canUploadPhoto\",\n \"department\",\n \"displayName\",\n \"firstName\",\n \"id\",\n \"jobTitle\",\n \"lastName\",\n \"location\",\n \"mobilePhone\",\n \"nickname\",\n \"photoUploaded\",\n \"photoUrl\",\n \"workEmail\",\n \"workPhone\",\n \"workPhoneExtension\",\n )\n )\n\n status, result = _query(action=\"employees\", command=emp_id, args={\"fields\": fields})\n\n root = ET.fromstring(result)\n\n ret = {\"id\": emp_id}\n for item in root:\n ret[next(iter(item.values()))] = item.text\n return ret", "def get_managers_list(self):\n try:\n role_id = [x[0] for x in self.db_handler.get_roles_list() if x[1] == 'Менеджер'][0]\n staff_by_role = self.db_handler.get_all_staff_by_role_id(role_id)\n\n self.logger.write_to_log('managers list got', 'model')\n\n return staff_by_role\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def get(self):\n users = User.query.all()\n usersJSON = []\n for u in users:\n usersJSON.append({'id': u.id, 'admin': u.admin})\n return {'users': usersJSON}", "def get(self):\n users = User.query.all()\n usersJSON = []\n for u in users:\n usersJSON.append({'id':u.id, 'admin':u.admin})\n return { 'users' : usersJSON }", "def get_stores():\n stores = Store.query # no need to order\n stores_data = [store.to_dict() for store in stores.all()]\n return jsonify(stores=stores_data)", "def get_users():\n return jsonify([\n users.to_dict()\n for users in models.storage.all('User').values()\n ])", "def get_companies():\n all_companies = storage.all(Company).values()\n list_companies = []\n for company in all_companies:\n list_companies.append(company.to_dict())\n return jsonify(list_companies)", "def getELUsers(**kwargs):\n \n for key in kwargs:\n if type(kwargs[key]) == list:\n kwargs[key] = kwargs[key][0]\n \n allELUsers = ELUser.ELUser.all(**kwargs)\n allELUsersDictionaries = [dict(eluser) for eluser in allELUsers if dict(eluser)]\n \n return flask.Response(\n response = json.dumps(allELUsersDictionaries),\n status = 200,\n content_type = 'application/json'\n )", "def list(self):\n # Grupos en los que el usuario formo parte\n curso = 
self.get_curso_actual()\n entregadores = identity.current.user.get_entregadores(curso)\n r = cls.select(IN(cls.q.entregador, entregadores), orderBy=-Entrega.q.fecha)\n return dict(records=r, name=name, namepl=namepl, limit_to=identity.current.user.paginador)", "def get_all_user():\n user = UserModel.objects()\n return jsonify(user), 200", "def get_mentor_list():\n # Get db object and users table\n db = get_db()\n users = db.users\n \n # Search database for mentors\n cursor = users.find({\"role\": \"Mentor\"})\n \n context = {'mentors': []}\n \n for document in cursor:\n temp = document\n del temp['_id']\n context['mentors'].append(temp)\n \n context['url'] = \"/api/v1/mentors/\"\n return flask.jsonify(**context)", "def get_all(self, context, type_):\n types = None\n if type_ and isinstance(type_, basestring):\n types = type_.strip(\",\").split(\",\")\n\n try:\n db_resource_mgrs_data = self.db_api.get_all_resource_managers(\n context, types=types)\n\n _resource_mgrs_data = []\n for db_resource_mgr_data in db_resource_mgrs_data:\n _resource_mgrs_data.append(_make_response(\n db_resource_mgr_data))\n except Exception as e:\n msg = (\"Error retrieving the 'resource managers' reason : %s\"\n % e.message)\n LOG.exception(msg)\n raise exception.RetrieveException(e.message)\n return _resource_mgrs_data", "def amenities_all():\n return jsonify(list(map(lambda x: x.to_dict(),\n list(storage.all(Amenity).values()))))", "def jsonify_all(cls):\n return jsonify(accounts=[account.as_dict() for account in cls.query.all()])", "def get(self, request, pk):\n employee = EmployeeDetail.objects.get(pk=pk)\n response = {\n 'payment_methods': EmployeeSerializer(\n employee,\n ).data\n }\n return Response(response)", "def data():\n empo = get(empo_url).json()\n dict_new = {}\n for u in empo:\n empo_id = str(u.get(\"id\"))\n tasks = get(tasks_url + \"?userId=\" + empo_id).json()\n task_list = []\n for i in tasks:\n if i[\"userId\"] != empo_id:\n task_list.append({\"username\": u[\"username\"],\n \"task\": i[\"title\"],\n \"completed\": i[\"completed\"]})\n dict_new[empo_id] = task_list\n with open(\"todo_all_employees.json\", \"w\") as file:\n dump(dict_new, file)", "def get(self):\n args = self.parser.parse_args()\n date = get_date_or_none(args['date'])\n start_date = get_date_or_none(args['start_date'])\n end_date = get_date_or_none(args['end_date'])\n\n if date:\n employees = self.service.get_employees_by_date_of_birth(\n date, strategy=selectinload\n )\n elif start_date and end_date:\n employees = self.service.get_employees_born_in_period(\n start_date, end_date, strategy=selectinload\n )\n else:\n return self.BAD_DATE_MESSAGE, 400\n\n return self.schema.dump(employees, many=True), 200", "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def get_all_team_info():\n # hit this url in browser or postman like http://127.0.0.1:5000/getAllTeamInfo and it will return json data\n final_team_list = []\n if request.method == 'GET':\n teams = Team.query.all()\n for rec in range(len(teams)):\n final_team = {}\n final_team['Team_name'] = teams[rec].team_name\n final_team['Team_ID'] = teams[rec].team_id\n final_team_list.append(final_team)\n return json.dumps({\"TeamInformation\": final_team_list})", "def get(self):\n\n users = UserModel.get_top_earners()\n users_json = [user.json() for user in users]\n return {\"users\": users_json}", "def get_sem_schedule():\n\n rows = db.engine.execute(f\"SELECT * FROM sem_schedule WHERE EMP_ID 
= \\\"{g.user.EMP_ID}\\\"\")\n res = []\n for row in rows:\n res.append(dict(row))\n return jsonify(res)", "def get_employees(cls, strategy=lazyload):\n cls._check_strategy(strategy)\n\n return db.session.query(Employee).options(\n strategy(Employee.department)\n ).all()", "def companies():\n res = requests.get('http://0.0.0.0:5002/companies')\n return jsonify(res.json())", "def get(cls, employee_id):\n employee = EmployeeModel.find_by_id(employee_id)\n if not employee:\n return {'message': 'Employee not found, or you do not have the access'}, 404\n\n return employee.json()", "def retrieve(self, request, pk=None):\n employee = self.get_employee_object(pk)\n print(F\"Employee: {employee}\")\n serializer = data_serializers.PresentEmployeeDataSerializer(employee)\n return Response(serializer.data)", "def jugadores():\n\tjugadores = Jugador.query.order_by(Jugador.id.desc()).filter_by(activo=True)\n\treturn jsonify([jugador.to_dict()\n\t\t for jugador in jugadores])", "def user_ret():\n user_list = []\n all_objs = storage.all(\"User\")\n for obj in all_objs.values():\n user_list.append(obj.to_dict())\n return jsonify(user_list)", "def get_employee_information(user_name: str, employee_name: str, store_name: str):\n\n user_name = auth.get_username_from_hash(user_name)\n permission_handler.is_permmited_to(user_name=user_name, action=Action.EMPLOYEE_INFO.value,\n store_name=store_name)\n permission_handler.is_working_in_store(employee_name, store_name)\n return user_handler.get_employee_information(employee_name)", "def get(self, request):\n machines = Machine.objects.all()\n serializer = MachineSerializer(machines, many=True)\n return JsonResponse({'machines': [data['name'] for data in serializer.data]})", "def departments(department_name=None):\n\tif not department_name:\n\t\tdepartment_data = _serialize_list(Department.query.all(), backrefs=[\"employees\"])\n\t\tdepartment_data = {'departments': department_data, 'total': len(department_data)}\n\telse:\n\t\tdepartment_data = _serialize_model(Department.query.filter_by(name=department_name).first(), backrefs=[\"employees\"])\n\n\treturn jsonify(department_data)", "def userJSON():\n user = session.query(User).all()\n result = []\n\n for i in user:\n result += [i.serialize]\n\n return jsonify(Users=result)", "def manager_configs_list(self):\n _, body = self.request('/v1.1/managers/configs', 'GET')\n return body", "def restaurants_all() -> str:\n restaurant_objects = restaurants.load_restaurants()\n return jsonify(restaurant_objects)", "def get():\n\n # OPTION #1 - my preferable\n # sql = \"select * from director\"\n # if not (res := db.engine.execute(sql).fetchall()):\n # raise NotFoundError\n # return jsonify([dict(i) for i in res])\n\n # OPTION #2 - I don't like ORM queries, so it's just to meet the lesson topic\n if not (res := Director.query.all()):\n raise NotFoundError\n return jsonify(DirectorSchema(many=True).dump(res))", "def all_employees(request, company_id=None):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n company_super_user = current_employee.isCompanySuperUserOrHigher()\n if company_id:\n company = Company.objects.get(pk=company_id)\n else:\n company = current_employee.company\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n raise PermissionDenied()\n change_company_form = ChangeCompanyForm(initial=dict(company=company))\n return TemplateResponse(\n request,\n 'all_employees.html',\n {\n 'user': request.user,\n 'company_super_user': company_super_user,\n 'company': 
company,\n 'change_company_form': change_company_form,\n }\n )", "def list(self):\n return JSONResponse(self.request).data(items=self._get_agenda_items()).dump()", "def get_companies(self):\n response = self.do_request('/management/companies/export/json')\n if response:\n return response.json()", "def get_manager_stats(self):\n try:\n names, quantities, types, passwords = zip(*[(manager.name,\n manager.transports_in_fleet, manager.fleet_type, manager.password)\n for manager in self.manager_agents.values()])\n except ValueError:\n names, quantities, types, passwords = [], [], [], []\n\n df = pd.DataFrame.from_dict(\n {\"password\": passwords, \"name\": names, \"transports_in_fleet\": quantities, \"fleet_type\": types})\n return df", "def get_all_users():\n return jsonify(admin.get_all_users(current_app.scoped_session()))", "def get_articles():\n _, articles = base_query(db_session)\n return jsonify([p.serialize for p in articles])", "def show_overview_of_all_employees(self):\n\n print(\"OVERVIEW OF EMPLOYEES\\n\")\n\n employees_ob_list = self.llapi.get_employee_overview()\n \n for employee_ob in employees_ob_list:\n print(employee_ob.print_info_in_line(\"*\"))\n \n print(f\"\\nNAN AIR has {len(employees_ob_list)} employees\")\n\n print(\"\\nB Back\\n\")\n\n action_str = self.choose_action([\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"b\"])\n\n if action_str == \"b\":\n return", "def get_employee_permissions(user_name: str, store_name: str, employee_name: str):\n user_name = auth.get_username_from_hash(user_name)\n permission_handler.is_permmited_to(user_name, Action.EMPLOYEE_PERMISSIONS, store_name)\n permission_handler.is_working_in_store(employee_name, store_name)\n return user_handler.get_employee_information(\n employee_name) # TODO FOR NOW RETURN INFORMATION MAYBE TO CHANGE TO NEW FUNCTION", "def get_all_users():\n users = []\n for mv in storage.all(\"User\").values():\n users.append(mv.to_dict())\n return jsonify(users)", "def employers_get(label=None, page=None, per_page=None): # noqa: E501\n\n\n return query_manager.get_resource(\n label=label,\n page=page,\n per_page=per_page,\n rdf_type_uri=EMPLOYER_TYPE_URI,\n rdf_type_name=EMPLOYER_TYPE_NAME, \n kls=Employer)", "def generateEmployees(self):\r\n\r\n # Name\r\n maleNames = ['Perry Lovan', 'Horacio Arvidson', 'Gale Skipworth', 'Joshua Lodge', 'Noble Shutter', 'Kristopher Talor', 'Jarod Harrop', 'Joan Henrichs', 'Wilber Vitiello', 'Clayton Brannum', 'Joel Sennett', 'Wiley Maffei', 'Clemente Flore', 'Cliff Saari', 'Miquel Plamondon', 'Erwin Broadus', 'Elvin Defibaugh', 'Ramon Vaquera', 'Roberto Koval', 'Micah Sumter', 'Wyatt Cambareri', 'Jamal Delarosa', 'Franklyn Hayles', 'Riley Haslett', 'Robt Fincher', 'Abraham Denzer', 'Darius Jude', 'Phillip Sunderman', 'August Kindel', 'Jospeh Mawson', 'Damion Postma', 'Gregorio Pasco', 'Rosendo Downing', 'Chance Plascencia', 'Jewell Pankratz', 'Jerrell Tarrance', 'Michal Bliss', 'Josue Larocque', 'Aaron Harpster', 'Zack Hildebrant', 'Frank Souders', 'Lindsay Bechard', 'Agustin Marks', 'Mathew Fredericksen', 'Ivan Hanline', 'Michael Otto', 'Max Oberlander', 'Ricky Mckellar', 'Bernard Friedt', 'King Lorentzen']\r\n femaleNames = ['Lorretta Vansickle', 'Loura Steimle', 'Neomi Fritz', 'Vernie Vanderveen', 'Dede Poehler', 'Margarete Espinoza', 'Leda Leonardo', 'Fae Strand', 'Nichol Winford', 'Danika Ridgeway', 'Elvira Balentine', 'Sharell Xie', 'Sheree Booker', 'Emely Conine', 'Justina Kleve', 'Pia Maxton', 'Sophia Lark', 'Nilsa Albee', 'Felipa Seman', 'Jeraldine 
Watkins', 'Susann Sowards', 'Asha Irion', 'Shay Koran', 'Rosio Jahn', 'Rachal Slaven', 'Beryl Byron', 'Jona Lira', 'Margert Strite', 'Talia Beauregard', 'Jacqueline Vella', 'Rolande Mccready', 'Margret Hickerson', 'Precious Confer', 'Evita Nicolai', 'Fredda Groner', 'Laquanda Bracken', 'Alana Saddler', 'Melania Harring', 'Shae Everette', 'Marlyn Mcfalls', 'Madeline Nicols', 'Fonda Webster', 'Fumiko Steffy', 'Virginia Sprinkle', 'Lula Frisch', 'Mari Mulherin', 'Alecia Remillard', 'Jeanna Halderman', 'Ocie Waldrep', 'Theresa Knouse']\r\n\r\n for i in range(self.num_of_employees):\r\n\r\n # Clock in an hour before opening, 6 hours after, or 12 hours after\r\n clockIn = random.choice([7, 13, 19])\r\n\r\n # Clock out after 5 hours, 10 hours, or 15 hours\r\n clockOut = random.choice([13, 19, 23])\r\n while clockOut <= clockIn:\r\n clockOut = random.choice([13, 19, 23])\r\n\r\n # Hourly wage\r\n wage = random.choice([8, 9, 10, 12, 20])\r\n\r\n gender = random.choice(['M', 'F'])\r\n if gender == 'M':\r\n name = random.choice(maleNames)\r\n else:\r\n name = random.choice(femaleNames)\r\n\r\n self.c.execute(\"INSERT INTO Employee (Name, ClockIn, ClockOut, Wage) VALUES (?, ?, ?, ?)\", (name, clockIn, clockOut, wage))\r\n self.conn.commit()\r\n\r\n if self.print_employees:\r\n print(\"\\nName:\", name)\r\n print(\"Clock in:\", clockIn)\r\n print(\"Clock out:\", clockOut)\r\n print(\"Wage:\", wage)", "def all_Users():\n new_dict = []\n for usr in storage.all('User').values():\n new_dict.append(usr.to_dict())\n return jsonify(new_dict)", "def employees(self, employees: object):\n\n self._employees = employees", "def get_queryset(self, request):\n return models.Employee.objects.exclude(username='root')", "def _amenities():\n if request.method == \"GET\":\n all_amenities = []\n for key in storage.all(\"Amenity\").values():\n all_amenities.append(key.to_dict())\n return jsonify(all_amenities)\n\n if request.method == 'POST':\n if not request.is_json:\n return \"Not a JSON\", 400\n\n all_amenities = Amenity(**request.get_json())\n if \"name\" not in all_amenities.to_dict().keys():\n return \"Missing name\", 400\n\n all_amenities.save()\n return all_amenities.to_dict(), 201", "def all_items_handler():\n items = getAllItems()\n return jsonify(items=[i.serialize for i in items])", "def get_store_components():\n store_components = StoreComponent.query # no need to order\n store_components_data = [\n component.to_dict() for component in store_components.all()]\n return jsonify(store_components=store_components_data)", "def manager_agents(self):\n return self.get(\"manager_agents\")", "def employee(self) -> object:\n return self._employee", "def stars_employee_list(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee)\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(employee_stars, request)\n serializer = StarSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)" ]
[ "0.80657506", "0.79710835", "0.74464977", "0.7288061", "0.7011785", "0.6977914", "0.6967183", "0.6929463", "0.69241434", "0.6900269", "0.67404765", "0.66503465", "0.64858013", "0.6479257", "0.64647675", "0.6463374", "0.63625044", "0.6354452", "0.6331073", "0.6328877", "0.63211584", "0.62040216", "0.6190528", "0.617846", "0.6162493", "0.61377424", "0.60873115", "0.60214597", "0.5989768", "0.5965218", "0.5957248", "0.5952048", "0.5927821", "0.5923615", "0.59184974", "0.59083897", "0.5820829", "0.5812709", "0.5803209", "0.5799211", "0.5789114", "0.5773231", "0.5760487", "0.57067466", "0.56926244", "0.5674779", "0.5672757", "0.5660795", "0.56512856", "0.5647963", "0.5612086", "0.55994385", "0.5594398", "0.55810434", "0.5571979", "0.55591166", "0.5557574", "0.55394554", "0.5537992", "0.5512271", "0.5507969", "0.54877526", "0.54702705", "0.5465304", "0.54472685", "0.54422474", "0.5438528", "0.54249394", "0.5422105", "0.5420575", "0.54196966", "0.54195625", "0.54132056", "0.5401084", "0.5393211", "0.5388399", "0.5379751", "0.53795993", "0.5372473", "0.5369503", "0.5365875", "0.53656864", "0.53621024", "0.5357331", "0.53562504", "0.53541774", "0.5351981", "0.5343933", "0.53437763", "0.5335564", "0.5316571", "0.5301369", "0.5298152", "0.52975893", "0.5291447", "0.5289881", "0.52894837", "0.5287192", "0.52857566", "0.52831143" ]
0.7887314
2
Securely download files from user.
def employee_delete_file(request, employee_id, filename): current_user = Employee.objects.get(user__pk=request.user.pk) if not current_user.hasAccessTo(employee_id): logUnauthorizedAccess( "User tried to delete file he didnt have access to", request, filename ) return HttpResponse('unauthorized', status=401) user_dir = util.get_user_files_dir(employee_id) filename = os.path.join(user_dir, filename.replace('..', '')) if not os.path.isfile(filename): return HttpResponseNotFound('File does not exist') os.remove(filename) return HttpResponseRedirect(reverse('employee_detail', args=[employee_id]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def restricted_download():\n aaa.require(fail_redirect='/login')\n return bottle.static_file('static_file', root='.')", "def download_files(self):", "def download_file(self, net_id, request_id, file_name):\n current_user_roles = get_user_roles()\n if current_user_roles[\"STFADM\"] or net_id == current_user.net_id:\n try:\n return send_from_directory(\"{0}/user_uploads/{1}/{2}/\".format(self.__APP_PATH__, net_id, request_id),\n \"{0}\".format(secure_filename(file_name)), mimetype=\"blob\")\n except Exception as e:\n print(e)\n return abort(404)\n return abort(403)", "def download_file(file_ext, file_name):\n\n log_app.debug(\"file_name : %s \", \tfile_name)\n log_app.debug(\"file_ext : %s \", \tfile_ext)\n log_app.info(\"file_ext in AUTHORIZED_FILETYPES_LIST: %s\", (file_ext in AUTHORIZED_FILETYPES_LIST) )\n\n\n if file_ext in AUTHORIZED_FILETYPES_LIST :\n\n file_mimetype \t\t= AUTHORIZED_FILETYPES_DICT[file_ext][\"mimetype\"]\n file_foldername \t= AUTHORIZED_FILETYPES_DICT[file_ext][\"folder\"]\n file_folder \t\t= \"static/{}/\".format(file_foldername)\n file_name_ext \t\t= \"{}.{}\".format(file_name, file_ext)\n full_filepath \t\t= file_folder + file_name_ext\n\n try :\n\n return send_file(\tfull_filepath,\n mimetype\t\t\t= file_mimetype,\n attachment_filename\t= file_name_ext,\n as_attachment\t\t= True\n )\n except :\n\n log_app.error(\"downloading this file is not working: %s.%s \", file_name, file_ext )\n\n return redirect(url_for('home'))\n\n else :\n\n log_app.error(\"downloading this file is not authorized: %s.%s \", file_name, file_ext )\n\n return redirect(url_for('home'))", "def test_file_download(self):\n\n # Downloading without auth = unauthorized error (401)\n with self.assertRaises(requests.exceptions.HTTPError):\n self.assertFalse(self.api.downloadFile('/media/part/files/1/test.pdf', 'test.pdf'))", "def download(self, net_id, request_id, request_date):\n current_user_roles = get_user_roles()\n\n if current_user_roles[\"STFADM\"] or net_id == current_user.net_id:\n if self.make_pdf(net_id, request_id, request_date):\n try:\n with ZipFile(\"{0}/user_uploads/{1}/{2}/[{1}-{3}]_request.zip\".format(self.__APP_PATH__, net_id, request_id, request_date), mode=\"w\") as zip_archive:\n for user_file in scandir(\"{0}/user_uploads/{1}/{2}\".format(self.__APP_PATH__, net_id, request_id)):\n if \"_request.zip\" not in user_file.name and user_file.name not in self.__SPECIAL_FILES__:\n zip_archive.write(user_file.path, user_file.name, ZIP_DEFLATED)\n\n return send_from_directory(\"{0}/user_uploads/{1}/{2}/\".format(self.__APP_PATH__, net_id, request_id),\n \"[{0}-{1}]_request.zip\".format(net_id, request_date), mimetype=\"blob\")\n except Exception as e:\n print(e)\n return abort(404)\n return abort(403)", "def get_feedback_file(filename):\n \n if 'username'in session:\n \n s3_resource = boto3.resource('s3')\n bucket = s3_resource.Bucket(S3_BUCKET)\n \n file_obj = bucket.Object(filename).get()\n \n return Response(file_obj['Body'].read(),\n mimetype='text/plain',\n headers={'Content-Disposition': f'attachment;filename={filename}'}\n )\n \n flash('You need to be logged in to download files.', 'info')\n return redirect(url_for('login'))", "def download(connection, priv_key, server_pub_key):\r\n\r\n # Get the filename from the user\r\n file_name = input('What file would you like to download from the server?: ')\r\n\r\n # Tell the server to prepare to download a file\r\n connection.sendall(rsa.encrypt(b'DOWNLOAD', server_pub_key))\r\n\r\n # Send the file name to the server\r\n 
connection.sendall(rsa.encrypt(file_name.encode(), server_pub_key))\r\n\r\n # Attempt to download the file\r\n try:\r\n shared.download_file(connection, priv_key, file_name)\r\n\r\n # If the server can't find the file that is asked for\r\n except ValueError:\r\n print(''.join(['\\nThe file does not exist']), file=sys.stderr)", "def get_piece_file(filename):\n \n if 'username'in session:\n \n s3_resource = boto3.resource('s3')\n bucket = s3_resource.Bucket(S3_BUCKET)\n \n file_obj = bucket.Object(filename).get()\n \n return Response(file_obj['Body'].read(),\n mimetype='text/plain',\n headers={'Content-Disposition': f'attachment;filename={filename}'}\n )\n \n flash('You need to be logged in to download files.', 'info')\n return redirect(url_for('login'))", "def download_file(url, direct_access=False, user=None, password=None):\n parsed_uri = urlparse.urlparse(url)\n if parsed_uri.scheme == 'http' or parsed_uri.scheme == 'https':\n tmpfile = tempfile.mktemp()\n # NOTE the stream=True parameter\n # Assign User-Agent to emulate browser\n headers = {\n 'User-Agent': 'Mozilla/5.0 (X11; U; Linux i686) '\n 'Gecko/20071127 Firefox/2.0.0.11'\n }\n if user:\n r = requests.get(\n url, headers=headers, stream=True, auth=(user, password))\n else:\n r = requests.get(url, headers=headers, stream=True)\n with open(tmpfile, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n\n # get extension\n content_disposition = r.headers['content-disposition']\n fname = re.findall(\"filename=[\\'\\\"]?(.+)[\\'\\\"]\", content_disposition)\n _, ext = os.path.splitext(fname[0])\n shutil.move(tmpfile, '%s%s' % (tmpfile, ext))\n tmpfile = '%s%s' % (tmpfile, ext)\n return tmpfile\n elif parsed_uri.scheme == 'file':\n file_path = urllib.unquote_plus(parsed_uri.path).decode('utf-8')\n elif not parsed_uri.scheme:\n file_path = parsed_uri.path\n else:\n raise Exception(\n 'URI scheme not recognized %s' % url)\n\n if direct_access:\n return file_path\n\n tmpfile = tempfile.mktemp()\n shutil.copy(file_path, tmpfile)\n return tmpfile", "async def _download(self) -> None:\n\n # do request\n async with aiohttp.ClientSession() as session:\n async with session.get(self.url, auth=self._auth, timeout=self._timeout) as response:\n # check response\n if response.status == 200:\n # get data and return it\n self._buffer = await response.read()\n elif response.status == 401:\n log.error(\"Wrong credentials for downloading file.\")\n raise FileNotFoundError\n else:\n log.error(\"Could not download file from filecache.\")\n raise FileNotFoundError", "def unavoidable_download_method(self, target, name):\n # Get path to file\n file_path = os.path.join(self.work_dir, name)\n\n # Create necessary directories if not present\n self.mkdir_p(self.work_dir)\n\n # Check if file exists, download if not presente\n if not os.path.exists(file_path):\n try:\n subprocess.check_call(['curl', '-fs', self.input_urls[name], '-o', file_path])\n except subprocess.CalledProcessError:\n raise RuntimeError('\\nNecessary file could not be acquired: {}. Check input URL')\n except OSError:\n raise RuntimeError('Failed to find \"curl\". 
Install via \"apt-get install curl\"')\n\n assert os.path.exists(file_path)\n\n # Update FileStoreID\n target.updateGlobalFile(self.ids[name], file_path)\n\n return file_path", "def download(self, download_path):\n return", "def download_key_files(request):\n\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n path = os.path.join(workpath, './static/BioC/linking.key')\n path1 = os.path.join(workpath, './static/BioC/mention.key')\n ment = request.GET.get('type_key',None)\n if ment == 'mentions':\n path = open(path1, 'r')\n return HttpResponse(path, content_type='text/plain')\n elif ment == 'linking':\n path1 = open(path, 'r')\n return HttpResponse(path1, content_type='text/plain')", "def download(filename):\n return send_from_directory(directory='pdf', filename=filename)", "def download():\n raise NotImplementedError", "def test_download_redirect(self):\n\n fetcher = Fetcher('/unused/root/dir')\n with self.setup_server() as base_url:\n self._URL = base_url\n self.assertFalse(self._URL2_ACCESSED)\n self.assertFalse(self._URL1_ACCESSED)\n\n path = fetcher.download(base_url + '/url2')\n self.assertTrue(self._URL2_ACCESSED)\n self.assertTrue(self._URL1_ACCESSED)\n\n with open(path) as fp:\n self.assertEqual('returned from redirect\\r\\n', fp.read())", "def download():\n if auth.has_membership(1):\n user = \"Admin\"\n elif auth.has_membership(2):\n user = \"Examiner\"\n elif auth.has_membership(3):\n user = \"student\"\n elif auth.has_membership(5):\n user = \"Managment\"\n\n db.activity_log.insert( Title_entry=\"Download assignment\", \n referance_id=auth.user.id,\n remarks=\"content downloaded by {}\".format(user))\n db.commit()\n return response.download(request, db)", "def download_file(url, dest=None, force=False, trusted=False):\n url, filename = get_save_path(url, dest, force)\n keep_going = True\n success = False\n if url is None:\n return 'Aborted!'\n\n if url:\n success = download_wget(url, filename, trusted) # Try wget\n if not success:\n success = download_urllib(url, filename) # Try urllib\n if not success:\n success = download_pip(url, filename, force, trusted) # Try urllib\n if not success:\n split_url = url.split('/')\n msg = '\\n'.join([\n \"\\n\\nERROR in Web Access! 
- You may be behind a firewall!\",\n \"-\" * 52,\n \"You should be able to bybass this by using a browser to download:\",\n \"\\t%s\\nfrom:\\t%s\\nthen copying the download file to:\\n\\t%s\" % (\n split_url[-1], '/'.join(split_url[:-1]), filename),\n ])\n print(msg, '\\n')\n wx.MessageBox(msg, caption='WDOWNLOAD ERROR!',\n style=wx.OK|wx.CENTRE|wx.ICON_ERROR)\n return \"FAILURE or Abort!\"\n\n return filename", "def open_download_keep_file(self):\n self._unable_open_option()\n self._tap_on_confirm_button(yes=False, msg=\"Keep file button\")", "def download_files(self) -> None:\n\n for name, url in self.files.items():\n print(f\"Download {name.split('/')[-1]}\")\n wget.download(url, os.path.join(\"data\", name))", "def download_shoppinglist(request):\n if not (user := request.user):\n return dhttp.HttpResponse(\"Unauthorized\", status = 401)\n\n file = output_supplies(user)\n response = dhttp.HttpResponse(file, content_type='application/force-download')\n response['Content-Disposition'] = f'attachment; filename=\"Shopping List.html\"'\n return response", "async def download_file(\n location_id: LocationID,\n file_id: StorageFileID,\n user_id: UserID,\n link_type: LinkType = LinkType.PRESIGNED,\n):", "def test_file_access_allowed_with_disabled_security(self):\n hooks = setup_hooks(disable_security=True)\n\n result = hooks.act_on_cloned_repo(UNAUTHORIZED_READ_FILE_REPO)\n\n assert result.status == Status.SUCCESS\n assert (\n _output.test_result_header(\n \"FiboTest\",\n NUM_FIBO_TESTS,\n NUM_FIBO_TESTS,\n _output.SUCCESS_COLOR,\n )\n in result.msg\n )", "def download(self,fn):\n\t\treturn False #TODO: implement meme download", "def download_pdf(self, net_id, request_id, request_date):\n current_user_roles = get_user_roles()\n if current_user_roles[\"STFADM\"] or net_id == current_user.net_id:\n if self.make_pdf(net_id, request_id, request_date):\n return send_from_directory(\"{0}/user_uploads/{1}/{2}/\".format(self.__APP_PATH__, net_id, request_id),\n \"[{0}-{1}]_report.pdf\".format(net_id, request_date), mimetype=\"blob\")\n\n return abort(404)\n return abort(403)", "def download_file(filename):\n return send_from_directory('uploads', filename, as_attachment=True)", "def download():\n try:\n cli.run(\n [URL, '--output', TEMP_DIR],\n )\n except SystemExit:\n return None", "def download_data(self):\r\n \r\n for file in self.__files:\r\n file_to_download = os.path.join(self.__folder, os.path.basename(file))\r\n if not os.path.isfile(file_to_download):\r\n self.__download_file(file)", "def download_po(app, request, filename):\n \n filename = secure_filename(filename)\n file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n \n if file_exists(file_path):\n return send_from_directory(\n app.config['UPLOAD_FOLDER'],\n filename,\n as_attachment=True)\n \n flash('You\\'re trying to download file that are not exists.', 'error')\n return redirect(url_for('home'))", "def download_redirect(id_):\n if check_expired_file(id_):\n return abort(404)\n return redirect(url_for(\"file_handler.download\", id_=id_))", "def download_file(self, filename: str, save_dir: str) -> None:\n raise NotImplementedError()", "def test_read_unauthorized(self):\n\n url = reverse('file', kwargs={'file_id': str(self.file.id)})\n\n data = {}\n\n self.client.force_authenticate(user=self.test_user2_obj)\n response = self.client.get(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def authenticated_files(request, referral_attachment_id):\n user = request.user\n\n # If the user is not 
logged-in, just bail out\n if not user.is_authenticated:\n return HttpResponse(status=401)\n\n # Get the related referral attachment object or return a 404\n try:\n referral_attachment = ReferralAttachment.objects.get(id=referral_attachment_id)\n except ReferralAttachment.DoesNotExist:\n return HttpResponse(status=404)\n\n # Get the actual filename from the referral attachment (ie. remove the UUID prefix and slash)\n filename = str(referral_attachment.file).rsplit(\"/\", 1)[-1]\n\n # Get the content type and encoding to serve the file as best we can\n content_type, encoding = mimetypes.guess_type(str(filename))\n content_type = content_type or \"application/octet-stream\"\n\n # Actually serve the file using Django's http facilities\n response = FileResponse(\n referral_attachment.file.open(\"rb\"), content_type=content_type\n )\n response[\"Content-Disposition\"] = f'attachment; filename=\"{filename}\"'\n if encoding:\n response[\"Content-Encoding\"] = encoding\n\n return response", "def headlessDownloadFile(self, targetUrl, testForCaptcha=None):\n self._wd.get(targetUrl)\n if testForCaptcha:testForCaptcha()\n #time to download\n session = requests.Session()\n cookies = self._wd.get_cookies()\n for cookie in cookies:\n session.cookies.set(cookie['name'], cookie['value'])\n response = session.get(targetUrl)\n return response.content", "def download(self):\n pass", "def download(self):\n pass", "def download_file():\r\n global title_dict\r\n title=ResultsListbox.get(ResultsListbox.curselection())\r\n link=title_dict[title]\r\n file_dl=urllib.URLopener()\r\n file_dl.retrieve(link,str(title)+\".pdf\")", "def download(self, url: str, dest: PathLike, force: bool = False):", "def is_downloadable(self):\n return False", "def provide(request, key):\n # Read database\n # This code runs only with python >= 2.6\n# stored_file_obj = Download.objects.get(link_key=key)\n# try:\n# filepath = stored_file_obj.get_path() # model method to validate and deliver path\n# except IsExpiredError as e: # works only with python 2.6 or later (for a solution of older versions see below!)\n# return error(request, e.value)\n\n # alternate code that also run in older python before 2.6\n stored_file_obj = Download.objects.get(link_key=key)\n try:\n filepath = stored_file_obj.get_path() # model method to validate and deliver path\n except IsExpiredError: # works with pyhton before 2.6\n return error(request)\n \n # make file path suitable for different installations\n delimiter = presettings.DYNAMIC_LINK_MEDIA.strip('/').split('/')[-1]\n # now we use the objects get_paht() method to be sure the object instance keep up to date.\n file_path = os.path.normpath(presettings.DYNAMIC_LINK_MEDIA + '/' + filepath.split(delimiter)[-1])\n\n # read file as binary\n try:\n fsocket = open(file_path, 'rb') # garbage collector will deal with not closed fsocket\n except IOError:\n stored_file_obj.active = False\n stored_file_obj.save() # only raise the following once\n return HttpResponseNotFound(unicode(_(u'File not found!'))) # admin will get informed by mail\n\n# # read file as binary\n# try:\n# f = open(file_path, 'rb')\n# except IOError:\n# stored_file_obj.active = False\n# stored_file_obj.save() # only raise the following once\n# return HttpResponseNotFound(unicode(_(u'File not found!'))) # admin will get informed by mail\n# fsocket = f.read()\n# f.close()\n\n # get file parameters\n file_name = os.path.basename(file_path)\n file_size = os.path.getsize(file_path)\n\n # specify mimetype and encoding\n auto_mimetype, auto_encoding = 
mimetypes.guess_type(file_path)\n if not auto_mimetype: # for unknown types use stream\n auto_mimetype = 'application/octet-stream'\n\n # response object\n response = HttpResponse(fsocket, mimetype=auto_mimetype) # object instance with mimetype and file\n # set headers in the response object\n # a list of headers: http://en.wikipedia.org/wiki/List_of_HTTP_header_fields\n # encode('utf-8') assuming you're running on a server with UTF-8 as the filesystem encoding.\n response['Content-Disposition'] = 'attachment; filename=%s' % file_name.encode('utf-8') # add correct filename\n if auto_encoding and auto_encoding is not 'gzip':\n # set encoding but exclude gzip from encoding headers\n # GZip uses zlib, but on its own zlib produces content that's improperly\n # encoded for a browser seeing 'gzip' as the content encoding.\n response['Content-Encoding'] = auto_encoding\n response['Content-Length'] = file_size # set response file size for the browsers progress bar\n \n return response", "def run(self):\n download(self.attempt)", "def __maybeDownload():\n if not os.path.isdir(Download.DATA_ROOT): # 若 data 目录不存在,创建 data 目录\n os.mkdir(Download.DATA_ROOT)\n file_path = os.path.join(Download.DATA_ROOT, Download.FILE_NAME)\n\n if os.path.exists(file_path): # 若已存在该文件\n statinfo = os.stat(file_path)\n if statinfo.st_size == Download.FILE_SIZE: # 若该文件正确,直接返回 file_path\n print('Found and verified %s' % file_path)\n return file_path\n else: # 否则,删除文件重新下载\n os.remove(file_path)\n\n download_url = Download.URL + Download.FILE_NAME\n print('Downloading %s ...' % download_url)\n filename, _ = urlretrieve(download_url, file_path) # 下载数据\n print('Finish downloading')\n\n statinfo = os.stat(filename)\n if statinfo.st_size == Download.FILE_SIZE: # 校验数据是否正确下载\n print('Found and verified %s' % filename)\n else:\n print(statinfo.st_size)\n raise Exception('Failed to verify ' + filename + '. 
Can you get to it with a browser ?')\n return filename", "def filedownload(source, destination):\n\n # Initiate the download\n urllib.request.urlretrieve(source, destination)", "def download_file():\n for lines in urls:\n try:\n req.urlretrieve(lines, '{0}/{1}'.format(folder_path, lines.split('/')[-1]))\n time.sleep(1)\n print ('File - {} - downloaded successfully'.format(lines.split('/')[-1]))\n except urllib.error.HTTPError:\n print('File is missing or not reachable')\n print('Download Complete & Successful!')", "def download_search():\r\n if not LOGGEDIN:\r\n return render_template(\"login_temp.html\", msg=\"\")\r\n return render_template('/download_search')", "def download_file(self, parsed_event, input_dir_path):", "def downloadFile(self, base_url, file_name):\n url = os.path.join(base_url, file_name)\n req = urllib2.Request(url)\n try:\n f = urllib2.urlopen(req, timeout=self.timeout)\n local_file = open(os.path.join(self.config.get('PATHS', 'pdfdir'), file_name), \"w\")\n local_file.write(f.read())\n local_file.close()\n except Exception, err:\n print \"[ Failed ]\"\n print \"\\n***ERROR in downloadFile: %s\" % err\n sys.exit(0)", "def pre_download(self, remote_files):\n pass", "def download_file():\n\n if 'POST' == request.method:\n file_id = request.form['file_id']\n else:\n file_id = request.args.get('file_id')\n\n # 1 ==> example_1.tgz\n file_path = file_manager.get_file_path_from_id(file_id)\n print \"serving file: \" + file_path\n return send_file(file_path, as_attachment=True)", "def test_read_unauthenticated(self):\n\n url = reverse('file', kwargs={'file_id': str(self.file.id)})\n\n data = {}\n\n response = self.client.get(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def is_downloadable(self):\n return True", "def employee_download_file(request, employee_id, filename):\n\n current_user = Employee.objects.get(user__pk=request.user.pk)\n\n if not current_user.hasAccessTo(employee_id):\n logUnauthorizedAccess(\n \"User tried to download file he didnt have access to\", request, filename\n )\n return HttpResponse('unauthorized', status=401)\n\n user_dir = util.get_user_files_dir(employee_id)\n filename = os.path.join(user_dir, filename.replace('..', ''))\n\n if not os.path.isfile(filename):\n return HttpResponseNotFound('File does not exist')\n\n wrapper = FileWrapper(file(filename))\n\n ext = os.path.splitext(filename)[1].lower()\n\n response = HttpResponse(\n wrapper, # i'd rather do this hack than use urllib.pathname2url\n content_type=MimeTypes().guess_type('/bogus/path/bogus_file' + ext)\n )\n response['Content-Disposition'] = 'attachment; filename=%s' % smart_str(filename)\n response['Content-Length'] = os.path.getsize(filename)\n\n return response", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return 
response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def file_download_url_mapper(request, shortkey, filename):\n\n authorize(request, shortkey, view=True, edit=True, full=True, superuser=True)\n irods_file_path = '/'.join(request.path.split('/')[2:-1])\n istorage = IrodsStorage()\n file_download_url = istorage.url(irods_file_path)\n return HttpResponseRedirect(file_download_url)", "def is_restricted_download(self):\n return self.has_label(RESTRICTEDDOWNLOAD_LABEL)", "def download(urls, dest_folder):\n pass", "def download_file(self, remote_file):\n remote_file.download()", "def download_file(url, fn, cookiejar, cookies_file, wget_bin):\n\ttry:\n\t\t# create the path if need be\n\t\tbasedir = os.path.dirname(fn)\n\t\tif not os.path.isdir(basedir):\n\t\t\tos.makedirs(basedir)\n\n\t\tif wget_bin is not None:\n\t\t\tdownload_file_wget(wget_bin, url, fn, cookies_file)\n\t\telse:\n\t\t\tdownload_file_nowget(url, fn, cookiejar)\n\n\texcept KeyboardInterrupt, e: \n\t\tprint \"\\nKeyboard Interrupt -- Removing partial file:\", fn\n\t\tos.remove(fn)\n\n\t\traise e", "def download():\n try:\n response = send_from_directory(\n app.config.get(\"DATA_DIR\"), \"whiteboard.zip\", as_attachment=True\n )\n\n # change headers to stop browser from delivering cached version\n response.headers[\"Last-Modified\"] = datetime.now()\n response.headers[\n \"Cache-Control\"\n ] = \"no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0\"\n response.headers[\"Pragma\"] = \"no-cache\"\n response.headers[\"Expires\"] = \"-1\"\n\n return response\n\n except:\n return traceback.format_exc()", "def uploaded_files_are_not_shown_on_uploadpage(self,filenames,user):\n \n response = self.get_uploadpage_response(user,self.testproject)\n \n for filename in filenames:\n self.assertTrue(filename not in response.content,\"Restricted file\"\n \" '%s' was visible on download page when viewed\"\n \" by user %s\"\n % (filename,user.username))", "def download_file(file_directory, file_path, login_request, user_id):\n\n data, headers, server_host, server_port = process_request_header(file_directory, file_path, login_request, user_id)\n\n request = requests.post(\"http://\" + server_host + \":\" + server_port + \"/fileOperations/downloadFile\",\n headers=headers)\n file = open(\"../\" + request.text)\n\n return file", "def download(self, download_request):\n raise 
NotImplementedError", "def download():\n \"\"\"\n \"The book p.79 have error.\n \"https://github.com/login/oauth/authorize?client_id=7e0a3cd836d3e544dbd9&redirect_uri=https%3A%2F%2Fgist.github.com%2Fauth%2Fgithub%2Fcallback%3Freturn_to%3Dhttps%253A%252F%252Fgist.github.com%252Fyoungsoul%252Ffc69665c5d08e189c57c0db0e93017a6&response_type=code&state=9b385430ee7cd1a75ca91c1d1cb6c565111f6b81e54a71f42ae9b22035241b9b\n \"\"\"\n subprocess.call([\n 'wget',\n 'https://github.com/amplab/datascience-sp14/raw/master/lab7/mldata/mnist-original.mat', \n '-P',\n 'origin_data/'\n ])\n logger.info('Download success!')", "def download(filename):\n print \"Downloading\", filename\n file_content = urlopen(\n urljoin(URL_PATH, filename)\n )\n write_data_to_file(\n file_content.read(),\n os.path.join(\n '/tmp',\n filename\n )\n )", "def download_file(driver, link, filename):\n download_path = os.path.join(os.environ['HOME'], \"Downloads\", filename)\n # TODO: copy cookies, user agent, ect to session\n s = requests.session()\n r = s.get(link, stream=True)\n with open(download_path, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n return download_path", "def download(all):\n print(\"Downloading\")", "def https_download_file(**data):\n import os\n import requests\n\n ##minimal data inputs payload\n server_url = data.get('server_url', '')\n file_name = data.get('file_name', '')\n file_path = data.get('file_path', '')\n headers = data.get('headers', '')\n ##extra data inputs payload\n ##\n ##\n\n if server_url==None:\n raise(NameError('No `server URL` specified'))\n \n if file_name==None:\n raise(NameError('No `file_name` specified'))\n\n file_url = os.path.join(server_url,file_name)\n\n if not os.path.exists(file_path):\n os.mkdir(file_path)\n\n full_name = os.path.join(file_path,file_name)\n \n if not os.path.isfile(full_name):\n r = requests.get(file_url, headers=headers)\n if not r.status_code==200: \n raise r.raise_for_status()\n open(full_name , 'wb').write(r.content)\n\n return full_name" ]
[ "0.6933737", "0.67762506", "0.6603779", "0.65035725", "0.6269322", "0.6244334", "0.6117313", "0.6104816", "0.6099837", "0.60964644", "0.6042637", "0.60026896", "0.59894454", "0.59013104", "0.58944786", "0.5857168", "0.58530647", "0.58518803", "0.5833485", "0.5799934", "0.5795556", "0.57929045", "0.5782133", "0.575195", "0.5746243", "0.5745769", "0.5740088", "0.5736157", "0.57217735", "0.5718836", "0.56970006", "0.56962013", "0.5693035", "0.5676699", "0.56607157", "0.564942", "0.564942", "0.56305975", "0.5618994", "0.56108636", "0.5609617", "0.56049067", "0.55978495", "0.5597043", "0.5583048", "0.5581544", "0.55640453", "0.55534756", "0.5553169", "0.5551905", "0.55483", "0.55436516", "0.55424774", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.5542299", "0.553868", "0.5529657", "0.5527789", "0.5527306", "0.5499562", "0.5499065", "0.5496991", "0.5496756", "0.5493695", "0.5488269", "0.5479618", "0.54778093", "0.5472788", "0.5467429" ]
0.0
-1
Securely download files from user.
def employee_download_file(request, employee_id, filename): current_user = Employee.objects.get(user__pk=request.user.pk) if not current_user.hasAccessTo(employee_id): logUnauthorizedAccess( "User tried to download file he didnt have access to", request, filename ) return HttpResponse('unauthorized', status=401) user_dir = util.get_user_files_dir(employee_id) filename = os.path.join(user_dir, filename.replace('..', '')) if not os.path.isfile(filename): return HttpResponseNotFound('File does not exist') wrapper = FileWrapper(file(filename)) ext = os.path.splitext(filename)[1].lower() response = HttpResponse( wrapper, # i'd rather do this hack than use urllib.pathname2url content_type=MimeTypes().guess_type('/bogus/path/bogus_file' + ext) ) response['Content-Disposition'] = 'attachment; filename=%s' % smart_str(filename) response['Content-Length'] = os.path.getsize(filename) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def restricted_download():\n aaa.require(fail_redirect='/login')\n return bottle.static_file('static_file', root='.')", "def download_files(self):", "def download_file(self, net_id, request_id, file_name):\n current_user_roles = get_user_roles()\n if current_user_roles[\"STFADM\"] or net_id == current_user.net_id:\n try:\n return send_from_directory(\"{0}/user_uploads/{1}/{2}/\".format(self.__APP_PATH__, net_id, request_id),\n \"{0}\".format(secure_filename(file_name)), mimetype=\"blob\")\n except Exception as e:\n print(e)\n return abort(404)\n return abort(403)", "def download_file(file_ext, file_name):\n\n log_app.debug(\"file_name : %s \", \tfile_name)\n log_app.debug(\"file_ext : %s \", \tfile_ext)\n log_app.info(\"file_ext in AUTHORIZED_FILETYPES_LIST: %s\", (file_ext in AUTHORIZED_FILETYPES_LIST) )\n\n\n if file_ext in AUTHORIZED_FILETYPES_LIST :\n\n file_mimetype \t\t= AUTHORIZED_FILETYPES_DICT[file_ext][\"mimetype\"]\n file_foldername \t= AUTHORIZED_FILETYPES_DICT[file_ext][\"folder\"]\n file_folder \t\t= \"static/{}/\".format(file_foldername)\n file_name_ext \t\t= \"{}.{}\".format(file_name, file_ext)\n full_filepath \t\t= file_folder + file_name_ext\n\n try :\n\n return send_file(\tfull_filepath,\n mimetype\t\t\t= file_mimetype,\n attachment_filename\t= file_name_ext,\n as_attachment\t\t= True\n )\n except :\n\n log_app.error(\"downloading this file is not working: %s.%s \", file_name, file_ext )\n\n return redirect(url_for('home'))\n\n else :\n\n log_app.error(\"downloading this file is not authorized: %s.%s \", file_name, file_ext )\n\n return redirect(url_for('home'))", "def test_file_download(self):\n\n # Downloading without auth = unauthorized error (401)\n with self.assertRaises(requests.exceptions.HTTPError):\n self.assertFalse(self.api.downloadFile('/media/part/files/1/test.pdf', 'test.pdf'))", "def download(self, net_id, request_id, request_date):\n current_user_roles = get_user_roles()\n\n if current_user_roles[\"STFADM\"] or net_id == current_user.net_id:\n if self.make_pdf(net_id, request_id, request_date):\n try:\n with ZipFile(\"{0}/user_uploads/{1}/{2}/[{1}-{3}]_request.zip\".format(self.__APP_PATH__, net_id, request_id, request_date), mode=\"w\") as zip_archive:\n for user_file in scandir(\"{0}/user_uploads/{1}/{2}\".format(self.__APP_PATH__, net_id, request_id)):\n if \"_request.zip\" not in user_file.name and user_file.name not in self.__SPECIAL_FILES__:\n zip_archive.write(user_file.path, user_file.name, ZIP_DEFLATED)\n\n return send_from_directory(\"{0}/user_uploads/{1}/{2}/\".format(self.__APP_PATH__, net_id, request_id),\n \"[{0}-{1}]_request.zip\".format(net_id, request_date), mimetype=\"blob\")\n except Exception as e:\n print(e)\n return abort(404)\n return abort(403)", "def get_feedback_file(filename):\n \n if 'username'in session:\n \n s3_resource = boto3.resource('s3')\n bucket = s3_resource.Bucket(S3_BUCKET)\n \n file_obj = bucket.Object(filename).get()\n \n return Response(file_obj['Body'].read(),\n mimetype='text/plain',\n headers={'Content-Disposition': f'attachment;filename={filename}'}\n )\n \n flash('You need to be logged in to download files.', 'info')\n return redirect(url_for('login'))", "def download(connection, priv_key, server_pub_key):\r\n\r\n # Get the filename from the user\r\n file_name = input('What file would you like to download from the server?: ')\r\n\r\n # Tell the server to prepare to download a file\r\n connection.sendall(rsa.encrypt(b'DOWNLOAD', server_pub_key))\r\n\r\n # Send the file name to the server\r\n 
connection.sendall(rsa.encrypt(file_name.encode(), server_pub_key))\r\n\r\n # Attempt to download the file\r\n try:\r\n shared.download_file(connection, priv_key, file_name)\r\n\r\n # If the server can't find the file that is asked for\r\n except ValueError:\r\n print(''.join(['\\nThe file does not exist']), file=sys.stderr)", "def get_piece_file(filename):\n \n if 'username'in session:\n \n s3_resource = boto3.resource('s3')\n bucket = s3_resource.Bucket(S3_BUCKET)\n \n file_obj = bucket.Object(filename).get()\n \n return Response(file_obj['Body'].read(),\n mimetype='text/plain',\n headers={'Content-Disposition': f'attachment;filename={filename}'}\n )\n \n flash('You need to be logged in to download files.', 'info')\n return redirect(url_for('login'))", "def download_file(url, direct_access=False, user=None, password=None):\n parsed_uri = urlparse.urlparse(url)\n if parsed_uri.scheme == 'http' or parsed_uri.scheme == 'https':\n tmpfile = tempfile.mktemp()\n # NOTE the stream=True parameter\n # Assign User-Agent to emulate browser\n headers = {\n 'User-Agent': 'Mozilla/5.0 (X11; U; Linux i686) '\n 'Gecko/20071127 Firefox/2.0.0.11'\n }\n if user:\n r = requests.get(\n url, headers=headers, stream=True, auth=(user, password))\n else:\n r = requests.get(url, headers=headers, stream=True)\n with open(tmpfile, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n\n # get extension\n content_disposition = r.headers['content-disposition']\n fname = re.findall(\"filename=[\\'\\\"]?(.+)[\\'\\\"]\", content_disposition)\n _, ext = os.path.splitext(fname[0])\n shutil.move(tmpfile, '%s%s' % (tmpfile, ext))\n tmpfile = '%s%s' % (tmpfile, ext)\n return tmpfile\n elif parsed_uri.scheme == 'file':\n file_path = urllib.unquote_plus(parsed_uri.path).decode('utf-8')\n elif not parsed_uri.scheme:\n file_path = parsed_uri.path\n else:\n raise Exception(\n 'URI scheme not recognized %s' % url)\n\n if direct_access:\n return file_path\n\n tmpfile = tempfile.mktemp()\n shutil.copy(file_path, tmpfile)\n return tmpfile", "async def _download(self) -> None:\n\n # do request\n async with aiohttp.ClientSession() as session:\n async with session.get(self.url, auth=self._auth, timeout=self._timeout) as response:\n # check response\n if response.status == 200:\n # get data and return it\n self._buffer = await response.read()\n elif response.status == 401:\n log.error(\"Wrong credentials for downloading file.\")\n raise FileNotFoundError\n else:\n log.error(\"Could not download file from filecache.\")\n raise FileNotFoundError", "def unavoidable_download_method(self, target, name):\n # Get path to file\n file_path = os.path.join(self.work_dir, name)\n\n # Create necessary directories if not present\n self.mkdir_p(self.work_dir)\n\n # Check if file exists, download if not presente\n if not os.path.exists(file_path):\n try:\n subprocess.check_call(['curl', '-fs', self.input_urls[name], '-o', file_path])\n except subprocess.CalledProcessError:\n raise RuntimeError('\\nNecessary file could not be acquired: {}. Check input URL')\n except OSError:\n raise RuntimeError('Failed to find \"curl\". 
Install via \"apt-get install curl\"')\n\n assert os.path.exists(file_path)\n\n # Update FileStoreID\n target.updateGlobalFile(self.ids[name], file_path)\n\n return file_path", "def download(self, download_path):\n return", "def download_key_files(request):\n\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n path = os.path.join(workpath, './static/BioC/linking.key')\n path1 = os.path.join(workpath, './static/BioC/mention.key')\n ment = request.GET.get('type_key',None)\n if ment == 'mentions':\n path = open(path1, 'r')\n return HttpResponse(path, content_type='text/plain')\n elif ment == 'linking':\n path1 = open(path, 'r')\n return HttpResponse(path1, content_type='text/plain')", "def download(filename):\n return send_from_directory(directory='pdf', filename=filename)", "def download():\n raise NotImplementedError", "def test_download_redirect(self):\n\n fetcher = Fetcher('/unused/root/dir')\n with self.setup_server() as base_url:\n self._URL = base_url\n self.assertFalse(self._URL2_ACCESSED)\n self.assertFalse(self._URL1_ACCESSED)\n\n path = fetcher.download(base_url + '/url2')\n self.assertTrue(self._URL2_ACCESSED)\n self.assertTrue(self._URL1_ACCESSED)\n\n with open(path) as fp:\n self.assertEqual('returned from redirect\\r\\n', fp.read())", "def download():\n if auth.has_membership(1):\n user = \"Admin\"\n elif auth.has_membership(2):\n user = \"Examiner\"\n elif auth.has_membership(3):\n user = \"student\"\n elif auth.has_membership(5):\n user = \"Managment\"\n\n db.activity_log.insert( Title_entry=\"Download assignment\", \n referance_id=auth.user.id,\n remarks=\"content downloaded by {}\".format(user))\n db.commit()\n return response.download(request, db)", "def download_file(url, dest=None, force=False, trusted=False):\n url, filename = get_save_path(url, dest, force)\n keep_going = True\n success = False\n if url is None:\n return 'Aborted!'\n\n if url:\n success = download_wget(url, filename, trusted) # Try wget\n if not success:\n success = download_urllib(url, filename) # Try urllib\n if not success:\n success = download_pip(url, filename, force, trusted) # Try urllib\n if not success:\n split_url = url.split('/')\n msg = '\\n'.join([\n \"\\n\\nERROR in Web Access! 
- You may be behind a firewall!\",\n \"-\" * 52,\n \"You should be able to bybass this by using a browser to download:\",\n \"\\t%s\\nfrom:\\t%s\\nthen copying the download file to:\\n\\t%s\" % (\n split_url[-1], '/'.join(split_url[:-1]), filename),\n ])\n print(msg, '\\n')\n wx.MessageBox(msg, caption='WDOWNLOAD ERROR!',\n style=wx.OK|wx.CENTRE|wx.ICON_ERROR)\n return \"FAILURE or Abort!\"\n\n return filename", "def open_download_keep_file(self):\n self._unable_open_option()\n self._tap_on_confirm_button(yes=False, msg=\"Keep file button\")", "def download_files(self) -> None:\n\n for name, url in self.files.items():\n print(f\"Download {name.split('/')[-1]}\")\n wget.download(url, os.path.join(\"data\", name))", "def download_shoppinglist(request):\n if not (user := request.user):\n return dhttp.HttpResponse(\"Unauthorized\", status = 401)\n\n file = output_supplies(user)\n response = dhttp.HttpResponse(file, content_type='application/force-download')\n response['Content-Disposition'] = f'attachment; filename=\"Shopping List.html\"'\n return response", "async def download_file(\n location_id: LocationID,\n file_id: StorageFileID,\n user_id: UserID,\n link_type: LinkType = LinkType.PRESIGNED,\n):", "def test_file_access_allowed_with_disabled_security(self):\n hooks = setup_hooks(disable_security=True)\n\n result = hooks.act_on_cloned_repo(UNAUTHORIZED_READ_FILE_REPO)\n\n assert result.status == Status.SUCCESS\n assert (\n _output.test_result_header(\n \"FiboTest\",\n NUM_FIBO_TESTS,\n NUM_FIBO_TESTS,\n _output.SUCCESS_COLOR,\n )\n in result.msg\n )", "def download(self,fn):\n\t\treturn False #TODO: implement meme download", "def download_pdf(self, net_id, request_id, request_date):\n current_user_roles = get_user_roles()\n if current_user_roles[\"STFADM\"] or net_id == current_user.net_id:\n if self.make_pdf(net_id, request_id, request_date):\n return send_from_directory(\"{0}/user_uploads/{1}/{2}/\".format(self.__APP_PATH__, net_id, request_id),\n \"[{0}-{1}]_report.pdf\".format(net_id, request_date), mimetype=\"blob\")\n\n return abort(404)\n return abort(403)", "def download_file(filename):\n return send_from_directory('uploads', filename, as_attachment=True)", "def download():\n try:\n cli.run(\n [URL, '--output', TEMP_DIR],\n )\n except SystemExit:\n return None", "def download_data(self):\r\n \r\n for file in self.__files:\r\n file_to_download = os.path.join(self.__folder, os.path.basename(file))\r\n if not os.path.isfile(file_to_download):\r\n self.__download_file(file)", "def download_po(app, request, filename):\n \n filename = secure_filename(filename)\n file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n \n if file_exists(file_path):\n return send_from_directory(\n app.config['UPLOAD_FOLDER'],\n filename,\n as_attachment=True)\n \n flash('You\\'re trying to download file that are not exists.', 'error')\n return redirect(url_for('home'))", "def download_redirect(id_):\n if check_expired_file(id_):\n return abort(404)\n return redirect(url_for(\"file_handler.download\", id_=id_))", "def download_file(self, filename: str, save_dir: str) -> None:\n raise NotImplementedError()", "def test_read_unauthorized(self):\n\n url = reverse('file', kwargs={'file_id': str(self.file.id)})\n\n data = {}\n\n self.client.force_authenticate(user=self.test_user2_obj)\n response = self.client.get(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def authenticated_files(request, referral_attachment_id):\n user = request.user\n\n # If the user is not 
logged-in, just bail out\n if not user.is_authenticated:\n return HttpResponse(status=401)\n\n # Get the related referral attachment object or return a 404\n try:\n referral_attachment = ReferralAttachment.objects.get(id=referral_attachment_id)\n except ReferralAttachment.DoesNotExist:\n return HttpResponse(status=404)\n\n # Get the actual filename from the referral attachment (ie. remove the UUID prefix and slash)\n filename = str(referral_attachment.file).rsplit(\"/\", 1)[-1]\n\n # Get the content type and encoding to serve the file as best we can\n content_type, encoding = mimetypes.guess_type(str(filename))\n content_type = content_type or \"application/octet-stream\"\n\n # Actually serve the file using Django's http facilities\n response = FileResponse(\n referral_attachment.file.open(\"rb\"), content_type=content_type\n )\n response[\"Content-Disposition\"] = f'attachment; filename=\"{filename}\"'\n if encoding:\n response[\"Content-Encoding\"] = encoding\n\n return response", "def headlessDownloadFile(self, targetUrl, testForCaptcha=None):\n self._wd.get(targetUrl)\n if testForCaptcha:testForCaptcha()\n #time to download\n session = requests.Session()\n cookies = self._wd.get_cookies()\n for cookie in cookies:\n session.cookies.set(cookie['name'], cookie['value'])\n response = session.get(targetUrl)\n return response.content", "def download(self):\n pass", "def download(self):\n pass", "def download_file():\r\n global title_dict\r\n title=ResultsListbox.get(ResultsListbox.curselection())\r\n link=title_dict[title]\r\n file_dl=urllib.URLopener()\r\n file_dl.retrieve(link,str(title)+\".pdf\")", "def download(self, url: str, dest: PathLike, force: bool = False):", "def is_downloadable(self):\n return False", "def provide(request, key):\n # Read database\n # This code runs only with python >= 2.6\n# stored_file_obj = Download.objects.get(link_key=key)\n# try:\n# filepath = stored_file_obj.get_path() # model method to validate and deliver path\n# except IsExpiredError as e: # works only with python 2.6 or later (for a solution of older versions see below!)\n# return error(request, e.value)\n\n # alternate code that also run in older python before 2.6\n stored_file_obj = Download.objects.get(link_key=key)\n try:\n filepath = stored_file_obj.get_path() # model method to validate and deliver path\n except IsExpiredError: # works with pyhton before 2.6\n return error(request)\n \n # make file path suitable for different installations\n delimiter = presettings.DYNAMIC_LINK_MEDIA.strip('/').split('/')[-1]\n # now we use the objects get_paht() method to be sure the object instance keep up to date.\n file_path = os.path.normpath(presettings.DYNAMIC_LINK_MEDIA + '/' + filepath.split(delimiter)[-1])\n\n # read file as binary\n try:\n fsocket = open(file_path, 'rb') # garbage collector will deal with not closed fsocket\n except IOError:\n stored_file_obj.active = False\n stored_file_obj.save() # only raise the following once\n return HttpResponseNotFound(unicode(_(u'File not found!'))) # admin will get informed by mail\n\n# # read file as binary\n# try:\n# f = open(file_path, 'rb')\n# except IOError:\n# stored_file_obj.active = False\n# stored_file_obj.save() # only raise the following once\n# return HttpResponseNotFound(unicode(_(u'File not found!'))) # admin will get informed by mail\n# fsocket = f.read()\n# f.close()\n\n # get file parameters\n file_name = os.path.basename(file_path)\n file_size = os.path.getsize(file_path)\n\n # specify mimetype and encoding\n auto_mimetype, auto_encoding = 
mimetypes.guess_type(file_path)\n if not auto_mimetype: # for unknown types use stream\n auto_mimetype = 'application/octet-stream'\n\n # response object\n response = HttpResponse(fsocket, mimetype=auto_mimetype) # object instance with mimetype and file\n # set headers in the response object\n # a list of headers: http://en.wikipedia.org/wiki/List_of_HTTP_header_fields\n # encode('utf-8') assuming you're running on a server with UTF-8 as the filesystem encoding.\n response['Content-Disposition'] = 'attachment; filename=%s' % file_name.encode('utf-8') # add correct filename\n if auto_encoding and auto_encoding is not 'gzip':\n # set encoding but exclude gzip from encoding headers\n # GZip uses zlib, but on its own zlib produces content that's improperly\n # encoded for a browser seeing 'gzip' as the content encoding.\n response['Content-Encoding'] = auto_encoding\n response['Content-Length'] = file_size # set response file size for the browsers progress bar\n \n return response", "def run(self):\n download(self.attempt)", "def __maybeDownload():\n if not os.path.isdir(Download.DATA_ROOT): # 若 data 目录不存在,创建 data 目录\n os.mkdir(Download.DATA_ROOT)\n file_path = os.path.join(Download.DATA_ROOT, Download.FILE_NAME)\n\n if os.path.exists(file_path): # 若已存在该文件\n statinfo = os.stat(file_path)\n if statinfo.st_size == Download.FILE_SIZE: # 若该文件正确,直接返回 file_path\n print('Found and verified %s' % file_path)\n return file_path\n else: # 否则,删除文件重新下载\n os.remove(file_path)\n\n download_url = Download.URL + Download.FILE_NAME\n print('Downloading %s ...' % download_url)\n filename, _ = urlretrieve(download_url, file_path) # 下载数据\n print('Finish downloading')\n\n statinfo = os.stat(filename)\n if statinfo.st_size == Download.FILE_SIZE: # 校验数据是否正确下载\n print('Found and verified %s' % filename)\n else:\n print(statinfo.st_size)\n raise Exception('Failed to verify ' + filename + '. 
Can you get to it with a browser ?')\n return filename", "def filedownload(source, destination):\n\n # Initiate the download\n urllib.request.urlretrieve(source, destination)", "def download_file():\n for lines in urls:\n try:\n req.urlretrieve(lines, '{0}/{1}'.format(folder_path, lines.split('/')[-1]))\n time.sleep(1)\n print ('File - {} - downloaded successfully'.format(lines.split('/')[-1]))\n except urllib.error.HTTPError:\n print('File is missing or not reachable')\n print('Download Complete & Successful!')", "def download_search():\r\n if not LOGGEDIN:\r\n return render_template(\"login_temp.html\", msg=\"\")\r\n return render_template('/download_search')", "def download_file(self, parsed_event, input_dir_path):", "def downloadFile(self, base_url, file_name):\n url = os.path.join(base_url, file_name)\n req = urllib2.Request(url)\n try:\n f = urllib2.urlopen(req, timeout=self.timeout)\n local_file = open(os.path.join(self.config.get('PATHS', 'pdfdir'), file_name), \"w\")\n local_file.write(f.read())\n local_file.close()\n except Exception, err:\n print \"[ Failed ]\"\n print \"\\n***ERROR in downloadFile: %s\" % err\n sys.exit(0)", "def download_file():\n\n if 'POST' == request.method:\n file_id = request.form['file_id']\n else:\n file_id = request.args.get('file_id')\n\n # 1 ==> example_1.tgz\n file_path = file_manager.get_file_path_from_id(file_id)\n print \"serving file: \" + file_path\n return send_file(file_path, as_attachment=True)", "def pre_download(self, remote_files):\n pass", "def test_read_unauthenticated(self):\n\n url = reverse('file', kwargs={'file_id': str(self.file.id)})\n\n data = {}\n\n response = self.client.get(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def is_downloadable(self):\n return True", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return 
response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def file_download_url_mapper(request, shortkey, filename):\n\n authorize(request, shortkey, view=True, edit=True, full=True, superuser=True)\n irods_file_path = '/'.join(request.path.split('/')[2:-1])\n istorage = IrodsStorage()\n file_download_url = istorage.url(irods_file_path)\n return HttpResponseRedirect(file_download_url)", "def is_restricted_download(self):\n return self.has_label(RESTRICTEDDOWNLOAD_LABEL)", "def download(urls, dest_folder):\n pass", "def download_file(self, remote_file):\n remote_file.download()", "def download_file(url, fn, cookiejar, cookies_file, wget_bin):\n\ttry:\n\t\t# create the path if need be\n\t\tbasedir = os.path.dirname(fn)\n\t\tif not os.path.isdir(basedir):\n\t\t\tos.makedirs(basedir)\n\n\t\tif wget_bin is not None:\n\t\t\tdownload_file_wget(wget_bin, url, fn, cookies_file)\n\t\telse:\n\t\t\tdownload_file_nowget(url, fn, cookiejar)\n\n\texcept KeyboardInterrupt, e: \n\t\tprint \"\\nKeyboard Interrupt -- Removing partial file:\", fn\n\t\tos.remove(fn)\n\n\t\traise e", "def download():\n try:\n response = send_from_directory(\n app.config.get(\"DATA_DIR\"), \"whiteboard.zip\", as_attachment=True\n )\n\n # change headers to stop browser from delivering cached version\n response.headers[\"Last-Modified\"] = datetime.now()\n response.headers[\n \"Cache-Control\"\n ] = \"no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0\"\n response.headers[\"Pragma\"] = \"no-cache\"\n response.headers[\"Expires\"] = \"-1\"\n\n return response\n\n except:\n return traceback.format_exc()", "def uploaded_files_are_not_shown_on_uploadpage(self,filenames,user):\n \n response = self.get_uploadpage_response(user,self.testproject)\n \n for filename in filenames:\n self.assertTrue(filename not in response.content,\"Restricted file\"\n \" '%s' was visible on download page when viewed\"\n \" by user %s\"\n % (filename,user.username))", "def download_file(file_directory, file_path, login_request, user_id):\n\n data, headers, server_host, server_port = process_request_header(file_directory, file_path, login_request, user_id)\n\n request = requests.post(\"http://\" + server_host + \":\" + server_port + \"/fileOperations/downloadFile\",\n headers=headers)\n file = open(\"../\" + request.text)\n\n return file", "def download(self, download_request):\n raise NotImplementedError", "def download():\n \"\"\"\n \"The book p.79 have error.\n \"https://github.com/login/oauth/authorize?client_id=7e0a3cd836d3e544dbd9&redirect_uri=https%3A%2F%2Fgist.github.com%2Fauth%2Fgithub%2Fcallback%3Freturn_to%3Dhttps%253A%252F%252Fgist.github.com%252Fyoungsoul%252Ffc69665c5d08e189c57c0db0e93017a6&response_type=code&state=9b385430ee7cd1a75ca91c1d1cb6c565111f6b81e54a71f42ae9b22035241b9b\n \"\"\"\n subprocess.call([\n 'wget',\n 'https://github.com/amplab/datascience-sp14/raw/master/lab7/mldata/mnist-original.mat', \n '-P',\n 'origin_data/'\n ])\n logger.info('Download success!')", "def download(filename):\n print \"Downloading\", filename\n file_content = urlopen(\n urljoin(URL_PATH, filename)\n )\n write_data_to_file(\n file_content.read(),\n os.path.join(\n '/tmp',\n filename\n )\n )", "def download_file(driver, link, filename):\n download_path = os.path.join(os.environ['HOME'], \"Downloads\", filename)\n # TODO: copy 
cookies, user agent, ect to session\n s = requests.session()\n r = s.get(link, stream=True)\n with open(download_path, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n return download_path", "def download(all):\n print(\"Downloading\")", "def download():\n return response.download(request,db)" ]
[ "0.6934367", "0.6775749", "0.6602642", "0.6503681", "0.626969", "0.62442297", "0.6117034", "0.6105623", "0.6099257", "0.6096529", "0.6040991", "0.60035723", "0.59876895", "0.5901248", "0.58960354", "0.58567363", "0.5854076", "0.5851294", "0.5834396", "0.5800784", "0.57954973", "0.5794253", "0.5781241", "0.57522565", "0.5746397", "0.5746032", "0.57407814", "0.57368326", "0.5721553", "0.57207245", "0.569788", "0.56961817", "0.5693607", "0.56756884", "0.5660493", "0.5648388", "0.5648388", "0.56313246", "0.56193715", "0.5611281", "0.56111187", "0.5603768", "0.5599084", "0.5596371", "0.55832386", "0.5581885", "0.55637896", "0.55545413", "0.55527997", "0.5552768", "0.55486435", "0.5543822", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.55429375", "0.5539311", "0.55297244", "0.5528583", "0.5526641", "0.55004424", "0.5500069", "0.5496996", "0.54961467", "0.54915625", "0.54887384", "0.54810554", "0.54778415", "0.5473395", "0.5467942" ]
0.5542597
86
View for the detail of an employee
def employee_detail(request, employee_id): current_employee = Employee.objects.get(user__pk=request.user.pk) employee = Employee.objects.get(pk=int(employee_id)) if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk: raise PermissionDenied() actions = employee.action_set.all() if not current_employee.pk == int(employee_id): if not current_employee.is_manager or not current_employee.company.pk == employee.company.pk: if not current_employee.isCompanySuperUserOrHigher(): return HttpResponse('unauthorized', status=401) user_files = get_files_for_employee(employee_id) if request.method == 'POST': upload_form = UploadFileToEmployeyForm(request.POST, request.FILES) form = EmployeeNoteForm(request.POST, instance=employee) if 'upload' in request.POST: if upload_form.is_valid(): upload_form.handle_upload(employee_id, request.FILES['file']) return HttpResponseRedirect('/employee/show/{}?upload_status=ok#file-list'.format(employee_id)) else: if form.is_valid(): form.save(request.user, employee) return HttpResponseRedirect('/employee/show/%d' % form.instance.pk) else: form = EmployeeNoteForm(instance=employee) upload_form = UploadFileToEmployeyForm() data = {} data["first_name"] = employee.user.first_name data["last_name"] = employee.user.last_name data["email"] = employee.user.email data["is_manager"] = employee.is_manager data["language_code"] = employee.language_code employee_role = EmployeeRole.objects.filter(employee=employee).all() name_role_list = [] for obj in employee_role: name_role_list.append(obj.role.name) data["roles"] = name_role_list return JsonResponse(status=201, data=data) # return TemplateResponse( # request, # 'mus/detail.html', # { # 'actions': actions, # 'employee': employee, # # 'development_plans': development_plans, # 'form': form, # 'upload_form': upload_form, # 'user_files': user_files # } # )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def employee():\n return Response(render_template('employee/employee.html'))", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def show_employee(emp_id, fields=None):\n ret = {}\n if fields is None:\n fields = \",\".join(\n (\n \"canUploadPhoto\",\n \"department\",\n \"displayName\",\n \"firstName\",\n \"id\",\n \"jobTitle\",\n \"lastName\",\n \"location\",\n \"mobilePhone\",\n \"nickname\",\n \"photoUploaded\",\n \"photoUrl\",\n \"workEmail\",\n \"workPhone\",\n \"workPhoneExtension\",\n )\n )\n\n status, result = _query(action=\"employees\", command=emp_id, args={\"fields\": fields})\n\n root = ET.fromstring(result)\n\n ret = {\"id\": emp_id}\n for item in root:\n ret[next(iter(item.values()))] = item.text\n return ret", "def get(self):\n resultado = EmployeeModel.query.all()\n return resultado", "def retrieve(self, request, pk=None):\n employee = self.get_employee_object(pk)\n print(F\"Employee: {employee}\")\n serializer = data_serializers.PresentEmployeeDataSerializer(employee)\n return Response(serializer.data)", "def display_employee(self):\n print \"[Name: %s] [Salary: %d]\" % (self.name, self.salary)", "def get_employee(employee_id):\n\n employee = Employee.objects.get_or_404(id=employee_id)\n\n return jsonify({\n 'employee': employee\n })", "def profile_detail(request, employee_id):\n current_employee = Employee.objects.filter(user__pk=request.user.pk).first()\n employee = Employee.objects.get(pk=int(employee_id))\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not current_employee.pk == int(employee_id):\n if not current_employee.is_manager or not current_employee.company.pk == employee.company.pk:\n if not current_employee.isCompanySuperUserOrHigher():\n return HttpResponse('unauthorized', status=401)\n\n user_files = get_files_for_employee(employee_id)\n\n if request.method == 'POST':\n\n upload_form = UploadFileToEmployeyForm(request.POST, request.FILES)\n form = EmployeeNoteForm(request.POST, instance=employee)\n\n if 'upload' in request.POST:\n if upload_form.is_valid():\n upload_form.handle_upload(employee_id, request.FILES['file'])\n\n return HttpResponseRedirect('/employee/show/{}?upload_status=ok#file-list'.format(employee_id))\n\n else:\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/employee/show/%d' % form.instance.pk)\n\n else:\n form = EmployeeNoteForm(instance=employee)\n upload_form = UploadFileToEmployeyForm()\n data = {}\n data[\"user\"] = employee.user.first_name + \" \" + employee.user.last_name\n data[\"id\"] = str(employee.user.pk)\n data[\"title\"] = employee.title\n data[\"email\"] = employee.user.email\n data[\"phone\"] = employee.phone\n company_dict = {}\n company_dict[\"name\"] = employee.company.name\n company_dict[\"id\"] = str(employee.company.pk)\n\n data[\"company\"] = company_dict\n employee_username = \"\"\n emp = Employee.objects.filter(manager=employee.manager).all()\n for obj in emp:\n employee_username = obj.manager.user.username if obj.manager else \"\"\n employee_first = obj.manager.user.first_name if obj.manager else \"\"\n employee_last = obj.manager.user.last_name 
if obj.manager else \"\"\n manager_dict = {}\n manager_dict[\"name\"] = employee_username\n manager_dict[\"id\"] = employee_id\n manager_dict[\"first_last_name\"] = employee_first + \" \" + employee_last\n data[\"manager\"] = manager_dict\n data[\"date_of_birth\"] = employee.date_of_birth\n data[\"status_questions\"] = employee.status_questions\n data[\"notes\"] = employee.notes\n employee_role = EmployeeRole.objects.filter(employee=employee).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n data[\"roles\"] = name_role_list\n data[\"potenciale\"] = employee.potenciale\n data[\"date_start\"] = employee.created_at\n data[\"is_manager\"] = employee.is_manager\n data[\"date_finish\"] = \"\"\n data['photo'] = employee.photo.url if employee.photo else ''\n\n return JsonResponse(status=200, data=data)\n # return TemplateResponse(\n # request,\n # 'mus/detail.html',\n # {\n # 'actions': actions,\n # 'employee': employee,\n # # 'development_plans': development_plans,\n # 'form': form,\n # 'upload_form': upload_form,\n # 'user_files': user_files\n # }\n # )", "def show_all_employees(self):\n try:\n employees = self.admin_repository.show_all_employees()\n if employees:\n for employee in employees:\n print(\"Employee Id : {}\".format(employee[0]))\n print(\"Name : {}\".format(employee[1]))\n print(\"Email : {}\".format(employee[2]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def list(self, request):\n employee = self.controller.retrieve_all_employees()\n serializer = data_serializers.PresentEmployeeDataSerializer(employee, many=True)\n return Response(serializer.data)", "def get(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def get(cls, employee_id):\n employee = EmployeeModel.find_by_id(employee_id)\n if not employee:\n return {'message': 'Employee not found, or you do not have the access'}, 404\n\n return employee.json()", "def show_all_employees():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n \n \n ## right now this is ALL users... 
\n \n return render_template(\"employee_display.html\", employees = employees)", "def list(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def get(self, id):\n resultado = EmployeeModel.query.filter_by(employee_id=id).first()\n if resultado:\n return resultado\n api.abort(404)", "def employee_get(emp_id):\n try:\n emp = Employee.objects.get(id=emp_id)\n except Employee.DoesNotExist:\n return JsonResponse({\n 'status': False,\n 'message': 'Employee does not exists in database'\n }, status=404)\n _data = {\n 'id': emp.id,\n 'first_name': emp.first_name,\n 'last_name': emp.last_name,\n 'age': emp.age,\n 'city': emp.city.name,\n 'state': emp.state.name,\n 'country': emp.country.name\n }\n return JsonResponse(_data, safe=False)", "def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, employee.last_name, employee.grade, payscale.salary))", "def committee_show(request, pk):\n committee = Committee.objects.get(pk=pk)\n\n delegates = Delegate.objects.filter(committee_id=pk)\n\n context = {\"committee\": committee, \"delegates\": delegates}\n template = \"jurycore/committee_show.html\"\n return render(request, template, context)", "def details(request):\n\treturn render(request, 'ExcelApp/main.html')", "def show_overview_of_all_employees(self):\n\n print(\"OVERVIEW OF EMPLOYEES\\n\")\n\n employees_ob_list = self.llapi.get_employee_overview()\n \n for employee_ob in employees_ob_list:\n print(employee_ob.print_info_in_line(\"*\"))\n \n print(f\"\\nNAN AIR has {len(employees_ob_list)} employees\")\n\n print(\"\\nB Back\\n\")\n\n action_str = self.choose_action([\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"b\"])\n\n if action_str == \"b\":\n return", "def current_employee(self, request: Request) -> Response:\n serializer = self.get_serializer_class()\n serializer = serializer(request.user, context={'request': request})\n return Response(serializer.data)", "def stars_employee_list(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee)\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(employee_stars, request)\n serializer = StarSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)", "def employee_profile_page(cls, employee_id):\n return cls.__profile_page(employee_id, cls._logger)", "def home(request):\n\n\tcontext_dict = {}\n\temployee = models.Teacher.objects.filter(\n\t\tuser=request.user\n\t).first()\n\t# context_dict = {\n\t# context_helper.get_emp_info(employee)\n\t# }\n\t# print (context_dict)\n\tcontext_dict.update(context_helper.get_emp_info(employee))\n\treturn render(request, \"home.html\", context_dict)", "def edit_employee(request, employee_id):\n employee = Employee.objects.get(pk=int(employee_id))\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n\n assert isinstance(employee, Employee)\n assert isinstance(current_employee, Employee)\n\n # if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n # raise PermissionDenied()\n\n if 
not current_employee.hasAccessTo(employee):\n raise PermissionDenied()\n\n form = EditEmployeeForm(request.user, employee, {\n 'first_name': employee.user.first_name,\n 'last_name': employee.user.last_name,\n 'email': employee.user.email,\n 'manager': employee.manager.id if employee.manager else 0,\n 'language_code': employee.language_code,\n # 'development_plan_type': employee.development_plan_type.id,\n 'is_manager': employee.is_manager\n })\n if 'manager' in form.fields:\n managerQS = Employee.objects.filter(is_manager=True, company__pk=employee.company.pk)\n form.fields['manager'].queryset = managerQS\n # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(\n # Q(company__pk=employee.company.pk) | Q(company__isnull=True)\n # )\n is_me = employee.user.pk == request.user.pk\n return TemplateResponse(\n request,\n 'mus/edit_employee_form.html',\n {\n 'edit_employee_form': form,\n 'employee_id': employee_id,\n 'me': is_me,\n 'name': employee.user.get_full_name()\n }\n )", "def get_employee(self, employee_id):\n cursor = self.dbconnect.get_cursor()\n\n try:\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE LOWER(id)=LOWER(%s)', (employee_id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n\n except:\n self.dbconnect.rollback()\n raise", "def index(request):\n\n context = {'employees': User.objects.select_related('profile').filter(is_staff=True).order_by('first_name')}\n return render(request, 'Employees/index.html', context)", "def profile_page(cls, employee_id, logger=None):\n if logger is None:\n logger = cls._logger\n\n database_connection = DatabaseConnection(f\"employees.csv\")\n table = database_connection.table\n employee = Employee(employee_id)\n\n view = table[(table['employee_id']==employee.get_employee_id())]\n logger.log(view)\n\n while True:\n\n choice = input(\n \"Please choose: \"\n \"(1) check data, \"\n \"(2) update first name, \"\n \"(3) update last name, \"\n \"(4) save changes, \"\n \"(5) exit without saving \"\n )\n if choice not in ('1', '2', '3', '4', '5'):\n logger.log(\"Please pick a valid choice\")\n elif choice=='1':\n view = table[(table['employee_id']==employee.get_employee_id())]\n logger.log(view)\n elif choice=='2':\n first_name = input(\"Enter your first name: \")\n employee.set_first_name(first_name)\n elif choice=='3':\n last_name = input(\"Enter your last name: \")\n employee.set_last_name(last_name)\n elif choice=='4':\n table[\n (table['employee_id']==employee.get_employee_id())\n ] = pd.Series(\n {'employee_id': employee.get_employee_id(),\n 'first_name': employee.get_first_name(),\n 'last_name': employee.get_last_name(),\n }\n )\n database_connection.overwrite()\n logger.log(\"Information saved!\")\n else:\n break", "def get(self, request, pk):\n employee = EmployeeDetail.objects.get(pk=pk)\n response = {\n 'payment_methods': EmployeeSerializer(\n employee,\n ).data\n }\n return Response(response)", "def occupation(request, pk):\r\n occupation = get_object_or_404(Occupation, pk=pk)\r\n return HttpResponse('Occupation: %s' % occupation)", "def enterprise_edit(request):\r\n action = tool.get_param_by_request(request.GET, 'action', \"add\", str)\r\n career_id = tool.get_param_by_request(request.GET, 'careerId', 0, int)\r\n\r\n enterprise = APIResult()\r\n c = None\r\n if action == \"add\":\r\n c = {\"career_id\": career_id, 
\"action\": action}\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_edit.html\", c,\r\n context_instance=RequestContext(request))\r\n\r\n if action == \"edit\" and (not career_id):\r\n _id = tool.get_param_by_request(request.GET, 'enterpriseId', 0, int)\r\n enterprise = api_enterprise.get_career_page_enterprise_by_id(_id)\r\n c = {\"enterprises\": enterprise.result()[0], \"action\": action}\r\n if enterprise.is_error():\r\n return render_to_response(\"404.html\", {}, context_instance=RequestContext(request))\r\n else:\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_edit.html\", c,\r\n context_instance=RequestContext(request))\r\n\r\n if action == \"edit\" and career_id:\r\n enterprise = api_enterprise.list_career_page_enterprise_by_career_id(career_id)\r\n c = {\"enterprises\": enterprise.result(), \"action\": action}\r\n\r\n if enterprise.is_error():\r\n return render_to_response(\"404.html\", {}, context_instance=RequestContext(request))\r\n else:\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_list.html\", c,\r\n context_instance=RequestContext(request))", "def employee_list(request):\n response_data = []\n for emp in Employee.objects.all().values(\n 'id', 'first_name', 'last_name', 'age', 'address', 'city',\n 'state', 'country'):\n response_data.append(emp)\n return JsonResponse(response_data, safe=False)", "def get_manager_employees(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n manager_employees = Employee.objects.filter(manager=current_employee, development_plan_type=None).all()\n if manager_employees:\n emp_list=[]\n for emp in manager_employees:\n emp_data={}\n emp_data[\"id\"] = emp.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"manager_id\"] = emp.manager.id\n # emp_data[\"status_questions\"] = emp.status_questions\n # employee_role = EmployeeRole.objects.filter(employee=emp).all()\n # name_role_list = []\n # for obj in employee_role:\n # name_role_list.append(obj.role.name)\n # emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n data = {\"employees:\": emp_list}\n return JsonResponse(status=201, data=data)\n else:\n return JsonResponse(\"The user with id={} isn't a manager for any user\".format(current_employee.user.id),\n status=404)", "def show_all_information():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n all_certs = employee_certification.query.all()\n \n return render_template(\"admin.html\", employees = employees, all_certs = all_certs)", "def action_list(request, employee_id=None):\n if employee_id:\n employee = Employee.objects.get(pk=employee_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n else:\n employee = request.user.employee_user.first()\n actions = employee.action_set.all()\n return TemplateResponse(\n request,\n 'mus/action_list.html',\n dict(\n actions=actions,\n employee=employee\n )\n )", "def get(self):\n employees = self.service.get_employees(strategy=selectinload)\n return self.schema.dump(employees, many=True), 200", "def detail(request, event_id):\n event = 
get_object_or_404(Event, pk=event_id)\n user = request.user\n return render(request, 'kvent/event-detail.html', {'event': event, 'user': user})", "def employee(self) -> object:\n return self._employee", "def get_employee_by_id(self, employee_id):\n employee = self.admin_repository.get_employee_by_id(employee_id)\n if employee:\n print('''Name: {}\\nEmail: {}\\n\n '''.format(employee[0], employee[1]))\n return employee\n else:\n print(\"Invalid Id\")\n return False", "def event_collaborator_detail(request, event_id, collaborator_id):\n if request.method == 'GET':\n event = get_object_or_404(Event, pk=event_id)\n collaborator = Employee.objects.all().filter(event=event, pk=collaborator_id)\n if collaborator:\n is_registered = True\n else:\n is_registered = False\n serializer = CollaboratorAttendanceSerializer(event, context={'is_registered': is_registered})\n return Response(serializer.data, status=status.HTTP_200_OK)", "def get_employee_by_name(self, name):\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE name=%s', (name,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])", "def office_view(request, slug, id):\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n office_reference = get_object_or_404(Office, id=id,company=company)\n\n return render_to_response('office_form.html', \n {'details': office_reference,'info':office_reference},\n context_instance=RequestContext(request))", "def get_employee(self, id):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT * FROM employee WHERE employeeID=%s ', (id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])", "def list(self, request):\n teams = self.controller.retrieve_all_teams_employees()\n serializer = data_serializers.PresentTeamEmployeeDataSerializer(teams, many=True)\n return Response(serializer.data)", "def employees_manager(request):\n # current_employee = Employee.objects.get(user__pk=request.user.pk)\n manager_list = Employee.objects.filter(manager=request.user.employee_user, is_manager=True)\n employee = Employee.objects.get(pk=request.user.employee_user.id)\n employee_dict = model_to_dict(employee)\n employee_dict['first_name'] = employee.user.first_name\n employee_dict['last_name'] = employee.user.last_name\n employee_dict['photo'] = employee.photo.url if employee.photo else ''\n print employee_dict\n if len(manager_list) > 0:\n result_list = list(manager_list)\n all_managers_list = found_all_managers(manager_list, result_list)\n else:\n data = {\"employee_managers\": employee_dict}\n return JsonResponse(data=data, content_type='application/json', safe=False)\n employees = list()\n for manager in all_managers_list:\n manager_dict = model_to_dict(manager)\n manager_dict['first_name'] = manager.user.first_name\n manager_dict['last_name'] = manager.user.last_name\n manager_dict['photo'] = manager.photo.url if manager.photo else ''\n employees.append(manager_dict)\n employees.append(employee_dict)\n\n data = {\"employee_managers\": employees}\n return JsonResponse(data=data, content_type='application/json', safe=False)", "def detail(request, pk):\n mineral = get_object_or_404(Mineral, pk=pk)\n return render(request, 'detail.html', 
{'mineral': mineral})", "def display_certs(employee_id):\n\n if not g.user:\n flash(\"Please Login to continue.\", \"danger\")\n return redirect(\"/\")\n\n employee = Employee.query.get_or_404(employee_id)\n \n certs = employee_certification.query.filter_by(employee_id = employee_id).all()\n \n all_certs = Cert.query.all()\n \n return render_template(\"users/display_cert.html\", employee = employee, certs = certs, all_certs = all_certs)", "def get(self, request):\n employee = EmployeeDetail.objects.all()\n response = {\n 'payment_methods': EmployeeSerializer(\n employee,\n many=True\n ).data\n }\n return Response(response)", "def __str__(self):\n return \"Employee attributes {}, {}, {} ,{}, {}, {}\". \\\n format(self._last_name, self._first_name, self._address, self._phone_number,\n self._start_date, self._salary)", "def delete(self, request, pk):\n employee = EmployeeDetail.objects.get(pk=pk)\n employee.delete()\n return Response(\n data=' Entry deleted',\n status=status.HTTP_400_BAD_REQUEST\n )", "def display_hours(employee_id):\n\n if not g.user:\n flash(\"Please Login to continue.\", \"danger\")\n return redirect(\"/\")\n \n employee = Employee.query.get_or_404(employee_id)\n\n labels = json.dumps( [\"Completed\", \"Required\"])\n data = json.dumps([employee.completed, employee.required])\n \n return render_template(\"users/display_hours.html\", employee = employee, labels = labels, data = data)", "def retrieve(self, request, pk=None):\n try:\n team_employee = self.get_team_employee_object(pk)\n serializer = data_serializers.PresentTeamEmployeeDataSerializer(team_employee)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (\n domain_exceptions.TeamDoesNotExist\n )as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)", "def get_employee(self):\n employee_ids = self.env['hr.employee'].search([('user_id', '=', self.env.uid)])\n return employee_ids[0] if employee_ids else False", "def get_employee_information(user_name: str, employee_name: str, store_name: str):\n\n user_name = auth.get_username_from_hash(user_name)\n permission_handler.is_permmited_to(user_name=user_name, action=Action.EMPLOYEE_INFO.value,\n store_name=store_name)\n permission_handler.is_working_in_store(employee_name, store_name)\n return user_handler.get_employee_information(employee_name)", "def view_attendance(request):\n\n\tcontext_dict = {\n\t\t'title': 'All Attendance',\n\t}\n\treturn render(request, \"viewAttendance.html\", context_dict)", "def employees(employee_id=None):\n\tif not employee_id:\n\t\temployee_data = _serialize_list(Employee.query.all())\n\telse:\n\t\temployee_data = _serialize_model(Employee.query.filter_by(id=employee_id).first())\n\n\tresp = jsonify(employee_data)\n\treturn resp", "def all_employees(request, company_id=None):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n company_super_user = current_employee.isCompanySuperUserOrHigher()\n if company_id:\n company = Company.objects.get(pk=company_id)\n else:\n company = current_employee.company\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n raise PermissionDenied()\n change_company_form = ChangeCompanyForm(initial=dict(company=company))\n return TemplateResponse(\n request,\n 'all_employees.html',\n {\n 'user': request.user,\n 'company_super_user': company_super_user,\n 'company': company,\n 'change_company_form': change_company_form,\n }\n )", "def view_experiment(request,id):\n\texp = Experiment.objects.get(id=id)\n\tpossibly_related = 
get_related(exp)\n\treturn list_detail.object_detail(request,\n\t\t\t\t\t\t\t\t\tqueryset=Experiment.objects.filter(id=id),\n\t\t\t\t\t\t\t\t\tobject_id=exp.id,\n\t\t\t\t\t\t\t\t\ttemplate_name='experiments/experiment.html',\n\t\t\t\t\t\t\t\t\textra_context= {\"possibly_related\" : possibly_related})", "def event_detail():\n # getting event id from homepage \n event_id = request.args.get('eventId')\n # counting the total number of registeration for an event.\n registrant_count = db.session.query(Register).filter(Register.event_id ==event_id).count()\n event = db.session.query(Event).filter(Event.event_id == event_id).first()\n format = '%a %I:%M %p %b %d, %y'\n event.date = event.date.strftime(format)\n event.time = event.time.strftime(format)\n location = event.location\n return render_template(\"event.html\", event= event, registrant_count=registrant_count)", "def get_queryset(self, request):\n return models.Employee.objects.exclude(username='root')", "def edit_employee(employee_id):\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employee = Employee.query.get_or_404(employee_id)\n form = Edit_User_Form(obj = employee)\n \n #form.location.choices = db.session.query(Location.id, Location.site_name).all()\n \n #form.certs.choices = db.session.query(Certs.id , Certs.cert_name).all()\n\n if form.validate_on_submit():\n \n employee.email = form.email.data, \n employee.first_name = form.first_name.data,\n employee.last_name = form.last_name.data,\n employee.hire_date = form.hire_date.data, \n employee.is_admin = form.is_admin.data\n\n \n db.session.commit()\n \n flash(f\"{employee.first_name} {employee.last_name} has been saved\", \"success\")\n return redirect(\"/administrator\")\n else:\n\n return render_template(\"/admin/edit_user.html\", employee = employee, form = form)", "def employees_json_id(request, employee_id):\n curent_employee = Employee.objects.get(pk=int(employee_id))\n if curent_employee.is_manager:\n employee_list = Employee.objects.filter(manager=curent_employee)\n employees = list()\n for employee in employee_list:\n manager_dict = model_to_dict(employee)\n manager_dict['first_name'] = employee.user.first_name\n manager_dict['last_name'] = employee.user.last_name\n manager_dict['photo'] = employee.photo.url if employee.photo else ''\n employees.append(manager_dict)\n data = {\"employees\": employees}\n else:\n return JsonResponse(status=400, data={\"error\": \"Employee with id={} not is_manager\".format(int(employee_id))})\n return JsonResponse(data=data, content_type='application/json', safe=False)", "def test_api_can_get_employee_by_id(self):\n res = self.client().get(service_url_emp+'/1')\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))", "def delegation_show(request, pk):\n delegation = Delegation.objects.get(pk=pk)\n\n delegates = Delegate.objects.filter(delegation_id=pk).order_by(\"committee__name\")\n\n context = {\"delegation\": delegation, \"delegates\": delegates, \"delegation_show\": True}\n template = \"jurycore/delegation_show.html\"\n return render(request, template, context)", "def show_department(id_: int):\n\n logger.debug('Routed to /departments/%i', id_)\n titles = ['Name', 'Average Salary', 'Employees', 'E-mail']\n department = None\n\n try:\n department = ds.get(id_)\n except IntegrityError:\n logger.error(\"Can't find employee with id %i\", id_)\n abort(404)\n\n 
logger.info('Get department %s', department.name)\n return render_template('department.html',\n title=f'Department {department.name}',\n table_title=f'Department: {department.name}',\n headers=titles,\n department=department)", "def get_employees(self):\n return self.employees", "def delete_employee():\r\n id = request.args.get('id', \"\")\r\n return render_template(\"delete_employee.html\", id=id)", "def get(self, uuid: str):\n try:\n employee = self.service.get_employee_by_uuid(uuid)\n except ValueError:\n return self.NOT_FOUND_MESSAGE, 404\n return self.schema.dump(employee), 200", "def event_participant_detail(request, event_id, participant_id):\n if request.method == 'GET':\n event = get_object_or_404(Event, pk=event_id)\n participant = get_object_or_404(Participant, pk=participant_id)\n try:\n attendance = Attendance.objects.get(event=event, participant=participant)\n except:\n attendance = Attendance(participant=participant, event=event, is_registered=False)\n serializer = AttendanceSerializer(attendance)\n return Response(serializer.data, status=status.HTTP_200_OK)", "def badges_employee_list(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_bages = EmployeeBadge.objects.filter(to_user=employee)\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(employee_bages, request)\n serializer = EmployeeBadgeSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)", "def get_details(office_id):\n\n office = OfficeModel()\n office_exists = office.get_one(office_id)\n print(office)\n if office_exists is not None:\n return make_response(jsonify(\n {'status': 200, 'data': office.sub_set()}\n ), 200)\n\n return make_response(jsonify(\n {'status': 404,\n \"error\": 'Office with id {} not found'.format(office_id)}\n ), 404)", "def get_emp_data(self,employee):\n\t\temp = None\n\t\tfind_by = employee.find_elements_by_tag_name\n\t\tif str(type(employee)) != \"<type 'NoneType'>\" and main.is_desktop():\n\t\t\t# columns = employee.find_elements_by_tag_name(\"td\")\n\t\t\temp = {\n\t\t\t\t'name': find_by('td')[0].text,\n\t\t\t\t'id': find_by('td')[1].text,\n\t\t\t\t'status': find_by('td')[2].text,\n\t\t\t\t'election': find_by('td')[3].text,\n\t\t\t\t'date_changed': find_by('td')[4].text\n\t\t\t}\n\t\telif str(type(employee)) != \"<type 'NoneType'>\":\n\t\t\temp = {\n\t\t\t\t'name': find_by('div')[2].text,\n\t\t\t\t'id': find_by('div')[3].text[13:],\n\t\t\t\t'status': find_by('div')[4].text[8:], #Fail 4:20p, StaleEl\n\t\t\t\t'election': find_by('div')[5].text[17:], #Fail 4:15p, StaleEl\n\t\t\t\t'date_changed': find_by('div')[6].text[14:]\n\t\t\t}\n\n\t\t# raw_input(str(emp))\n\t\treturn emp", "def retrieve(self, request, pk=None):\n\n try:\n expense = Expenses.objects.get(pk=pk)\n serializer = ExpenseSerializer(\n expense, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)", "def holiday_detail(request, holiday_id):\n\n all_holidays = Product.objects.filter(is_holiday=True)\n holiday = get_object_or_404(all_holidays, pk=holiday_id)\n itinerary = Itinerary.objects.get(holiday=holiday)\n itinerary_day = ItineraryDay.objects.filter(itinerary=itinerary)\n faq = Faq.objects.filter(holiday=holiday)\n\n context = {\n 'holiday': holiday,\n 'itinerary': itinerary,\n 'itinerary_day': itinerary_day,\n 'faq': faq,\n }\n\n return render(request, 'products/holiday_detail.html', context)", "def 
computer_detail(request, computer_id):\n\n computer = get_object_or_404(Computer, pk=computer_id)\n current_assignment = EmployeeComputer.objects.filter(computer_id=computer_id).filter(date_revoked=None)\n assignment_history = EmployeeComputer.objects.filter(computer_id=computer_id).exclude(date_revoked=None).order_by('-date_assigned')\n\n context = {\n \"computer\": computer,\n \"current_assignment\": current_assignment,\n \"assignment_history\": assignment_history\n }\n\n return render(request, \"agileHR/computer_detail.html\", context)", "def getEmployees(self):\n return self.employees", "def start_view(request):\n\n if request.user and Employee.objects.filter(user__pk=request.user.pk).exists():\n if Employee.objects.get(user__pk=request.user.pk).is_manager:\n return HttpResponseRedirect('/dashboard')\n else:\n return HttpResponseRedirect('/employee/show/%d/' % request.user.employee_user.first().pk)\n else:\n return HttpResponseRedirect('/login/')", "def development_plan_details(request, development_plan_id): #, employee_id ):\n # employee = Employee.objects.get(user__pk=request.user.pk)\n # employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n current_employee = Employee.objects.filter(user__pk=request.user.pk).first()\n all_employees = development_plan.employee_relation.all()\n\n try:\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = development_plan.id\n dev_plan[\"deleted\"] = development_plan.deleted\n if development_plan.type:\n dev_plan[\"type\"] = development_plan.type.name\n # dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects.get(development_plan = development_plan)\\\n # .finished_at\n\n dev_plan[\"created_at\"] = development_plan.created_at\n dev_plan[\"created_by\"] = development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n emp_data[\"dev_plan_finished_at\"] = DevelopmentPlanToEmployeeRelation\\\n .objects.get(employee=emp,\n development_plan = development_plan)\\\n .finished_at\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = 
development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id).first() #employee=current_employee\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for slider in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id).first() #employee=current_employee\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n except DevelopmentPlan.DoesNotExist:\n return JsonResponse(data={\"details\":\"Development Plan with this id doesn't exist\"}, status=404)", "def create_employee(request, company_id):\n\n company = Company.objects.get(pk=company_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n logUnauthorizedAccess(\"User tried to create_employee\", request)\n raise PermissionDenied()\n form = EmployeeForm(request, initial=dict(company=company))\n form.fields['manager'].queryset = Employee.objects.filter(is_manager=True, company=company)\n # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(\n # Q(company=company) | Q(company__isnull=True))\n # data = {\n # 
'employee_form': form.cleaned_data,\n # 'company': company.cleaned_data[\"name\"]\n # }\n\n return TemplateResponse(\n request,\n 'mus/create_employee_form.html',\n {\n 'employee_form': form,\n }\n )\n # data = {\n # 'employee_form': form.cleaned_data,\n # 'company': company.cleaned_data[\"name\"]\n # }\n # return JsonResponse(status=200, data=data)", "def eventdetails(http_request, event_id=0):\n\te = get_object_or_404(Event, pk=event_id)\n\tweather = list(Weather.objects.filter(day=e.edate).filter(zip=e.zip))\n\tif len(weather) == 0:\n\t\tw = None\n\telse:\n\t\tw = weather[0]\n\treturn render_to_response('event_detail.html', {'event': e,\n\t\t\t\t\t\t\t'w': w })", "def employee_list_group_by_badges_detail(request, badge_id):\n if request.method == 'GET':\n badge = get_object_or_404(Badge, pk=badge_id)\n employee_list = EmployeeBadge.objects.filter(badge=badge).values(\n 'to_user__pk',\n 'to_user__username',\n 'to_user__first_name',\n 'to_user__last_name',\n 'to_user__level',\n 'to_user__avatar')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(employee_list, request)\n serializer = EmployeeGroupedListSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)", "def test_new_employee_crud_methods(self):\n response = self.client.get(\n '/employees/', kwargs={'employer_id': self.employee.id})\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(Employee.objects.all()), 1)\n\n # Test that a new employee can be added\n response = self.client.post(\n '/employees/',\n {'name': 'MAdtraxx!!', 'employer': self.employer.id},\n kwargs={'pk': self.employer.id})\n self.assertEqual(response.status_code, 201)\n self.assertEqual(Employee.objects.count(), 2)\n\n # Test that employee info may be edited\n response = self.client.put('/employees/1/',\n {'name': 'Ashley',\n 'employer': self.employer.id},\n kwargs={'employer_id': self.employee.id,\n 'pk': self.employee.id})\n self.assertEqual(response.status_code, 200)", "def get_employee(self, name):\n name = name.upper()\n if name in EMPLOYEE_MAP:\n name = EMPLOYEE_MAP[name]\n try:\n int(name)\n emps = Employee.objects.filter(id=name)\n except ValueError:\n if name == 'NN':\n emps = Employee.objects.filter(user__first_name='Nieznany')\n elif Employee.objects.filter(user__username__iexact=name).exists():\n emps = Employee.objects.filter(user__username__iexact=name)\n elif len(name) == 3:\n emps = Employee.objects.filter(user__first_name__istartswith=name[0],\n user__last_name__istartswith=name[1:3],\n status=0)\n else:\n emps = Employee.objects.filter(user__first_name__istartswith=name[0],\n user__last_name__istartswith=name[1:],\n status=0)\n if not emps:\n emps = Employee.objects.filter(user__username__istartswith=name)\n if len(emps) == 1:\n return emps[0]\n elif len(emps) > 1:\n self.stdout.write(self.style.ERROR('Multiple employee matches for {}. Choices are:'\n .format(name)))\n for e in emps:\n self.stdout.write(self.style.ERROR(' -{}'.format(e.user.get_full_name())))\n else:\n raise CommandError('Employee {} does not exists! 
Fix your input file.'.format(name))\n\n return None", "def detail(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n return render(request, 'problemfinder/details.html', {'question': question})", "def explore_view(request):\r\n # explore items\r\n user = request.user.userprofile\r\n items = Item.objects.explore(user)\r\n context = {'items':items}\r\n return render(request, 'explore/explore.html', context)", "def record_detail(request, slug, pk):\n # Try except to make sure the user is a member of this project\n try:\n ProjectMember.objects.get(user=request.user, project=Project.objects.get(slug=slug))\n except ObjectDoesNotExist:\n # User is not a member\n return HttpResponse(\"You're trying to access a project you're not a member of or a project that does not exist.\")\n else:\n # User is a member, details are provided and template is rendered.\n record = get_object_or_404(models.Record, pk=pk)\n project = models.Project.objects.get(slug=slug)\n template = 'records/record_detail.html'\n data = forms.ShowRecordForm(data=model_to_dict(record), entry=record.entry_type)\n context = {\n 'record':record,\n 'project':project,\n 'userperm':project.memberships.get(user=request.user),\n 'data':data\n }\n return render(request,template,context)", "def detail(request, article_id):\n return render(request, 'knowledgebase/detail.html', {'article_id': article_id})", "def about(request):\n realtors = Realtor.objects.order_by('-hire_date')\n mvp_realtors = Realtor.objects.all().filter(is_mvp=True)\n context = {\n 'realtors': realtors,\n 'mvp_realtors': mvp_realtors\n }\n return render(request, 'pages/about.html', context)", "def show(self, req, id):\n context = req.environ['meteos.context']\n\n try:\n model = self.engine_api.get_model(context, id)\n except exception.NotFound:\n raise exc.HTTPNotFound()\n\n return self._view_builder.detail(req, model)", "def department(department_id):\n # gather data from db about all employees\n return render_template(\"department.html\",\n department_id=department_id)", "def lookup_employee():\n unique_names = get_unique_employees()\n while True:\n if len(unique_names) > 1:\n print('Entries found by {} and {}.'.format(\n ', '.join(unique_names[:-1]),\n unique_names[-1]))\n elif len(unique_names) == 1:\n print('Entries found by {}.'.format(unique_names[0]))\n\n search_query = input('Show entries by: ')\n if validate_lookup_employee_format(search_query):\n break\n print('** Please enter a name of alphabetic characters and spaces **')\n return Entry.select().where(Entry.employee_name == search_query)", "def find_employee_by_id(self,id):\n self.employee_id()\n if id in self.emp_id:\n print(self.emp_id[id])\n return self.emp_id[id]\n else:\n print(\"Employee not found\")", "def stars_employee_list_group_by_category_detail(request, employee_id, category_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n category = get_object_or_404(Category, pk=category_id)\n stars = Star.objects.filter(to_user=employee, category=category).order_by('-date')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(stars, request)\n serializer = StarSmallSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)", "def employee_login():\n return Response(render_template('admin/login.html'))", "def detail_assignmentype(request, pk):\n prof = request.user.prof\n context = {'prof': prof}\n assignmentype = Assignmentype.objects.filter(pk=pk, prof=prof).first()\n assignments = 
assignmentype.assignment_set.\\\n annotate(std=StdDev('evalassignment__grade_assignment'),\n mean=Avg('evalassignment__grade_assignment'))\n if assignmentype:\n context['assignmentype'] = assignmentype\n context['assignments'] = assignments\n context['range_grades'] = range(assignmentype.nb_grading)\n return render(request, 'gradapp/detail_assignmentype.html',\n context)\n else:\n return redirect('gradapp:list_assignmentypes_running')", "def detail(request, reachcode):\n lake = get_object_or_404(Lake, reachcode=reachcode)\n photos = Photo.objects.filter(lake=lake)\n documents = Document.objects.filter(lake=lake)\n plants = lake.plants.all()\n return render(request, \"lakes/detail.html\", {\n \"lake\": lake,\n \"photos\": photos,\n \"documents\": documents,\n \"plants\": plants,\n })", "def employees_json(request):\n # current_employee = Employee.objects.get(user__pk=request.user.pk)\n employee_list = Employee.objects.filter(manager=request.user.employee_user)\n employees = list()\n for employee in employee_list:\n manager_dict = model_to_dict(employee)\n manager_dict['first_name'] = employee.user.first_name\n manager_dict['last_name'] = employee.user.last_name\n employees.append(manager_dict)\n data = {\"employees\": employees}\n return JsonResponse(data=data, content_type='application/json', safe=False)", "def team_details(request, id):\n template = loader.get_template('team/details.html')\n\n try:\n team = Team.objects.get(pk=id)\n team_members = User.objects.filter(profile__team=team)\n\n context = {\n 'team_name': team.name,\n 'team_info': team.information,\n 'team_logo': team.logo,\n 'team_members': team_members,\n 'days': Information.getDaysToContest()\n }\n\n except Team.DoesNotExist:\n context = None\n\n return CustomHttpResponse.send(template, context, request)", "def person(request, pk):\r\n person = get_object_or_404(Person, pk=pk)\r\n return HttpResponse('Person: %s' % person)" ]
[ "0.7529937", "0.7131289", "0.71110165", "0.6999811", "0.6908806", "0.6881819", "0.66219467", "0.6529428", "0.6462217", "0.64600676", "0.6398058", "0.6358623", "0.6351768", "0.63481253", "0.6347821", "0.63392276", "0.6267284", "0.623935", "0.6234159", "0.6226198", "0.6206023", "0.6200162", "0.61497295", "0.6126198", "0.6113187", "0.61120886", "0.6049017", "0.6004354", "0.5975923", "0.59708244", "0.5936071", "0.5934897", "0.5918847", "0.5912423", "0.5900706", "0.5898006", "0.58704376", "0.58698475", "0.5843286", "0.58270526", "0.58247256", "0.58045024", "0.5792652", "0.57594824", "0.5756625", "0.5753108", "0.5749746", "0.5745347", "0.57352835", "0.5734694", "0.5719445", "0.5719197", "0.5710443", "0.57053065", "0.56958705", "0.5693156", "0.56796885", "0.56730276", "0.5663401", "0.5652336", "0.5629631", "0.56091696", "0.56006616", "0.55616647", "0.5547363", "0.5537933", "0.5533468", "0.55083334", "0.54965436", "0.54820216", "0.5476782", "0.5459586", "0.5456978", "0.5440762", "0.5417508", "0.54153365", "0.5414436", "0.53869885", "0.5378453", "0.53763336", "0.537629", "0.5371751", "0.5364354", "0.5356327", "0.5352896", "0.53432304", "0.5340839", "0.5335357", "0.5325101", "0.53227574", "0.5320382", "0.53161854", "0.5315788", "0.53071445", "0.52756673", "0.52675897", "0.52664936", "0.5265712", "0.5257255", "0.5254923" ]
0.68222797
6
View for all employees the current user is a manager for that have an empty development plan
def get_manager_employees(request): current_employee = Employee.objects.get(user__pk=request.user.pk) manager_employees = Employee.objects.filter(manager=current_employee, development_plan_type=None).all() if manager_employees: emp_list=[] for emp in manager_employees: emp_data={} emp_data["id"] = emp.id emp_data["username"] = emp.user.username emp_data["first_name"] = emp.user.first_name emp_data["last_name"] = emp.user.last_name emp_data["manager_id"] = emp.manager.id # emp_data["status_questions"] = emp.status_questions # employee_role = EmployeeRole.objects.filter(employee=emp).all() # name_role_list = [] # for obj in employee_role: # name_role_list.append(obj.role.name) # emp_data["roles"] = name_role_list emp_list.append(emp_data) data = {"employees:": emp_list} return JsonResponse(status=201, data=data) else: return JsonResponse("The user with id={} isn't a manager for any user".format(current_employee.user.id), status=404)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_user_development_plans_for_manager(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if not current_employee.isEnsoUser() and current_employee.is_manager:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not int(employee_id) in [obj.id for obj in Employee.objects.filter(manager=current_employee).all()]:\n raise PermissionDenied(\"Employee with id={} is not assigned to you.\".format(employee_id), 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return JsonResponse(status=201, data=data)", "def get_active_development_plan_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n current_development_plan = DevelopmentPlan.objects.filter(\n employee_relation=current_employee,\n employee_relation__developmentplantoemployeerelation__finished_at__isnull=True).first() # is active !!!\n\n if not current_employee:\n raise PermissionDenied()\n\n if current_development_plan:\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = current_development_plan.id\n dev_plan[\"deleted\"] = current_development_plan.deleted\n if current_development_plan.type:\n dev_plan[\"type\"] = current_development_plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = current_development_plan)\\\n .finished_at\n\n dev_plan[\"created_at\"] = current_development_plan.created_at\n dev_plan[\"created_by\"] = current_development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = current_development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = current_development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = current_development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = current_development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in 
all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(current_development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = current_development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n print all_questions\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id,\n employee=current_employee).first()\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for slider in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id,\n employee=current_employee).first()\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(current_development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n 
return JsonResponse(status=201, data=data)\n\n else:\n return JsonResponse(data={\"details\": \"The user with id={} doesn't have an active Development Plan\"\n .format(current_employee.user.id)}, status=404)", "def get_queryset(self, request):\n return models.Employee.objects.exclude(username='root')", "def show_all_employees():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n \n \n ## right now this is ALL users... \n \n return render_template(\"employee_display.html\", employees = employees)", "def get_all_development_plans_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return JsonResponse(status=201, data=data)", "def show_all_employees(self):\n try:\n employees = self.admin_repository.show_all_employees()\n if employees:\n for employee in employees:\n print(\"Employee Id : {}\".format(employee[0]))\n print(\"Name : {}\".format(employee[1]))\n print(\"Email : {}\".format(employee[2]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def test_ReportingPeriodDetailView_current_employee_set_false(self):\n response = self.app.get(\n reverse(\n 'reports:ReportingPeriodDetailView',\n kwargs={'reporting_period': '2015-01-01'},\n )\n )\n self.assertEqual(\n len(response.html.find_all('tr', {'class': 'user'})), 2\n )", "def employees_manager(request):\n # current_employee = Employee.objects.get(user__pk=request.user.pk)\n manager_list = Employee.objects.filter(manager=request.user.employee_user, is_manager=True)\n employee = Employee.objects.get(pk=request.user.employee_user.id)\n employee_dict = model_to_dict(employee)\n employee_dict['first_name'] = employee.user.first_name\n employee_dict['last_name'] = employee.user.last_name\n employee_dict['photo'] = employee.photo.url if employee.photo else ''\n print employee_dict\n if len(manager_list) > 0:\n result_list = list(manager_list)\n all_managers_list = 
found_all_managers(manager_list, result_list)\n else:\n data = {\"employee_managers\": employee_dict}\n return JsonResponse(data=data, content_type='application/json', safe=False)\n employees = list()\n for manager in all_managers_list:\n manager_dict = model_to_dict(manager)\n manager_dict['first_name'] = manager.user.first_name\n manager_dict['last_name'] = manager.user.last_name\n manager_dict['photo'] = manager.photo.url if manager.photo else ''\n employees.append(manager_dict)\n employees.append(employee_dict)\n\n data = {\"employee_managers\": employees}\n return JsonResponse(data=data, content_type='application/json', safe=False)", "def development_plan_details(request, development_plan_id): #, employee_id ):\n # employee = Employee.objects.get(user__pk=request.user.pk)\n # employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n current_employee = Employee.objects.filter(user__pk=request.user.pk).first()\n all_employees = development_plan.employee_relation.all()\n\n try:\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = development_plan.id\n dev_plan[\"deleted\"] = development_plan.deleted\n if development_plan.type:\n dev_plan[\"type\"] = development_plan.type.name\n # dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects.get(development_plan = development_plan)\\\n # .finished_at\n\n dev_plan[\"created_at\"] = development_plan.created_at\n dev_plan[\"created_by\"] = development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n emp_data[\"dev_plan_finished_at\"] = DevelopmentPlanToEmployeeRelation\\\n .objects.get(employee=emp,\n development_plan = development_plan)\\\n .finished_at\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n 
comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id).first() #employee=current_employee\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for slider in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id).first() #employee=current_employee\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n except DevelopmentPlan.DoesNotExist:\n return JsonResponse(data={\"details\":\"Development Plan with this id doesn't exist\"}, status=404)", "def start_view(request):\n\n if request.user and Employee.objects.filter(user__pk=request.user.pk).exists():\n if Employee.objects.get(user__pk=request.user.pk).is_manager:\n return HttpResponseRedirect('/dashboard')\n else:\n return HttpResponseRedirect('/employee/show/%d/' % request.user.employee_user.first().pk)\n else:\n return HttpResponseRedirect('/login/')", "def is_manager(self) -> bool:\n return self.role in EmployeeRole.manager_roles()", "def is_employee():\n return _is_member('uw_employee')", "def test_managers_who_does_nothing(self):\n # Add 2 managers who do nothing\n self.manager_id = self._add_person(\"Manager\", \"ARRAY['Database']\", 30)\n self.manager_id1 = self._add_person(\"Manager\", \"ARRAY['AI']\", 30)\n\n # Run the query\n q = self.generate_query('view_manager_report', ())\n res = self.execute_query(q)\n assert len(res) == 2, f'There is suppose to be 2 entries {res}'", "def edit_employee(request, employee_id):\n employee = 
Employee.objects.get(pk=int(employee_id))\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n\n assert isinstance(employee, Employee)\n assert isinstance(current_employee, Employee)\n\n # if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n # raise PermissionDenied()\n\n if not current_employee.hasAccessTo(employee):\n raise PermissionDenied()\n\n form = EditEmployeeForm(request.user, employee, {\n 'first_name': employee.user.first_name,\n 'last_name': employee.user.last_name,\n 'email': employee.user.email,\n 'manager': employee.manager.id if employee.manager else 0,\n 'language_code': employee.language_code,\n # 'development_plan_type': employee.development_plan_type.id,\n 'is_manager': employee.is_manager\n })\n if 'manager' in form.fields:\n managerQS = Employee.objects.filter(is_manager=True, company__pk=employee.company.pk)\n form.fields['manager'].queryset = managerQS\n # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(\n # Q(company__pk=employee.company.pk) | Q(company__isnull=True)\n # )\n is_me = employee.user.pk == request.user.pk\n return TemplateResponse(\n request,\n 'mus/edit_employee_form.html',\n {\n 'edit_employee_form': form,\n 'employee_id': employee_id,\n 'me': is_me,\n 'name': employee.user.get_full_name()\n }\n )", "def get_job_query(self):\n context = aq_inner(self.context)\n catalog = getToolByName(context, 'portal_catalog')\n mt = getToolByName(self, 'portal_membership') \n currentUser = mt.getAuthenticatedMember() \n \n if \"Site Administrators\" not in currentUser.getGroups():\n\treturn catalog.searchResults(portal_type= 'SeniorProject.PloneAddOn.job', \t\t\t\t Creator = currentUser.getUserName())\n else: \n return catalog.searchResults(portal_type= 'SeniorProject.PloneAddOn.job')", "def index(request):\n\n context = {'employees': User.objects.select_related('profile').filter(is_staff=True).order_by('first_name')}\n return render(request, 'Employees/index.html', context)", "def show_all_information():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n all_certs = employee_certification.query.all()\n \n return render_template(\"admin.html\", employees = employees, all_certs = all_certs)", "def test_ReportingPeriodDetailView_current_employee_toggle(self):\n self.former_employee.user_data.current_employee = True\n self.former_employee.user_data.save()\n response = self.app.get(\n reverse(\n 'reports:ReportingPeriodDetailView',\n kwargs={'reporting_period': '2015-01-01'},\n )\n )\n self.assertEqual(\n len(response.html.find_all('tr', {'class': 'user'})), 3\n )\n self.former_employee", "def get_queryset(self, request):\n qs = super(EventAdmin, self).get_queryset(request)\n if request.user.is_superuser:\n return qs\n return qs.filter(dep=request.user.profile.department)", "def test_returns_200_if_user_org_manager(self):\n # Add user to organisation so that it has PM role\n add_manager_to_organisation(self.test_organisation, self.test_author)\n # Act\n response = self.client.get(\n self.url, headers={\"Authorization\": self.author_session_token}\n )\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"activeProjects\"]), 1)", "def get_queryset(self):\n\n user = self.request.user\n\n if user.is_authenticated and user.role == 'LA':\n # check if the user is a landville 
admin and return all records\n # even soft deleted ones\n return PropertyEnquiry.objects.all()\n\n if user.is_authenticated and user.role == 'CA':\n # if the user is a client admin, return only his records\n employer = user.employer.first()\n return PropertyEnquiry.active_objects.for_client(client=employer)\n\n # if the user is a buyer, return also only his enquiries\n return PropertyEnquiry.active_objects.for_user(user=user)", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def user(self, request):\n\t\t#return super(configManager, self).get_queryset().filter(models.Q(author=request.user) | models.Q(is_public=True)) ## For public showing, temporarily disabled\n\t\treturn super(configManager, self).get_queryset().filter(author=request.user)", "def filter_queryset(self, queryset):\n user = self.request.user\n if user.is_superuser:\n return super().filter_queryset(queryset)\n return queryset.filter(collaborators=user)", "def allow_egap_admins(queryset, request):\n if hasattr(request, 'user') and not waffle.flag_is_active(request, EGAP_ADMINS):\n return queryset.exclude(name='EGAP Registration')\n return queryset", "def show_overview_of_all_employees(self):\n\n print(\"OVERVIEW OF EMPLOYEES\\n\")\n\n employees_ob_list = self.llapi.get_employee_overview()\n \n for employee_ob in employees_ob_list:\n print(employee_ob.print_info_in_line(\"*\"))\n \n print(f\"\\nNAN AIR has {len(employees_ob_list)} employees\")\n\n print(\"\\nB Back\\n\")\n\n action_str = self.choose_action([\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"b\"])\n\n if action_str == \"b\":\n return", "def admin(request):\n if not request.user.is_staff:\n return render(request, 'manager/denied.html')\n return render(request, 'manager/index.html')", "def is_usermanager(self):\n return False", "def get_employee(self):\n employee_ids = self.env['hr.employee'].search([('user_id', '=', self.env.uid)])\n return employee_ids[0] if employee_ids else False", "def employees_json(request):\n # current_employee = Employee.objects.get(user__pk=request.user.pk)\n employee_list = Employee.objects.filter(manager=request.user.employee_user)\n employees = list()\n for employee in employee_list:\n manager_dict = model_to_dict(employee)\n manager_dict['first_name'] = employee.user.first_name\n manager_dict['last_name'] = employee.user.last_name\n employees.append(manager_dict)\n data = {\"employees\": employees}\n return JsonResponse(data=data, content_type='application/json', safe=False)", "def test_returns_200_if_user_org_manager(self):\n # Arrange\n self.test_user.role = UserRole.MAPPER.value # Make sure user role is Mapper\n self.test_user.save()\n add_manager_to_organisation(self.test_project.organisation, self.test_user)\n # Act\n response = self.client.get(\n self.url, headers={\"Authorization\": self.user_session_token}\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n TestGetProjectsRestAPI.assert_project_response(\n response.json, self.test_project, assert_type=\"notasks\"\n )", "def get_queryset(self):\n #print(\"request\", self.request)\n user = self.request.user\n return Experience.objects.filter(person=user)", "def get_queryset(self):\n qs = self.queryset.filter(expiry_date__gt=timezone.now())\n if not self.request.user.groups.filter(name=REGISTRIES_VIEWER_ROLE).exists():\n qs = qs.filter(Q(applications__current_status__code='A'),\n Q(applications__removal_date__isnull=True))\n return qs", "def is_usermanager(self):\n return 
self.can(Permission.CRUD_USERS)", "def _get_standalone_queryset(self, queryset):\n # (not used yet) To be iso LTI, admin and instructor can retrieve all video's livesession\n if permissions.IsParamsVideoAdminThroughOrganization().has_permission(\n self.request, self\n ):\n return queryset\n # use can get his related livesession\n return queryset.filter(user_id=self.request.user.id)", "def is_management_or_admin(user):\n if user.id:\n if in_projects_admin_group(user) or \\\n shared_models.Section.objects.filter(head=user).count() > 0 or \\\n shared_models.Division.objects.filter(head=user).count() > 0 or \\\n shared_models.Branch.objects.filter(head=user).count() > 0:\n return True", "def get_admins(self):\n from Employee import Employee\n admins = list()\n cursorRoles = self.dbconnect.get_cursor()\n cursorRoles.execute('select * from employeeRoles where role=\\'admin\\'')\n for row in cursorRoles:\n admins.append(self.get_employee(row[0]))\n return admins", "def default_get(self, cr, uid, fields, context=None): \n \n \n res = super(granted_rights_order, self).default_get(cr, uid, fields, context=context)\n \n employee_obj = self.pool.get('hr.employee')\n department_obj = self.pool.get('hr.department')\n manager = False\n donor_emp_id = []\n \n if uid != 1 :\n\n donor_emp_id = employee_obj.search(cr ,uid, [('user_id' , '=' , uid )])\n deparment_id = employee_obj.browse(cr,uid,donor_emp_id[0]).department_id.id\n \n if donor_emp_id[0] == department_obj.browse(cr,uid,deparment_id).manager_id.id :\n manager = True\n \n \n \n \n \n \n \n \n \n if donor_emp_id :\n res.update({ 'employee_donor': donor_emp_id[0], \n 'department_id' : deparment_id,\n 'is_a_amanger' : manager,\n })\n return res", "def current_employee(self, request: Request) -> Response:\n serializer = self.get_serializer_class()\n serializer = serializer(request.user, context={'request': request})\n return Response(serializer.data)", "def get_emp_list(self):\n\t\tcondition= ''\n\t\temp_list=[]\n\t\tif self.is_for_all==0:\n\t\t\tif not self.selected_employees:\n\t\t\t\tfrappe.throw(_(\"No employees for the mentioned criteria\"))\n\t\t\t#emp_list = [cstr(d.employee) for d in self.selected_employees]\n\t\t\temp_list = frappe.db.sql_list(\"\"\"\n\t\t\t\tselect\n\t\t\t\t\temployee from `tabAttendance Salary Tool Employee`\n\t\t\t\twhere\n\t\t\t\t\tparent = '%(parent)s' \n\t\t\t\"\"\"%{\"parent\": self.name})\n\t\t\tcondition+= \"\"\" and t1.employee IN %(employees)s \"\"\"\n\t\tif self.is_open_period==0:\n\t\t\tif not self.start_date or not self.end_date:\n\t\t\t\tfrappe.throw(_(\"Satart Date and End Date are Mandatories\"))\n\t\t\tcondition= \"\"\" and attendance_date >= %(start_date)s and attendance_date <= %(end_date)s\"\"\"\n\t\temp_list = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.employee as employee, count(*) as attendance_days\n\t\t\tfrom\n\t\t\t\t`tabAttendance` t1\n\t\t\twhere\n\t\t\t\tt1.attendance_salary_tool is null\n\t\t\t\tand t1.docstatus = 1 and t1.status='Present'\n\t\t\t\t{condition} group by t1.employee order by t1.employee asc\n\t\t\"\"\".format(condition=condition),{\"employees\": tuple(emp_list),\"start_date\": self.start_date,\"end_date\": self.end_date}, as_dict=True)\n\t\treturn emp_list", "def filter_queryset(self, request, queryset, view):\n if view.action == \"retrieve\" and request.method == \"GET\":\n return queryset.model.objects.all()\n\n filtered_queryset = super().filter_queryset(request, queryset, view)\n org_users = set(\n [group.team.organization for group in request.user.groups.all()] +\n 
[o.user for o in filtered_queryset]\n )\n\n return queryset.model.objects.filter(user__in=org_users, user__is_active=True)", "def get_queryset(self):\n user = self.request.user\n if not (user.is_authenticated and user.check_permstring(\"builders\")):\n raise Http404(\"Not staff\")\n return super(IncompleteRosterListView, self).get_queryset()", "def return_admin_list(request):\n del request\n return return_user_list(Administrador)", "def queryset(self, request):\n qs = super(ShortURLAdmin, self).queryset(request)\n if request.user.has_perm('deflect.list_all'):\n return qs\n return qs.filter(creator=request.user)", "def get_queryset(self):\n user = self.request.user\n\n if user.is_authenticated and user.role == 'LA':\n return Property.objects.all()\n\n if user.is_authenticated and user.employer.first():\n client = user.employer.first()\n return Property.active_objects.all_published_and_all_by_client(\n client=client)\n\n return Property.active_objects.all_published()", "def get_queryset(self):\n user = self.request.user\n\n if user.is_authenticated and user.role == 'LA':\n return Property.objects.all()\n\n if user.is_authenticated and user.employer.first():\n client = user.employer.first()\n return Property.active_objects.all_published_and_all_by_client(\n client=client)\n\n return Property.active_objects.all_published()", "def get_queryset(self):\n\n user = self.request.user\n\n if user.role == 'LA':\n return PropertyEnquiry.objects.all()\n\n # check if the user is a client admin\n # and return all enquiries made on his/her property\n if user.role == 'CA':\n return PropertyEnquiry.active_objects.for_client(\n client=user.employer.first())\n\n # else if the user is a buyer return only\n # the records that are associated with him/her\n return PropertyEnquiry.active_objects.for_user(user=user)", "def get_queryset(self):\n return AutomaticEmail.objects.filter(staff_user=self.request.user)", "def manager_active_list(self):\n _, body = self.request('/v1.1/managers/active', 'GET')\n return body", "def get_viewable(self, user):\n if user.get('role') in ('admin', 'manager', 'engineer'):\n return True\n return user['name'] == self.doc.get('customer')", "def get_managers():\n return {'managers': get_users('managers')}", "def usersview_admin():\n\n # User objects list which includes list of all users which can be broken down into editors and sponsors\n # get all users\n user_objects=db.session.query(User.id,User.email,User.user_type,User.user_status,User.name,User.organization).\\\n order_by(User.id)\n\n # get a count of the user objects\n user_count = user_objects.count()\n\n # blank list to append to\n user_list=[]\n\n # loop through user objects\n for counter in range(0,user_count):\n user_list.append(user_objects[counter])\n\n # show list of document names\n users = user_list\n\n \"\"\"Logged-in User Dashboard.\"\"\"\n return render_template(\n 'usersview_admin.jinja2',\n users=users\n )", "def test_returns_all_projects_for_admin_if_managed_by_me_is_true(self):\n # Arrange\n self.test_project_2.private = False\n self.test_project_2.save()\n self.test_project_3.status = ProjectStatus.PUBLISHED.value\n self.test_project_3.save()\n self.test_user.role = UserRole.ADMIN.value\n self.test_user.save()\n # Act\n response_admin = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"managedByMe\": \"true\"},\n )\n self.assertEqual(response_admin.status_code, 200)\n self.assertEqual(len(response_admin.json[\"results\"]), 3)", "def get_queryset(self, 
request):\n qs = super().get_queryset(request)\n if not request.user.is_superuser and request.user.approved_organisations.exists():\n qs = qs.filter(organisation__in=request.user.approved_organisations.all()).distinct()\n return qs", "def manage_permission_only(self, **kw):\n return dict(page='managers stuff')", "def manage_permission_only(self, **kw):\n return dict(page='managers stuff')", "def manage_permission_only(self, **kw):\n return dict(page='managers stuff')", "def manage_permission_only(self, **kw):\n return dict(page='managers stuff')", "def manage_permission_only(self, **kw):\n return dict(page='managers stuff')", "def dashboard(request):\n employee = request.user.employee_user.first()\n widgets = list()\n # development_plans = employee.getDevelopmentPlans()\n if employee.is_manager:\n widgets.append(dict(\n # template=\"mus/_widget_waiting_developmentplans.html\",\n data=employee.getMyEmployees(),\n # title=_('Expecting preparation guides from')\n ))\n widgets.append(dict(\n # template=\"mus/_widget_todo_developmentplans.html\",\n data=employee.getMyEmployees(),\n # title=_('Preparation guides to do')\n ))\n # widgets.append(dict(\n # template = \"mus/_widget_my_developmentplans.html\",\n # data = development_plans,\n # title = _('My development plans')\n # ))\n return JsonResponse(status=200,data={\n # 'widgets': model_to_dict(widgets),\n 'employee': model_to_dict(employee),\n # 'development_plans': development_plans\n })", "def is_admin(user):\n return get_organisations_as_admin(user).count() > 0", "def is_admin_or_project_manager(user, project):\n if user.id:\n\n # check to see if a superuser or projects_admin -- both are allow to modify projects\n if \"projects_admin\" in [g.name for g in user.groups.all()]:\n return True\n\n # check to see if they are a section head, div. 
manager or RDS\n if is_section_head(user, project) or is_division_manager(user, project) or is_rds(user, project):\n return True", "def get_queryset(self):\n qs = super(RetiresmartzViewSet, self).get_queryset()\n # Check user object permissions\n user = SupportRequest.target_user(self.request)\n return qs.filter_by_user(user)", "def available_employees(self,work_trips_by_date):\r\n\r\n employee_list = self.get_updated_list_from_DB('employee')\r\n available_employees_list = []\r\n total_sets = set()\r\n set_list = []\r\n\r\n for i in range(len(work_trips_by_date)):\r\n set_list.append(set(work_trips_by_date[i])) \r\n \r\n total_sets = set_list[0]\r\n \r\n if len(work_trips_by_date) != 1: \r\n for i in range(1,len(set_list)):\r\n total_sets.update(set_list[i])\r\n\r\n for line in employee_list:\r\n if line[0] not in total_sets:\r\n available_employees_list.append(line)\r\n\r\n row_names = ['id', 'name' ,'role' ,'rank'] #return columns\r\n employee_index_list = self.find_index_from_header('employee', row_names)\r\n filtered_available_employees = self.filter_by_header_index(employee_index_list, available_employees_list)\r\n\r\n available_employees_list.pop(0)\r\n\r\n return filtered_available_employees", "def get_queryset(self):\n user = self.request.user\n expenses = Expense.objects.filter(\n Q(userexpense__in=user.userexpense_set.all())\n | Q(group__in=user.group_set.all()))\n\n if self.request.query_params.get('q', None) is not None:\n expenses = expenses.filter(\n description__icontains=self.request.query_params.get(\n 'q', None))\n return expenses", "def all_employees(request, company_id=None):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n company_super_user = current_employee.isCompanySuperUserOrHigher()\n if company_id:\n company = Company.objects.get(pk=company_id)\n else:\n company = current_employee.company\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n raise PermissionDenied()\n change_company_form = ChangeCompanyForm(initial=dict(company=company))\n return TemplateResponse(\n request,\n 'all_employees.html',\n {\n 'user': request.user,\n 'company_super_user': company_super_user,\n 'company': company,\n 'change_company_form': change_company_form,\n }\n )", "def get_employees(self):\n return self.employees", "def get_queryset(self):\n return get_user_model().objects.none()", "def is_admin(user):\n return user.groups.filter(name='Profesores').exists()", "def manager_info(self, manager):\n _, body = self.request('/v1.1/managers/active/%s' % manager, 'GET')\n return body", "def test_users_listed(self):\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.plan)", "def get_employee_information(user_name: str, employee_name: str, store_name: str):\n\n user_name = auth.get_username_from_hash(user_name)\n permission_handler.is_permmited_to(user_name=user_name, action=Action.EMPLOYEE_INFO.value,\n store_name=store_name)\n permission_handler.is_working_in_store(employee_name, store_name)\n return user_handler.get_employee_information(employee_name)", "def list(self, request):\n\n records = filter_against_records(request)\n \n if 'faculty_id' in request.query_params:\n faculty = Faculties.objects.filter(id=request.query_params.get('faculty_id'))[0]\n departments = Departments.objects.filter(faculty_id=model_to_dict(faculty)['id'])\n for department in departments:\n education_programs = 
EducationPrograms.objects.filter(main_department_id=model_to_dict(department)['id'])\n new_records_id = []\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'department_id' in request.query_params:\n department = Departments.objects.filter(id=request.query_params.get('department_id'))[0]\n education_programs = EducationPrograms.objects.filter(main_department_id=model_to_dict(department)['id'])\n new_records_id = []\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'start_year_id' in request.query_params:\n start_year = StartYears.objects.filter(id=request.query_params.get('start_year_id'))[0]\n education_programs = EducationPrograms.objects.filter(start_year_id=model_to_dict(start_year)['id'])\n new_records_id = []\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'specialization_id' in request.query_params:\n specialization = Specializations.objects.filter(id=request.query_params.get('specialization_id'))[0]\n education_programs = EducationPrograms.objects.filter(specialization_id=model_to_dict(specialization)['id'])\n new_records_id = []\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'education_level_id' in request.query_params:\n education_level = EducationLevels.objects.filter(id=request.query_params.get('education_level_id'))[0]\n education_programs = EducationPrograms.objects.filter(education_level_id=model_to_dict(education_level)['id'])\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'education_program_id' in request.query_params:\n education_program = EducationPrograms.objects.filter(id=request.query_params.get('education_program_id'))[0]\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n 
new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'student_id' in request.query_params:\n records = records.filter(student_id=request.query_params.get('student_id'))\n\n \n\n \n \n \n students = Students.objects.all()\n res = []\n for student in students:\n student_records = records.filter(student_id=model_to_dict(student)['id'])\n if len(student_records) > 0:\n res.append(student)\n\n return Response(normalize_students(res))", "def create_employee(request, company_id):\n\n company = Company.objects.get(pk=company_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n logUnauthorizedAccess(\"User tried to create_employee\", request)\n raise PermissionDenied()\n form = EmployeeForm(request, initial=dict(company=company))\n form.fields['manager'].queryset = Employee.objects.filter(is_manager=True, company=company)\n # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(\n # Q(company=company) | Q(company__isnull=True))\n # data = {\n # 'employee_form': form.cleaned_data,\n # 'company': company.cleaned_data[\"name\"]\n # }\n\n return TemplateResponse(\n request,\n 'mus/create_employee_form.html',\n {\n 'employee_form': form,\n }\n )\n # data = {\n # 'employee_form': form.cleaned_data,\n # 'company': company.cleaned_data[\"name\"]\n # }\n # return JsonResponse(status=200, data=data)", "def get_queryset(self):\n return super(ActiveUsersManager, self).get_queryset().filter(user__is_active=True)", "def get(self):\n resultado = EmployeeModel.query.all()\n return resultado", "def _get_project_by_manager(userid):\n return Project.objects.filter(project_open=True, manager=userid).order_by(\n \"created_at\"\n )", "def list(self, request):\n employee = self.controller.retrieve_all_employees()\n serializer = data_serializers.PresentEmployeeDataSerializer(employee, many=True)\n return Response(serializer.data)", "def employeeHome(request):\n # assert isinstance(request, HttpRequest)\n \n if userHasBeenCleared(request):\n \n return render(\n request,\n 'app/index_employee.html',\n {\n 'title':'Home Page',\n 'year':datetime.now().year,\n 'email':getEmailSessionVar(request),\n 'isAdmin':isAdmin(request), \n }\n )\n return login(request)", "def myorgs(request):\n context = RequestContext(request)\n \n user = request.user\n orgs = user.orgusers.get_query_set()\n \n context['orgs'] = orgs\n return render_to_response('myorgs.html', context)", "def getInterestedUsers():", "def get_queryset(self):\n reserva = self.request.user.reserva\n if reserva:\n return Reservado.objects.filter(\n reserva=reserva,\n reserva__completada=False)\n else:\n return Horario.objects.none()", "def get_queryset(self, **kwargs):\n username = self.request.user.username\n query = Meal.objects.filter(member__username=username)\n return query", "def get_lists(self, request):\n target_user = User.objects.filter(email=request.POST['email'])\n if not target_user.exists():\n # In this case we don't want to return to the initial page\n return JsonResponse({\n 'msg': \"ERROR: The user doesn't exist\"\n })\n\n requests = Request.objects.get_active_by_user(target_user.first())\n borrowings = Borrowing.objects.get_active_by_user(target_user.first())\n html = render_to_string(\"include/hardware_admin_user.html\", {\n 'requests': requests,\n 'borrowings': borrowings\n })\n return JsonResponse({\n 'content': html\n })", "def index(request):\n users = 
User.objects.filter(is_staff=False, is_active=True).order_by('username')\n return render(request, 'users/view_all_users.html',\n { 'users': users })", "def queryset(self, request: HttpRequest, queryset: QuerySet) -> QuerySet:\n return {\n 'superuser': queryset.filter(is_superuser=True),\n 'staff': queryset.filter(is_staff=True),\n 'scanlator': queryset.filter(groups__name='Scanlator'),\n 'regular': queryset.exclude(is_staff=True)\n }.get(self.value() or '', queryset)", "def getEmployees(self):\n return self.employees", "def get_employees(self, active_only):\n cursor = self.dbconnect.get_cursor()\n\n if active_only:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee WHERE is_active = TRUE')\n else:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee')\n\n employees = list()\n for row in cursor:\n obj = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n employees.append(obj)\n return employees", "def gather_employee_entries(self):\n user_inputs = [\n self.emp_lname.get(), self.emp_mi.get(), self.emp_fname.get(),\n self.emp_hiredate.get()\n ]\n\n return self.check_input_empty(user_inputs)", "def get_queryset(self):\n user = self.request.user\n\n if user.is_authenticated and user.role == 'LA':\n # admins view all property, no filtering\n return Property.objects.all()\n\n if user.is_authenticated and user.employer.first():\n # if the user is a client_admin, they see all published property\n # and also their client's published and unpublished property.\n client = user.employer.first()\n return Property.active_objects.all_published_and_all_by_client(\n client=client)\n\n # other users only see published property\n return Property.active_objects.all_published()", "def get_managers_list(self):\n try:\n role_id = [x[0] for x in self.db_handler.get_roles_list() if x[1] == 'Менеджер'][0]\n staff_by_role = self.db_handler.get_all_staff_by_role_id(role_id)\n\n self.logger.write_to_log('managers list got', 'model')\n\n return staff_by_role\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def filter_for_user(self, user):\n if user.is_anonymous or user.is_staff or user.is_support:\n return self\n\n return self.filter(\n Q(\n project__permissions__user=user,\n project__permissions__is_active=True,\n )\n | Q(\n project__customer__permissions__user=user,\n project__customer__permissions__is_active=True,\n )\n | Q(\n offering__customer__permissions__user=user,\n offering__customer__permissions__is_active=True,\n )\n ).distinct()", "def test_readonly_user(self):\n\n self.user = self.make_user()\n\n ma = EmployeeAdmin(model=Employee, admin_site=None)\n\n self.assertEqual(\n hasattr(self.user, 'employee'),\n False\n )\n # since there is no atribute employee in self user, we\n # can assume that obj=None\n self.assertEqual(\n list(ma.get_readonly_fields(self, obj=None)),\n []\n )\n\n self.employee = Employee.objects.create(\n cpf=\"974.220.200-16\",\n user=self.user,\n departament=Employee.ADMINISTRATION\n )\n\n self.assertEqual(\n hasattr(self.user, 'employee'),\n True\n )\n\n ma1 = EmployeeAdmin(model=Employee, admin_site=None)\n self.assertEqual(\n list(ma1.get_readonly_fields(self, 
obj=self.user.employee)),\n ['user']\n )", "def get_queryset(self):\n qs = super().get_queryset()\n qs.filter(company=self.request.user.company)\n return qs", "def test_ReportingPeriodList_json_no_longer_employed(self):\n # Create a user, but set the user as unemployed\n self.regular_user = User.objects.create(\n username='new.user')\n userdata = UserData(user=self.regular_user)\n userdata.current_employee = False\n userdata.save()\n\n reporting_periods = client().get(reverse('ReportingPeriodList')).data\n start_date = reporting_periods[0]['start_date']\n res = client().get(reverse(\n 'ReportingPeriodAudit',\n kwargs={'reporting_period_start_date': start_date}\n )\n ).data\n self.assertEqual(len(res), 0)", "def get_queryset(self):\n qs = self.queryset\n if not self.request.user.groups.filter(name=REGISTRIES_VIEWER_ROLE).exists():\n qs = qs.filter(\n Q(applications__current_status__code='A'),\n Q(applications__removal_date__isnull=True))\n if self.kwargs.get('activity') == 'drill':\n qs = qs.filter(registries_activity='DRILL')\n if self.kwargs.get('activity') == 'install':\n qs = qs.filter(registries_activity='PUMP')\n return qs", "def _current_login_employee(self):\n hr_employee = self.env[\"hr.employee\"].search(\n [(\"user_id\", \"=\", self._current_login_user())], limit=1\n )\n return hr_employee.id", "def users_with_role(self):\r\n return User.objects.none()", "def get(self):\n authenticated_user_id = token_auth.current_user()\n orgs_dto = OrganisationService.get_organisations_managed_by_user_as_dto(\n authenticated_user_id\n )\n if len(orgs_dto.organisations) < 1:\n return {\n \"Error\": \"User is not a manager of the project\",\n \"SubCode\": \"UserPermissionError\",\n }, 403\n\n search_dto = self.setup_search_dto()\n admin_projects = ProjectAdminService.get_projects_for_admin(\n authenticated_user_id,\n request.environ.get(\"HTTP_ACCEPT_LANGUAGE\"),\n search_dto,\n )\n return admin_projects.to_primitive(), 200", "def user_has_access(self, user):\n if not user: return False\n query = db.Query(TaskListMember)\n query.filter('task_list =', self)\n query.filter('user =', user)\n return query.get()" ]
[ "0.7106553", "0.6753194", "0.674842", "0.6595502", "0.6566336", "0.63812053", "0.61806154", "0.61537486", "0.612033", "0.60830206", "0.60527897", "0.60266685", "0.596054", "0.5893149", "0.5841044", "0.57119644", "0.56820273", "0.56635785", "0.5655162", "0.5615645", "0.5613845", "0.5602607", "0.5572919", "0.554382", "0.5536185", "0.55342567", "0.55089074", "0.54941607", "0.54308563", "0.542971", "0.5427188", "0.5425637", "0.5397866", "0.53794146", "0.53750765", "0.5367295", "0.53473127", "0.5343096", "0.5324519", "0.5290491", "0.5289473", "0.52888507", "0.5284244", "0.5281843", "0.5280169", "0.5280169", "0.5278413", "0.52557886", "0.5249034", "0.52482194", "0.5233246", "0.52267945", "0.5222539", "0.5221709", "0.5221392", "0.5221392", "0.5221392", "0.5221392", "0.5221392", "0.51981926", "0.5189446", "0.51889163", "0.5184994", "0.51749545", "0.5168319", "0.5161737", "0.51572776", "0.5129682", "0.5122577", "0.51219624", "0.5109278", "0.5098632", "0.5090985", "0.5085223", "0.50753987", "0.5074422", "0.50656646", "0.5063758", "0.50597906", "0.50552", "0.5045343", "0.5043057", "0.5042162", "0.5039404", "0.50357234", "0.5029422", "0.5027359", "0.5023092", "0.5021821", "0.5020627", "0.50197566", "0.5018192", "0.5015877", "0.5012698", "0.5012465", "0.50090754", "0.5007883", "0.49942163", "0.49923512", "0.49912727" ]
0.73281884
0
View for detail of employee
def profile_detail(request, employee_id):
    current_employee = Employee.objects.filter(user__pk=request.user.pk).first()
    employee = Employee.objects.get(pk=int(employee_id))
    if not current_employee:
        raise PermissionDenied("You don't have any employee assigned to you.", 401)
    if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:
        raise PermissionDenied()
    actions = employee.action_set.all()
    if not current_employee.pk == int(employee_id):
        if not current_employee.is_manager or not current_employee.company.pk == employee.company.pk:
            if not current_employee.isCompanySuperUserOrHigher():
                return HttpResponse('unauthorized', status=401)

    user_files = get_files_for_employee(employee_id)

    if request.method == 'POST':
        upload_form = UploadFileToEmployeyForm(request.POST, request.FILES)
        form = EmployeeNoteForm(request.POST, instance=employee)

        if 'upload' in request.POST:
            if upload_form.is_valid():
                upload_form.handle_upload(employee_id, request.FILES['file'])
                return HttpResponseRedirect('/employee/show/{}?upload_status=ok#file-list'.format(employee_id))
        else:
            if form.is_valid():
                form.save(request.user, employee)
                return HttpResponseRedirect('/employee/show/%d' % form.instance.pk)
    else:
        form = EmployeeNoteForm(instance=employee)
        upload_form = UploadFileToEmployeyForm()

    data = {}
    data["user"] = employee.user.first_name + " " + employee.user.last_name
    data["id"] = str(employee.user.pk)
    data["title"] = employee.title
    data["email"] = employee.user.email
    data["phone"] = employee.phone

    company_dict = {}
    company_dict["name"] = employee.company.name
    company_dict["id"] = str(employee.company.pk)
    data["company"] = company_dict

    employee_username = ""
    emp = Employee.objects.filter(manager=employee.manager).all()
    for obj in emp:
        employee_username = obj.manager.user.username if obj.manager else ""
        employee_first = obj.manager.user.first_name if obj.manager else ""
        employee_last = obj.manager.user.last_name if obj.manager else ""

    manager_dict = {}
    manager_dict["name"] = employee_username
    manager_dict["id"] = employee_id
    manager_dict["first_last_name"] = employee_first + " " + employee_last
    data["manager"] = manager_dict

    data["date_of_birth"] = employee.date_of_birth
    data["status_questions"] = employee.status_questions
    data["notes"] = employee.notes

    employee_role = EmployeeRole.objects.filter(employee=employee).all()
    name_role_list = []
    for obj in employee_role:
        name_role_list.append(obj.role.name)
    data["roles"] = name_role_list

    data["potenciale"] = employee.potenciale
    data["date_start"] = employee.created_at
    data["is_manager"] = employee.is_manager
    data["date_finish"] = ""
    data['photo'] = employee.photo.url if employee.photo else ''

    return JsonResponse(status=200, data=data)
    # return TemplateResponse(
    #     request,
    #     'mus/detail.html',
    #     {
    #         'actions': actions,
    #         'employee': employee,
    #         # 'development_plans': development_plans,
    #         'form': form,
    #         'upload_form': upload_form,
    #         'user_files': user_files
    #     }
    # )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def employee():\n return Response(render_template('employee/employee.html'))", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def show_employee(emp_id, fields=None):\n ret = {}\n if fields is None:\n fields = \",\".join(\n (\n \"canUploadPhoto\",\n \"department\",\n \"displayName\",\n \"firstName\",\n \"id\",\n \"jobTitle\",\n \"lastName\",\n \"location\",\n \"mobilePhone\",\n \"nickname\",\n \"photoUploaded\",\n \"photoUrl\",\n \"workEmail\",\n \"workPhone\",\n \"workPhoneExtension\",\n )\n )\n\n status, result = _query(action=\"employees\", command=emp_id, args={\"fields\": fields})\n\n root = ET.fromstring(result)\n\n ret = {\"id\": emp_id}\n for item in root:\n ret[next(iter(item.values()))] = item.text\n return ret", "def get(self):\n resultado = EmployeeModel.query.all()\n return resultado", "def retrieve(self, request, pk=None):\n employee = self.get_employee_object(pk)\n print(F\"Employee: {employee}\")\n serializer = data_serializers.PresentEmployeeDataSerializer(employee)\n return Response(serializer.data)", "def employee_detail(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n employee = Employee.objects.get(pk=int(employee_id))\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not current_employee.pk == int(employee_id):\n if not current_employee.is_manager or not current_employee.company.pk == employee.company.pk:\n if not current_employee.isCompanySuperUserOrHigher():\n return HttpResponse('unauthorized', status=401)\n\n user_files = get_files_for_employee(employee_id)\n\n if request.method == 'POST':\n\n upload_form = UploadFileToEmployeyForm(request.POST, request.FILES)\n form = EmployeeNoteForm(request.POST, instance=employee)\n\n if 'upload' in request.POST:\n if upload_form.is_valid():\n upload_form.handle_upload(employee_id, request.FILES['file'])\n\n return HttpResponseRedirect('/employee/show/{}?upload_status=ok#file-list'.format(employee_id))\n\n else:\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/employee/show/%d' % form.instance.pk)\n\n else:\n form = EmployeeNoteForm(instance=employee)\n upload_form = UploadFileToEmployeyForm()\n data = {}\n data[\"first_name\"] = employee.user.first_name\n data[\"last_name\"] = employee.user.last_name\n data[\"email\"] = employee.user.email\n data[\"is_manager\"] = employee.is_manager\n data[\"language_code\"] = employee.language_code\n employee_role = EmployeeRole.objects.filter(employee=employee).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n data[\"roles\"] = name_role_list\n return JsonResponse(status=201, data=data)\n # return TemplateResponse(\n # request,\n # 'mus/detail.html',\n # {\n # 'actions': actions,\n # 'employee': employee,\n # # 'development_plans': development_plans,\n # 'form': form,\n # 'upload_form': upload_form,\n # 'user_files': user_files\n # }\n # )", "def display_employee(self):\n print \"[Name: %s] [Salary: %d]\" % (self.name, self.salary)", "def get_employee(employee_id):\n\n employee = Employee.objects.get_or_404(id=employee_id)\n\n return jsonify({\n 'employee': employee\n })", "def show_all_employees(self):\n try:\n employees = 
self.admin_repository.show_all_employees()\n if employees:\n for employee in employees:\n print(\"Employee Id : {}\".format(employee[0]))\n print(\"Name : {}\".format(employee[1]))\n print(\"Email : {}\".format(employee[2]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def list(self, request):\n employee = self.controller.retrieve_all_employees()\n serializer = data_serializers.PresentEmployeeDataSerializer(employee, many=True)\n return Response(serializer.data)", "def get(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def get(cls, employee_id):\n employee = EmployeeModel.find_by_id(employee_id)\n if not employee:\n return {'message': 'Employee not found, or you do not have the access'}, 404\n\n return employee.json()", "def list(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def show_all_employees():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n \n \n ## right now this is ALL users... \n \n return render_template(\"employee_display.html\", employees = employees)", "def get(self, id):\n resultado = EmployeeModel.query.filter_by(employee_id=id).first()\n if resultado:\n return resultado\n api.abort(404)", "def employee_get(emp_id):\n try:\n emp = Employee.objects.get(id=emp_id)\n except Employee.DoesNotExist:\n return JsonResponse({\n 'status': False,\n 'message': 'Employee does not exists in database'\n }, status=404)\n _data = {\n 'id': emp.id,\n 'first_name': emp.first_name,\n 'last_name': emp.last_name,\n 'age': emp.age,\n 'city': emp.city.name,\n 'state': emp.state.name,\n 'country': emp.country.name\n }\n return JsonResponse(_data, safe=False)", "def committee_show(request, pk):\n committee = Committee.objects.get(pk=pk)\n\n delegates = Delegate.objects.filter(committee_id=pk)\n\n context = {\"committee\": committee, \"delegates\": delegates}\n template = \"jurycore/committee_show.html\"\n return render(request, template, context)", "def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, employee.last_name, employee.grade, payscale.salary))", "def details(request):\n\treturn render(request, 'ExcelApp/main.html')", "def show_overview_of_all_employees(self):\n\n print(\"OVERVIEW OF EMPLOYEES\\n\")\n\n employees_ob_list = self.llapi.get_employee_overview()\n \n for employee_ob in employees_ob_list:\n print(employee_ob.print_info_in_line(\"*\"))\n \n print(f\"\\nNAN AIR has {len(employees_ob_list)} employees\")\n\n print(\"\\nB Back\\n\")\n\n action_str = self.choose_action([\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"b\"])\n\n if action_str == \"b\":\n return", "def current_employee(self, request: Request) -> Response:\n serializer = self.get_serializer_class()\n serializer = serializer(request.user, 
context={'request': request})\n return Response(serializer.data)", "def stars_employee_list(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee)\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(employee_stars, request)\n serializer = StarSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)", "def employee_profile_page(cls, employee_id):\n return cls.__profile_page(employee_id, cls._logger)", "def home(request):\n\n\tcontext_dict = {}\n\temployee = models.Teacher.objects.filter(\n\t\tuser=request.user\n\t).first()\n\t# context_dict = {\n\t# context_helper.get_emp_info(employee)\n\t# }\n\t# print (context_dict)\n\tcontext_dict.update(context_helper.get_emp_info(employee))\n\treturn render(request, \"home.html\", context_dict)", "def edit_employee(request, employee_id):\n employee = Employee.objects.get(pk=int(employee_id))\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n\n assert isinstance(employee, Employee)\n assert isinstance(current_employee, Employee)\n\n # if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n # raise PermissionDenied()\n\n if not current_employee.hasAccessTo(employee):\n raise PermissionDenied()\n\n form = EditEmployeeForm(request.user, employee, {\n 'first_name': employee.user.first_name,\n 'last_name': employee.user.last_name,\n 'email': employee.user.email,\n 'manager': employee.manager.id if employee.manager else 0,\n 'language_code': employee.language_code,\n # 'development_plan_type': employee.development_plan_type.id,\n 'is_manager': employee.is_manager\n })\n if 'manager' in form.fields:\n managerQS = Employee.objects.filter(is_manager=True, company__pk=employee.company.pk)\n form.fields['manager'].queryset = managerQS\n # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(\n # Q(company__pk=employee.company.pk) | Q(company__isnull=True)\n # )\n is_me = employee.user.pk == request.user.pk\n return TemplateResponse(\n request,\n 'mus/edit_employee_form.html',\n {\n 'edit_employee_form': form,\n 'employee_id': employee_id,\n 'me': is_me,\n 'name': employee.user.get_full_name()\n }\n )", "def get_employee(self, employee_id):\n cursor = self.dbconnect.get_cursor()\n\n try:\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE LOWER(id)=LOWER(%s)', (employee_id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n\n except:\n self.dbconnect.rollback()\n raise", "def index(request):\n\n context = {'employees': User.objects.select_related('profile').filter(is_staff=True).order_by('first_name')}\n return render(request, 'Employees/index.html', context)", "def profile_page(cls, employee_id, logger=None):\n if logger is None:\n logger = cls._logger\n\n database_connection = DatabaseConnection(f\"employees.csv\")\n table = database_connection.table\n employee = Employee(employee_id)\n\n view = table[(table['employee_id']==employee.get_employee_id())]\n logger.log(view)\n\n while True:\n\n choice = input(\n \"Please choose: \"\n \"(1) check data, \"\n \"(2) update first name, \"\n \"(3) update last name, \"\n \"(4) save changes, \"\n \"(5) exit without saving \"\n )\n if choice not in ('1', '2', '3', '4', '5'):\n 
logger.log(\"Please pick a valid choice\")\n elif choice=='1':\n view = table[(table['employee_id']==employee.get_employee_id())]\n logger.log(view)\n elif choice=='2':\n first_name = input(\"Enter your first name: \")\n employee.set_first_name(first_name)\n elif choice=='3':\n last_name = input(\"Enter your last name: \")\n employee.set_last_name(last_name)\n elif choice=='4':\n table[\n (table['employee_id']==employee.get_employee_id())\n ] = pd.Series(\n {'employee_id': employee.get_employee_id(),\n 'first_name': employee.get_first_name(),\n 'last_name': employee.get_last_name(),\n }\n )\n database_connection.overwrite()\n logger.log(\"Information saved!\")\n else:\n break", "def get(self, request, pk):\n employee = EmployeeDetail.objects.get(pk=pk)\n response = {\n 'payment_methods': EmployeeSerializer(\n employee,\n ).data\n }\n return Response(response)", "def occupation(request, pk):\r\n occupation = get_object_or_404(Occupation, pk=pk)\r\n return HttpResponse('Occupation: %s' % occupation)", "def enterprise_edit(request):\r\n action = tool.get_param_by_request(request.GET, 'action', \"add\", str)\r\n career_id = tool.get_param_by_request(request.GET, 'careerId', 0, int)\r\n\r\n enterprise = APIResult()\r\n c = None\r\n if action == \"add\":\r\n c = {\"career_id\": career_id, \"action\": action}\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_edit.html\", c,\r\n context_instance=RequestContext(request))\r\n\r\n if action == \"edit\" and (not career_id):\r\n _id = tool.get_param_by_request(request.GET, 'enterpriseId', 0, int)\r\n enterprise = api_enterprise.get_career_page_enterprise_by_id(_id)\r\n c = {\"enterprises\": enterprise.result()[0], \"action\": action}\r\n if enterprise.is_error():\r\n return render_to_response(\"404.html\", {}, context_instance=RequestContext(request))\r\n else:\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_edit.html\", c,\r\n context_instance=RequestContext(request))\r\n\r\n if action == \"edit\" and career_id:\r\n enterprise = api_enterprise.list_career_page_enterprise_by_career_id(career_id)\r\n c = {\"enterprises\": enterprise.result(), \"action\": action}\r\n\r\n if enterprise.is_error():\r\n return render_to_response(\"404.html\", {}, context_instance=RequestContext(request))\r\n else:\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_list.html\", c,\r\n context_instance=RequestContext(request))", "def employee_list(request):\n response_data = []\n for emp in Employee.objects.all().values(\n 'id', 'first_name', 'last_name', 'age', 'address', 'city',\n 'state', 'country'):\n response_data.append(emp)\n return JsonResponse(response_data, safe=False)", "def get_manager_employees(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n manager_employees = Employee.objects.filter(manager=current_employee, development_plan_type=None).all()\n if manager_employees:\n emp_list=[]\n for emp in manager_employees:\n emp_data={}\n emp_data[\"id\"] = emp.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"manager_id\"] = emp.manager.id\n # emp_data[\"status_questions\"] = emp.status_questions\n # employee_role = EmployeeRole.objects.filter(employee=emp).all()\n # name_role_list = []\n # for obj in employee_role:\n # name_role_list.append(obj.role.name)\n # emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n data = 
{\"employees:\": emp_list}\n return JsonResponse(status=201, data=data)\n else:\n return JsonResponse(\"The user with id={} isn't a manager for any user\".format(current_employee.user.id),\n status=404)", "def show_all_information():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n all_certs = employee_certification.query.all()\n \n return render_template(\"admin.html\", employees = employees, all_certs = all_certs)", "def action_list(request, employee_id=None):\n if employee_id:\n employee = Employee.objects.get(pk=employee_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n else:\n employee = request.user.employee_user.first()\n actions = employee.action_set.all()\n return TemplateResponse(\n request,\n 'mus/action_list.html',\n dict(\n actions=actions,\n employee=employee\n )\n )", "def detail(request, event_id):\n event = get_object_or_404(Event, pk=event_id)\n user = request.user\n return render(request, 'kvent/event-detail.html', {'event': event, 'user': user})", "def get(self):\n employees = self.service.get_employees(strategy=selectinload)\n return self.schema.dump(employees, many=True), 200", "def employee(self) -> object:\n return self._employee", "def event_collaborator_detail(request, event_id, collaborator_id):\n if request.method == 'GET':\n event = get_object_or_404(Event, pk=event_id)\n collaborator = Employee.objects.all().filter(event=event, pk=collaborator_id)\n if collaborator:\n is_registered = True\n else:\n is_registered = False\n serializer = CollaboratorAttendanceSerializer(event, context={'is_registered': is_registered})\n return Response(serializer.data, status=status.HTTP_200_OK)", "def get_employee_by_id(self, employee_id):\n employee = self.admin_repository.get_employee_by_id(employee_id)\n if employee:\n print('''Name: {}\\nEmail: {}\\n\n '''.format(employee[0], employee[1]))\n return employee\n else:\n print(\"Invalid Id\")\n return False", "def get_employee_by_name(self, name):\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE name=%s', (name,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])", "def office_view(request, slug, id):\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n office_reference = get_object_or_404(Office, id=id,company=company)\n\n return render_to_response('office_form.html', \n {'details': office_reference,'info':office_reference},\n context_instance=RequestContext(request))", "def get_employee(self, id):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT * FROM employee WHERE employeeID=%s ', (id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])", "def detail(request, pk):\n mineral = get_object_or_404(Mineral, pk=pk)\n return render(request, 'detail.html', {'mineral': mineral})", "def list(self, request):\n teams = self.controller.retrieve_all_teams_employees()\n serializer = 
data_serializers.PresentTeamEmployeeDataSerializer(teams, many=True)\n return Response(serializer.data)", "def employees_manager(request):\n # current_employee = Employee.objects.get(user__pk=request.user.pk)\n manager_list = Employee.objects.filter(manager=request.user.employee_user, is_manager=True)\n employee = Employee.objects.get(pk=request.user.employee_user.id)\n employee_dict = model_to_dict(employee)\n employee_dict['first_name'] = employee.user.first_name\n employee_dict['last_name'] = employee.user.last_name\n employee_dict['photo'] = employee.photo.url if employee.photo else ''\n print employee_dict\n if len(manager_list) > 0:\n result_list = list(manager_list)\n all_managers_list = found_all_managers(manager_list, result_list)\n else:\n data = {\"employee_managers\": employee_dict}\n return JsonResponse(data=data, content_type='application/json', safe=False)\n employees = list()\n for manager in all_managers_list:\n manager_dict = model_to_dict(manager)\n manager_dict['first_name'] = manager.user.first_name\n manager_dict['last_name'] = manager.user.last_name\n manager_dict['photo'] = manager.photo.url if manager.photo else ''\n employees.append(manager_dict)\n employees.append(employee_dict)\n\n data = {\"employee_managers\": employees}\n return JsonResponse(data=data, content_type='application/json', safe=False)", "def display_certs(employee_id):\n\n if not g.user:\n flash(\"Please Login to continue.\", \"danger\")\n return redirect(\"/\")\n\n employee = Employee.query.get_or_404(employee_id)\n \n certs = employee_certification.query.filter_by(employee_id = employee_id).all()\n \n all_certs = Cert.query.all()\n \n return render_template(\"users/display_cert.html\", employee = employee, certs = certs, all_certs = all_certs)", "def __str__(self):\n return \"Employee attributes {}, {}, {} ,{}, {}, {}\". 
\\\n format(self._last_name, self._first_name, self._address, self._phone_number,\n self._start_date, self._salary)", "def get(self, request):\n employee = EmployeeDetail.objects.all()\n response = {\n 'payment_methods': EmployeeSerializer(\n employee,\n many=True\n ).data\n }\n return Response(response)", "def delete(self, request, pk):\n employee = EmployeeDetail.objects.get(pk=pk)\n employee.delete()\n return Response(\n data=' Entry deleted',\n status=status.HTTP_400_BAD_REQUEST\n )", "def display_hours(employee_id):\n\n if not g.user:\n flash(\"Please Login to continue.\", \"danger\")\n return redirect(\"/\")\n \n employee = Employee.query.get_or_404(employee_id)\n\n labels = json.dumps( [\"Completed\", \"Required\"])\n data = json.dumps([employee.completed, employee.required])\n \n return render_template(\"users/display_hours.html\", employee = employee, labels = labels, data = data)", "def retrieve(self, request, pk=None):\n try:\n team_employee = self.get_team_employee_object(pk)\n serializer = data_serializers.PresentTeamEmployeeDataSerializer(team_employee)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (\n domain_exceptions.TeamDoesNotExist\n )as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)", "def get_employee(self):\n employee_ids = self.env['hr.employee'].search([('user_id', '=', self.env.uid)])\n return employee_ids[0] if employee_ids else False", "def view_attendance(request):\n\n\tcontext_dict = {\n\t\t'title': 'All Attendance',\n\t}\n\treturn render(request, \"viewAttendance.html\", context_dict)", "def get_employee_information(user_name: str, employee_name: str, store_name: str):\n\n user_name = auth.get_username_from_hash(user_name)\n permission_handler.is_permmited_to(user_name=user_name, action=Action.EMPLOYEE_INFO.value,\n store_name=store_name)\n permission_handler.is_working_in_store(employee_name, store_name)\n return user_handler.get_employee_information(employee_name)", "def employees(employee_id=None):\n\tif not employee_id:\n\t\temployee_data = _serialize_list(Employee.query.all())\n\telse:\n\t\temployee_data = _serialize_model(Employee.query.filter_by(id=employee_id).first())\n\n\tresp = jsonify(employee_data)\n\treturn resp", "def all_employees(request, company_id=None):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n company_super_user = current_employee.isCompanySuperUserOrHigher()\n if company_id:\n company = Company.objects.get(pk=company_id)\n else:\n company = current_employee.company\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n raise PermissionDenied()\n change_company_form = ChangeCompanyForm(initial=dict(company=company))\n return TemplateResponse(\n request,\n 'all_employees.html',\n {\n 'user': request.user,\n 'company_super_user': company_super_user,\n 'company': company,\n 'change_company_form': change_company_form,\n }\n )", "def view_experiment(request,id):\n\texp = Experiment.objects.get(id=id)\n\tpossibly_related = get_related(exp)\n\treturn list_detail.object_detail(request,\n\t\t\t\t\t\t\t\t\tqueryset=Experiment.objects.filter(id=id),\n\t\t\t\t\t\t\t\t\tobject_id=exp.id,\n\t\t\t\t\t\t\t\t\ttemplate_name='experiments/experiment.html',\n\t\t\t\t\t\t\t\t\textra_context= {\"possibly_related\" : possibly_related})", "def event_detail():\n # getting event id from homepage \n event_id = request.args.get('eventId')\n # counting the total number of registeration for an event.\n registrant_count = 
db.session.query(Register).filter(Register.event_id ==event_id).count()\n event = db.session.query(Event).filter(Event.event_id == event_id).first()\n format = '%a %I:%M %p %b %d, %y'\n event.date = event.date.strftime(format)\n event.time = event.time.strftime(format)\n location = event.location\n return render_template(\"event.html\", event= event, registrant_count=registrant_count)", "def get_queryset(self, request):\n return models.Employee.objects.exclude(username='root')", "def edit_employee(employee_id):\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employee = Employee.query.get_or_404(employee_id)\n form = Edit_User_Form(obj = employee)\n \n #form.location.choices = db.session.query(Location.id, Location.site_name).all()\n \n #form.certs.choices = db.session.query(Certs.id , Certs.cert_name).all()\n\n if form.validate_on_submit():\n \n employee.email = form.email.data, \n employee.first_name = form.first_name.data,\n employee.last_name = form.last_name.data,\n employee.hire_date = form.hire_date.data, \n employee.is_admin = form.is_admin.data\n\n \n db.session.commit()\n \n flash(f\"{employee.first_name} {employee.last_name} has been saved\", \"success\")\n return redirect(\"/administrator\")\n else:\n\n return render_template(\"/admin/edit_user.html\", employee = employee, form = form)", "def employees_json_id(request, employee_id):\n curent_employee = Employee.objects.get(pk=int(employee_id))\n if curent_employee.is_manager:\n employee_list = Employee.objects.filter(manager=curent_employee)\n employees = list()\n for employee in employee_list:\n manager_dict = model_to_dict(employee)\n manager_dict['first_name'] = employee.user.first_name\n manager_dict['last_name'] = employee.user.last_name\n manager_dict['photo'] = employee.photo.url if employee.photo else ''\n employees.append(manager_dict)\n data = {\"employees\": employees}\n else:\n return JsonResponse(status=400, data={\"error\": \"Employee with id={} not is_manager\".format(int(employee_id))})\n return JsonResponse(data=data, content_type='application/json', safe=False)", "def test_api_can_get_employee_by_id(self):\n res = self.client().get(service_url_emp+'/1')\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))", "def delegation_show(request, pk):\n delegation = Delegation.objects.get(pk=pk)\n\n delegates = Delegate.objects.filter(delegation_id=pk).order_by(\"committee__name\")\n\n context = {\"delegation\": delegation, \"delegates\": delegates, \"delegation_show\": True}\n template = \"jurycore/delegation_show.html\"\n return render(request, template, context)", "def show_department(id_: int):\n\n logger.debug('Routed to /departments/%i', id_)\n titles = ['Name', 'Average Salary', 'Employees', 'E-mail']\n department = None\n\n try:\n department = ds.get(id_)\n except IntegrityError:\n logger.error(\"Can't find employee with id %i\", id_)\n abort(404)\n\n logger.info('Get department %s', department.name)\n return render_template('department.html',\n title=f'Department {department.name}',\n table_title=f'Department: {department.name}',\n headers=titles,\n department=department)", "def get_employees(self):\n return self.employees", "def delete_employee():\r\n id = request.args.get('id', \"\")\r\n return render_template(\"delete_employee.html\", id=id)", "def get(self, uuid: str):\n try:\n employee = self.service.get_employee_by_uuid(uuid)\n 
except ValueError:\n return self.NOT_FOUND_MESSAGE, 404\n return self.schema.dump(employee), 200", "def event_participant_detail(request, event_id, participant_id):\n if request.method == 'GET':\n event = get_object_or_404(Event, pk=event_id)\n participant = get_object_or_404(Participant, pk=participant_id)\n try:\n attendance = Attendance.objects.get(event=event, participant=participant)\n except:\n attendance = Attendance(participant=participant, event=event, is_registered=False)\n serializer = AttendanceSerializer(attendance)\n return Response(serializer.data, status=status.HTTP_200_OK)", "def badges_employee_list(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_bages = EmployeeBadge.objects.filter(to_user=employee)\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(employee_bages, request)\n serializer = EmployeeBadgeSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)", "def get_details(office_id):\n\n office = OfficeModel()\n office_exists = office.get_one(office_id)\n print(office)\n if office_exists is not None:\n return make_response(jsonify(\n {'status': 200, 'data': office.sub_set()}\n ), 200)\n\n return make_response(jsonify(\n {'status': 404,\n \"error\": 'Office with id {} not found'.format(office_id)}\n ), 404)", "def get_emp_data(self,employee):\n\t\temp = None\n\t\tfind_by = employee.find_elements_by_tag_name\n\t\tif str(type(employee)) != \"<type 'NoneType'>\" and main.is_desktop():\n\t\t\t# columns = employee.find_elements_by_tag_name(\"td\")\n\t\t\temp = {\n\t\t\t\t'name': find_by('td')[0].text,\n\t\t\t\t'id': find_by('td')[1].text,\n\t\t\t\t'status': find_by('td')[2].text,\n\t\t\t\t'election': find_by('td')[3].text,\n\t\t\t\t'date_changed': find_by('td')[4].text\n\t\t\t}\n\t\telif str(type(employee)) != \"<type 'NoneType'>\":\n\t\t\temp = {\n\t\t\t\t'name': find_by('div')[2].text,\n\t\t\t\t'id': find_by('div')[3].text[13:],\n\t\t\t\t'status': find_by('div')[4].text[8:], #Fail 4:20p, StaleEl\n\t\t\t\t'election': find_by('div')[5].text[17:], #Fail 4:15p, StaleEl\n\t\t\t\t'date_changed': find_by('div')[6].text[14:]\n\t\t\t}\n\n\t\t# raw_input(str(emp))\n\t\treturn emp", "def retrieve(self, request, pk=None):\n\n try:\n expense = Expenses.objects.get(pk=pk)\n serializer = ExpenseSerializer(\n expense, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)", "def holiday_detail(request, holiday_id):\n\n all_holidays = Product.objects.filter(is_holiday=True)\n holiday = get_object_or_404(all_holidays, pk=holiday_id)\n itinerary = Itinerary.objects.get(holiday=holiday)\n itinerary_day = ItineraryDay.objects.filter(itinerary=itinerary)\n faq = Faq.objects.filter(holiday=holiday)\n\n context = {\n 'holiday': holiday,\n 'itinerary': itinerary,\n 'itinerary_day': itinerary_day,\n 'faq': faq,\n }\n\n return render(request, 'products/holiday_detail.html', context)", "def computer_detail(request, computer_id):\n\n computer = get_object_or_404(Computer, pk=computer_id)\n current_assignment = EmployeeComputer.objects.filter(computer_id=computer_id).filter(date_revoked=None)\n assignment_history = EmployeeComputer.objects.filter(computer_id=computer_id).exclude(date_revoked=None).order_by('-date_assigned')\n\n context = {\n \"computer\": computer,\n \"current_assignment\": current_assignment,\n \"assignment_history\": assignment_history\n }\n\n return render(request, 
\"agileHR/computer_detail.html\", context)", "def getEmployees(self):\n return self.employees", "def start_view(request):\n\n if request.user and Employee.objects.filter(user__pk=request.user.pk).exists():\n if Employee.objects.get(user__pk=request.user.pk).is_manager:\n return HttpResponseRedirect('/dashboard')\n else:\n return HttpResponseRedirect('/employee/show/%d/' % request.user.employee_user.first().pk)\n else:\n return HttpResponseRedirect('/login/')", "def development_plan_details(request, development_plan_id): #, employee_id ):\n # employee = Employee.objects.get(user__pk=request.user.pk)\n # employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n current_employee = Employee.objects.filter(user__pk=request.user.pk).first()\n all_employees = development_plan.employee_relation.all()\n\n try:\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = development_plan.id\n dev_plan[\"deleted\"] = development_plan.deleted\n if development_plan.type:\n dev_plan[\"type\"] = development_plan.type.name\n # dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects.get(development_plan = development_plan)\\\n # .finished_at\n\n dev_plan[\"created_at\"] = development_plan.created_at\n dev_plan[\"created_by\"] = development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n emp_data[\"dev_plan_finished_at\"] = DevelopmentPlanToEmployeeRelation\\\n .objects.get(employee=emp,\n development_plan = development_plan)\\\n .finished_at\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n if all_questions:\n for 
question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id).first() #employee=current_employee\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for slider in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id).first() #employee=current_employee\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n except DevelopmentPlan.DoesNotExist:\n return JsonResponse(data={\"details\":\"Development Plan with this id doesn't exist\"}, status=404)", "def eventdetails(http_request, event_id=0):\n\te = get_object_or_404(Event, pk=event_id)\n\tweather = list(Weather.objects.filter(day=e.edate).filter(zip=e.zip))\n\tif len(weather) == 0:\n\t\tw = None\n\telse:\n\t\tw = weather[0]\n\treturn render_to_response('event_detail.html', {'event': e,\n\t\t\t\t\t\t\t'w': w })", "def create_employee(request, company_id):\n\n company = Company.objects.get(pk=company_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n logUnauthorizedAccess(\"User tried to create_employee\", request)\n raise PermissionDenied()\n form = EmployeeForm(request, initial=dict(company=company))\n form.fields['manager'].queryset = Employee.objects.filter(is_manager=True, company=company)\n # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(\n # Q(company=company) | Q(company__isnull=True))\n # data = {\n # 'employee_form': form.cleaned_data,\n # 'company': company.cleaned_data[\"name\"]\n # }\n\n return TemplateResponse(\n request,\n 'mus/create_employee_form.html',\n {\n 'employee_form': form,\n }\n )\n # data = {\n # 
'employee_form': form.cleaned_data,\n # 'company': company.cleaned_data[\"name\"]\n # }\n # return JsonResponse(status=200, data=data)", "def employee_list_group_by_badges_detail(request, badge_id):\n if request.method == 'GET':\n badge = get_object_or_404(Badge, pk=badge_id)\n employee_list = EmployeeBadge.objects.filter(badge=badge).values(\n 'to_user__pk',\n 'to_user__username',\n 'to_user__first_name',\n 'to_user__last_name',\n 'to_user__level',\n 'to_user__avatar')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(employee_list, request)\n serializer = EmployeeGroupedListSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)", "def test_new_employee_crud_methods(self):\n response = self.client.get(\n '/employees/', kwargs={'employer_id': self.employee.id})\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(Employee.objects.all()), 1)\n\n # Test that a new employee can be added\n response = self.client.post(\n '/employees/',\n {'name': 'MAdtraxx!!', 'employer': self.employer.id},\n kwargs={'pk': self.employer.id})\n self.assertEqual(response.status_code, 201)\n self.assertEqual(Employee.objects.count(), 2)\n\n # Test that employee info may be edited\n response = self.client.put('/employees/1/',\n {'name': 'Ashley',\n 'employer': self.employer.id},\n kwargs={'employer_id': self.employee.id,\n 'pk': self.employee.id})\n self.assertEqual(response.status_code, 200)", "def detail(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n return render(request, 'problemfinder/details.html', {'question': question})", "def get_employee(self, name):\n name = name.upper()\n if name in EMPLOYEE_MAP:\n name = EMPLOYEE_MAP[name]\n try:\n int(name)\n emps = Employee.objects.filter(id=name)\n except ValueError:\n if name == 'NN':\n emps = Employee.objects.filter(user__first_name='Nieznany')\n elif Employee.objects.filter(user__username__iexact=name).exists():\n emps = Employee.objects.filter(user__username__iexact=name)\n elif len(name) == 3:\n emps = Employee.objects.filter(user__first_name__istartswith=name[0],\n user__last_name__istartswith=name[1:3],\n status=0)\n else:\n emps = Employee.objects.filter(user__first_name__istartswith=name[0],\n user__last_name__istartswith=name[1:],\n status=0)\n if not emps:\n emps = Employee.objects.filter(user__username__istartswith=name)\n if len(emps) == 1:\n return emps[0]\n elif len(emps) > 1:\n self.stdout.write(self.style.ERROR('Multiple employee matches for {}. Choices are:'\n .format(name)))\n for e in emps:\n self.stdout.write(self.style.ERROR(' -{}'.format(e.user.get_full_name())))\n else:\n raise CommandError('Employee {} does not exists! 
Fix your input file.'.format(name))\n\n return None", "def record_detail(request, slug, pk):\n # Try except to make sure the user is a member of this project\n try:\n ProjectMember.objects.get(user=request.user, project=Project.objects.get(slug=slug))\n except ObjectDoesNotExist:\n # User is not a member\n return HttpResponse(\"You're trying to access a project you're not a member of or a project that does not exist.\")\n else:\n # User is a member, details are provided and template is rendered.\n record = get_object_or_404(models.Record, pk=pk)\n project = models.Project.objects.get(slug=slug)\n template = 'records/record_detail.html'\n data = forms.ShowRecordForm(data=model_to_dict(record), entry=record.entry_type)\n context = {\n 'record':record,\n 'project':project,\n 'userperm':project.memberships.get(user=request.user),\n 'data':data\n }\n return render(request,template,context)", "def explore_view(request):\r\n # explore items\r\n user = request.user.userprofile\r\n items = Item.objects.explore(user)\r\n context = {'items':items}\r\n return render(request, 'explore/explore.html', context)", "def detail(request, article_id):\n return render(request, 'knowledgebase/detail.html', {'article_id': article_id})", "def show(self, req, id):\n context = req.environ['meteos.context']\n\n try:\n model = self.engine_api.get_model(context, id)\n except exception.NotFound:\n raise exc.HTTPNotFound()\n\n return self._view_builder.detail(req, model)", "def about(request):\n realtors = Realtor.objects.order_by('-hire_date')\n mvp_realtors = Realtor.objects.all().filter(is_mvp=True)\n context = {\n 'realtors': realtors,\n 'mvp_realtors': mvp_realtors\n }\n return render(request, 'pages/about.html', context)", "def department(department_id):\n # gather data from db about all employees\n return render_template(\"department.html\",\n department_id=department_id)", "def find_employee_by_id(self,id):\n self.employee_id()\n if id in self.emp_id:\n print(self.emp_id[id])\n return self.emp_id[id]\n else:\n print(\"Employee not found\")", "def lookup_employee():\n unique_names = get_unique_employees()\n while True:\n if len(unique_names) > 1:\n print('Entries found by {} and {}.'.format(\n ', '.join(unique_names[:-1]),\n unique_names[-1]))\n elif len(unique_names) == 1:\n print('Entries found by {}.'.format(unique_names[0]))\n\n search_query = input('Show entries by: ')\n if validate_lookup_employee_format(search_query):\n break\n print('** Please enter a name of alphabetic characters and spaces **')\n return Entry.select().where(Entry.employee_name == search_query)", "def stars_employee_list_group_by_category_detail(request, employee_id, category_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n category = get_object_or_404(Category, pk=category_id)\n stars = Star.objects.filter(to_user=employee, category=category).order_by('-date')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(stars, request)\n serializer = StarSmallSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)", "def employee_login():\n return Response(render_template('admin/login.html'))", "def detail_assignmentype(request, pk):\n prof = request.user.prof\n context = {'prof': prof}\n assignmentype = Assignmentype.objects.filter(pk=pk, prof=prof).first()\n assignments = assignmentype.assignment_set.\\\n annotate(std=StdDev('evalassignment__grade_assignment'),\n mean=Avg('evalassignment__grade_assignment'))\n if assignmentype:\n 
context['assignmentype'] = assignmentype\n context['assignments'] = assignments\n context['range_grades'] = range(assignmentype.nb_grading)\n return render(request, 'gradapp/detail_assignmentype.html',\n context)\n else:\n return redirect('gradapp:list_assignmentypes_running')", "def detail(request, reachcode):\n lake = get_object_or_404(Lake, reachcode=reachcode)\n photos = Photo.objects.filter(lake=lake)\n documents = Document.objects.filter(lake=lake)\n plants = lake.plants.all()\n return render(request, \"lakes/detail.html\", {\n \"lake\": lake,\n \"photos\": photos,\n \"documents\": documents,\n \"plants\": plants,\n })", "def employees_json(request):\n # current_employee = Employee.objects.get(user__pk=request.user.pk)\n employee_list = Employee.objects.filter(manager=request.user.employee_user)\n employees = list()\n for employee in employee_list:\n manager_dict = model_to_dict(employee)\n manager_dict['first_name'] = employee.user.first_name\n manager_dict['last_name'] = employee.user.last_name\n employees.append(manager_dict)\n data = {\"employees\": employees}\n return JsonResponse(data=data, content_type='application/json', safe=False)", "def team_details(request, id):\n template = loader.get_template('team/details.html')\n\n try:\n team = Team.objects.get(pk=id)\n team_members = User.objects.filter(profile__team=team)\n\n context = {\n 'team_name': team.name,\n 'team_info': team.information,\n 'team_logo': team.logo,\n 'team_members': team_members,\n 'days': Information.getDaysToContest()\n }\n\n except Team.DoesNotExist:\n context = None\n\n return CustomHttpResponse.send(template, context, request)", "def person(request, pk):\r\n person = get_object_or_404(Person, pk=pk)\r\n return HttpResponse('Person: %s' % person)" ]
[ "0.75270534", "0.71302915", "0.71097803", "0.6996392", "0.6907323", "0.6882247", "0.6820251", "0.6622057", "0.65266895", "0.6458678", "0.63968426", "0.6358651", "0.6349126", "0.63481754", "0.6346942", "0.63395876", "0.62652826", "0.62371093", "0.6237032", "0.6230674", "0.6204776", "0.6199204", "0.6147837", "0.6123448", "0.61137253", "0.61093146", "0.6045303", "0.60056365", "0.59753907", "0.5970024", "0.59397614", "0.59347624", "0.5917986", "0.5909123", "0.59037334", "0.58962065", "0.5874488", "0.5869599", "0.5839436", "0.5826282", "0.582445", "0.58020496", "0.5796626", "0.5757545", "0.57563347", "0.5755614", "0.5750246", "0.57444644", "0.5735266", "0.57339376", "0.5718938", "0.5717022", "0.5711695", "0.57020116", "0.56955725", "0.5693042", "0.56751096", "0.5670648", "0.56669945", "0.56560564", "0.56285113", "0.56066614", "0.55978084", "0.5562328", "0.55515295", "0.55408454", "0.5530477", "0.55070126", "0.54963", "0.54848593", "0.54738253", "0.5463432", "0.5452122", "0.5444176", "0.54206455", "0.5418186", "0.5411284", "0.538734", "0.53805006", "0.5378946", "0.537372", "0.5371442", "0.53631544", "0.5359077", "0.5353271", "0.5346888", "0.5345829", "0.5340531", "0.53292304", "0.53279984", "0.53217137", "0.53148925", "0.5314146", "0.530686", "0.5274361", "0.527151", "0.5271126", "0.52632093", "0.52623594", "0.5259857" ]
0.6460907
9
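A record in this dump ends here: the query, its gold code document, metadata, the list of negative documents, their scores, and finally the gold document's score and rank. The following is a minimal, hypothetical sketch of reading such records, assuming they are exported one-per-line as JSONL under the same field names shown above ("pairs.jsonl" is an assumed filename, the index alignment of "negatives" and "negative_scores" is inferred from their matching lengths, and the string-typed scores are cast before comparison); it is an illustration of the record shape, not a loader shipped with this dataset.

# Hypothetical sketch (assumptions: JSONL export named "pairs.jsonl",
# "negatives" and "negative_scores" are index-aligned, scores stored as strings).
import json

def hardest_negative(record):
    # Cast string scores to floats, then pick the highest-scoring negative.
    scores = [float(s) for s in record["negative_scores"]]
    best = max(range(len(scores)), key=scores.__getitem__)
    return record["negatives"][best], scores[best]

with open("pairs.jsonl", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        neg, score = hardest_negative(record)
        print(record["query"])
        print("  gold score/rank:", record["document_score"], "/", record["document_rank"])
        # Show only the first line of the hardest negative's code for brevity.
        print("  hardest negative (%.4f): %s" % (score, neg.splitlines()[0]))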
View for creating employee in company
def create_employee(request, company_id): company = Company.objects.get(pk=company_id) current_employee = Employee.objects.get(user__pk=request.user.pk) if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk: logUnauthorizedAccess("User tried to create_employee", request) raise PermissionDenied() form = EmployeeForm(request, initial=dict(company=company)) form.fields['manager'].queryset = Employee.objects.filter(is_manager=True, company=company) # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter( # Q(company=company) | Q(company__isnull=True)) # data = { # 'employee_form': form.cleaned_data, # 'company': company.cleaned_data["name"] # } return TemplateResponse( request, 'mus/create_employee_form.html', { 'employee_form': form, } ) # data = { # 'employee_form': form.cleaned_data, # 'company': company.cleaned_data["name"] # } # return JsonResponse(status=200, data=data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_leader_model(request, company_id):\n\n errors = {'noactions': []}\n company = Company.objects.get(pk=company_id)\n currentEmpl = Employee.objects.get(user__pk=request.user.pk)\n \"\"\":type : Employee \"\"\"\n\n if not currentEmpl.isEnsoUser() and currentEmpl.company.pk != company.pk:\n raise PermissionDenied()\n\n if currentEmpl.isCompanySuperUserOrHigher():\n employeeQS = Employee.objects.filter(\n company__pk=company_id\n )\n else:\n employeeQS = Employee.objects.filter(\n Q(manager=currentEmpl),\n company__pk=company_id\n )\n\n form = MultiLeaderModelForm(request.POST or None)\n form.fields['employees'].queryset = employeeQS\n\n if request.method == 'POST' and form.is_valid():\n\n employees = form.cleaned_data['employees']\n \"\"\":type : list[Employee] \"\"\"\n\n pdf_response = get_leader_model_pdf(currentEmpl, employees)\n\n if isinstance(pdf_response, HttpResponse):\n return pdf_response\n else:\n errors = pdf_response\n\n print(errors)\n\n return TemplateResponse(\n request,\n 'mus/create_leader_model.html', {\n 'form': form,\n 'company': company,\n 'errors': errors\n }\n )", "def employee():\n return Response(render_template('employee/employee.html'))", "def office_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n office_form = OfficeForm()\n return render_to_response('office_form.html', {'form': office_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n office_form = OfficeForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if office_form.is_valid():\n of = office_form.save(commit=False)\n of.company = company\n of.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('office_form.html', \n {'form': office_form, 'form_errors': office_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def management_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n management_form = ManagementForm()\n return render_to_response('management_form.html', {'form': management_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n management_form = ManagementForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if management_form.is_valid():\n mf = management_form.save(commit=False)\n mf.company = company\n mf.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('management_form.html', \n {'form': management_form, 'form_errors': management_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def company():\n\n company = Company.objects.create(name='Tre G.M.B.H.', country='Germany')\n return company", "def create_many_employees(request, company_id=None):\n company = Company.objects.get(pk=company_id)\n 
current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n raise PermissionDenied()\n if \"upload\" in request.POST:\n form = UploadEmployeesForm(request.POST, request.FILES)\n if form.is_valid():\n data = csv_to_dict(request.FILES['file'])\n request.session['upload_employees'] = data\n return JsonResponse(status=201, data=form.cleaned_data)\n # return TemplateResponse(\n # request,\n # 'mus/create_many_employees_uploaded.html',\n # dict(data=data, company=company)\n # )\n elif \"next\" in request.POST:\n data = request.session['upload_employees']\n marked_data = list()\n fields = request.POST.getlist('field[]')\n for row in data:\n new_row = dict(is_manager=False)\n for i, item in enumerate(row):\n field_id = int(fields[i])\n if field_id == 1:\n new_row['first_name'] = item\n elif field_id == 2:\n new_row['last_name'] = item\n elif field_id == 3:\n p = item.partition(\" \")\n new_row['first_name'] = p[0]\n new_row['last_name'] = p[2]\n elif field_id == 4:\n new_row['email'] = item\n elif field_id == 5:\n new_row['username'] = item\n marked_data.append(new_row)\n formset = EmployeeRowFormSet(initial=marked_data)\n # TypeQS = DevelopmentPlanType.objects.filter(Q(company=company) | Q(company__isnull=True))\n # for form in formset:\n # form.fields['development_plan_type'].queryset = TypeQS\n return TemplateResponse(\n request,\n 'mus/create_many_employees_form.html',\n dict(formset=formset, company=company)\n )\n elif \"next2\" in request.POST:\n formset = EmployeeRowFormSet(request.POST)\n if formset.is_valid():\n data = list()", "def create(self, request):\n serializer = data_serializers.CreateEmployeeSerializer(data=request.data)\n\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n print(F\"Request employee Data: {serializer.data}\")\n\n try:\n new_employee = self.controller.create_employee(request_data=request_data)\n serializer = data_serializers.PresentEmployeeDataSerializer(new_employee)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (domain_exceptions.EmployeeIDIsNotUnique,\n domain_exceptions.WorkArrangementPercentageOutOfRange,\n domain_exceptions.TeamDoesNotExist,\n domain_exceptions.TeamHasALeader,\n domain_exceptions.WorkArrangementPercentageNull\n ) as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def create_employee(self):\n try:\n name = input(\"Enter name: \")\n if not name.isalpha():\n print(\"Invalid data format. Name should contain only alphabets. 
\")\n return False\n email = input(\"Enter email: \")\n if not InputValidations.validate_email(email):\n return False\n employee = EmployeeModel(name=name, email=email)\n self.admin_repository.create_employee(employee)\n print(\"Employee created successfully!\")\n return True\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def create_calendar(request):\n if request.method == 'POST':\n\n form = CalendarForm(request.POST)\n \n if form.is_valid():\n calendar = form.save(commit=False) # prvent form from saving since we need to link company\n calendar.company = request.user.company\n calendar.save()\n return redirect('appointment:calendar_list')\n else:\n form = CalendarForm()\n return render(request, 'calendar_form.html', {'form': form})", "def competitors_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n competitors_form = CompetitorsForm()\n return render_to_response('competitors_form.html', {'form': competitors_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n competitors_form = CompetitorsForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if competitors_form.is_valid():\n cf = competitors_form.save(commit=False)\n\n #verify if other companies with the same info exists anywhere\n try: \n comparison = Competitors.objects.get(name=cf.name,company= company)\n \n if str(comparison.name) != str(cf.name):\n cf.company = company\n cf.save()\n \n else:\n form_errors = {\"Name - The competitor \" + str(comparison.name).capitalize() + \" has been already created for \"+ str(company.name).capitalize() + \".\"}\n return render_to_response('competitors_form.html', \n {'form': competitors_form, 'form_errors': form_errors, 'company':company},\n context_instance=RequestContext(request))\n\n except Competitors.DoesNotExist :\n cf.company = company\n cf.save()\n\n\n \n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('competitors_form.html', \n {'form': competitors_form, 'form_errors': competitors_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def all_employees(request, company_id=None):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n company_super_user = current_employee.isCompanySuperUserOrHigher()\n if company_id:\n company = Company.objects.get(pk=company_id)\n else:\n company = current_employee.company\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n raise PermissionDenied()\n change_company_form = ChangeCompanyForm(initial=dict(company=company))\n return TemplateResponse(\n request,\n 'all_employees.html',\n {\n 'user': request.user,\n 'company_super_user': company_super_user,\n 'company': company,\n 'change_company_form': change_company_form,\n }\n )", "def post(self):\n try:\n employee = self.service.add_employee(self.schema, request.json)\n except ValidationError as error:\n return error.messages, 400\n return self.schema.dump(employee), 201", "def 
index(request):\n return render(request, 'companies/index.html', {'companyform': CompanyForm()})", "def test_new_employee_crud_methods(self):\n response = self.client.get(\n '/employees/', kwargs={'employer_id': self.employee.id})\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(Employee.objects.all()), 1)\n\n # Test that a new employee can be added\n response = self.client.post(\n '/employees/',\n {'name': 'MAdtraxx!!', 'employer': self.employer.id},\n kwargs={'pk': self.employer.id})\n self.assertEqual(response.status_code, 201)\n self.assertEqual(Employee.objects.count(), 2)\n\n # Test that employee info may be edited\n response = self.client.put('/employees/1/',\n {'name': 'Ashley',\n 'employer': self.employer.id},\n kwargs={'employer_id': self.employee.id,\n 'pk': self.employee.id})\n self.assertEqual(response.status_code, 200)", "def get(self,request,*args,**kwargs):\n\n\t\tsucursal = Sucursal.objects.get(id=kwargs['spk'])\n\n\t\tuser_form = UserForm()\n\t\templeado_form = EmpleadoForm( initial={'sucursal':sucursal.id} )\n\n\t\tforms = [user_form,empleado_form]\n\t\tcontext = {\n\t\t'section_title':'Nuevo Empleado',\n\t\t'button_text':'Crear',\n\t\t'sucursal':sucursal,\n\t\t'user_form':user_form,\n\t\t'empleado_form':empleado_form }\n\n\t\treturn render_to_response(\n\t\t\t'empleado/empleado_form.html',\n\t\t\tcontext,\n\t\t\tcontext_instance=RequestContext(request))", "def certification_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n certification_form = CertificationForm()\n return render_to_response('certification_form.html', {'form': certification_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n certification_form = CertificationForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if certification_form.is_valid():\n of = certification_form.save(commit=False)\n of.company = company\n of.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('certification_form.html', \n {'form': certification_form, 'form_errors': certification_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def employee_detail(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n employee = Employee.objects.get(pk=int(employee_id))\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not current_employee.pk == int(employee_id):\n if not current_employee.is_manager or not current_employee.company.pk == employee.company.pk:\n if not current_employee.isCompanySuperUserOrHigher():\n return HttpResponse('unauthorized', status=401)\n\n user_files = get_files_for_employee(employee_id)\n\n if request.method == 'POST':\n\n upload_form = UploadFileToEmployeyForm(request.POST, request.FILES)\n form = EmployeeNoteForm(request.POST, instance=employee)\n\n if 'upload' in request.POST:\n if upload_form.is_valid():\n upload_form.handle_upload(employee_id, request.FILES['file'])\n\n return HttpResponseRedirect('/employee/show/{}?upload_status=ok#file-list'.format(employee_id))\n\n else:\n if 
form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/employee/show/%d' % form.instance.pk)\n\n else:\n form = EmployeeNoteForm(instance=employee)\n upload_form = UploadFileToEmployeyForm()\n data = {}\n data[\"first_name\"] = employee.user.first_name\n data[\"last_name\"] = employee.user.last_name\n data[\"email\"] = employee.user.email\n data[\"is_manager\"] = employee.is_manager\n data[\"language_code\"] = employee.language_code\n employee_role = EmployeeRole.objects.filter(employee=employee).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n data[\"roles\"] = name_role_list\n return JsonResponse(status=201, data=data)\n # return TemplateResponse(\n # request,\n # 'mus/detail.html',\n # {\n # 'actions': actions,\n # 'employee': employee,\n # # 'development_plans': development_plans,\n # 'form': form,\n # 'upload_form': upload_form,\n # 'user_files': user_files\n # }\n # )", "def customer_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n customer_form = CustomerForm()\n return render_to_response('customer_form.html', {'form': customer_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n customer_form = CustomerForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if customer_form.is_valid():\n of = customer_form.save(commit=False)\n of.company = company\n of.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('customer_form.html', \n {'form': customer_form, 'form_errors': customer_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def create_employee_from_applicant(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n hr_employee = self.pool.get('hr.employee')\n model_data = self.pool.get('ir.model.data')\n act_window = self.pool.get('ir.actions.act_window')\n emp_id = False\n for applicant in self.browse(cr, uid, ids, context=context):\n address_id = contact_name = False\n if applicant.partner_id:\n address_id = self.pool.get('res.partner').address_get(cr, uid, [applicant.partner_id.id], ['contact'])['contact']\n contact_name = self.pool.get('res.partner').name_get(cr, uid, [applicant.partner_id.id])[0][1]\n if applicant.job_id and (applicant.partner_name or contact_name):\n applicant.job_id.write({'no_of_hired_employee': applicant.job_id.no_of_hired_employee + 1})\n create_ctx = dict(context, mail_broadcast=True)\n\n pes=self.browse(cr,uid,ids)[0]\n coy=pes.partner_name\n\n ##### Susunan Keluarga ayah/ibu #####\n le=self.pool.get('hr_recruit.suskel1')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context)\n prod_ids=[] \n for pr in lele:\n prod_ids.append((0,0, {'name':pr.name,'kelamin':pr.kelamin,'kota_id':pr.kota_id.id,'tgl_lahir':pr.tgl_lahir,'type_id':pr.type_id.id,'pekerjaan':pr.pekerjaan,'susunan':pr.susunan}))\n \n ###### Susunan Keluarga Suami/istri #####\n le=self.pool.get('hr_recruit.suskel2')\n 
lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids1=[] \n for pr in lele:\n prod_ids1.append((0,0, {'susunan':pr.susunan,'name':pr.name,'kelamin':pr.kelamin,'kota_id':pr.kota_id.id,'tgl_lahir':pr.tgl_lahir,'type_id':pr.type_id.id,'pekerjaan':pr.pekerjaan})) \n \n ###### riwayat Pendidikan #######\n le=self.pool.get('hr_recruit.rwt_pend')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids2=[] \n for pr in lele:\n prod_ids2.append((0,0, {'name':pr.name,'jurusan':pr.jurusan.id,'tempat':pr.tempat,'tahun_msk':pr.tahun_msk,'tahun_klr':pr.tahun_klr,'ijazah':pr.ijazah.id})) \n \n ###### bahasa ######\n le=self.pool.get('hr_recruit.bahasa')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids3=[] \n for pr in lele:\n prod_ids3.append((0,0, {'name':pr.name.id,'tulis':pr.tulis.id,'lisan':pr.lisan.id})) \n \n ##### Riwayat Pekerjaan ####\n le=self.pool.get('hr_recruit.rwt_krj')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids4=[] \n for pr in lele:\n prod_ids4.append((0,0, {'no':pr.no,'name':pr.name,'tempat':pr.tempat,'tahun_msk':pr.tahun_msk,'tahun_klr':pr.tahun_klr,'jabatan':pr.jabatan,'gaji':pr.gaji,'alasan':pr.alasan})) \n \n ###### Koneksi Internal #####\n le=self.pool.get('hr_recruit.kon1')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids5=[] \n for pr in lele:\n prod_ids5.append((0,0, {'employee_id':pr.employee_id.name,'alamat':pr.alamat,'job_id':pr.job_id.id,'telepon':pr.telepon})) \n \n ###### Koneksi Eksternal ####\n le=self.pool.get('hr_recruit.kon2')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids6=[]\n for pr in lele: \n prod_ids6.append((0,0, {'name':pr.name,'alamat':pr.alamat,'jabatan':pr.jabatan,'telepon':pr.telepon})) \n\n ####### create Employee ######## \n emp_id = hr_employee.create(cr, uid, {'name': applicant.partner_name or applicant.name,\n 'job_id': applicant.job_id.id,\n 'department_id' : applicant.department_id.id,\n 'address_id2' : applicant.job_id.address_id.id,\n #### informasi Probadi ####\n 'kelamin':applicant.jen_kel,\n 'blood' : applicant.blood,\n 'agama' : applicant.agama_id.id,\n 'birthday' : applicant.tgl_lahir,\n 'place_of_birth' : applicant.kota_id.name,\n 'marital':applicant.status,\n 'sjk_tanggal' : applicant.sjk_tanggal,\n 'mobile_phone':applicant.partner_phone,\n 'country_id' : applicant.country_id.id,\n\n #### Pendidikan ####\n 'type_id':applicant.type_id.id,\n 'bid_id':applicant.bidang_id.id,\n 'jurusan_id':applicant.jurusan_id.id,\n 'pt_id':applicant.pt_id.id,\n 'gelar_id':applicant.gelar_id.id,\n\n #### alamat DOmisili ####\n 'country_id1':applicant.country_id1.id,\n 'prov_id':applicant.prov_id.id,\n 'kab_id' : applicant.kab_id.id,\n 'kec_id':applicant.kec_id.id,\n 'alamat1' : applicant.alamat1,\n 'kodepos' :applicant.kode1,\n 'telp1' : applicant.telp1,\n\n #### kartu identitas ####\n 'jenis_id': applicant.jenis_id,\n 'ktp' : applicant.no_id,\n 'tgl_berlaku' : applicant.tgl_berlaku,\n # 'issued_id' : applicant.dikeluarkan.id,\n \n #### Alamat Sesuai KTP #### \n 'country_id2':applicant.country_id2.id,\n 'prov_id2':applicant.prov_id2.id,\n 'kab_id2':applicant.kab_id2.id,\n 'kec_id2':applicant.kec_id2.id,\n 'alamat2' : applicant.alamat2,\n 'kodepos1':applicant.kode2,\n 'telp2' : applicant.telp2,\n \n # 'status': 
applicant.status,\n #### IDS ####\n 'susunan_kel1_ids' : prod_ids,\n 'susunan_kel2_ids':prod_ids1,\n 'rwt_pend_ids':prod_ids2,\n 'bahasa_ids':prod_ids3,\n 'rwt_krj_ids':prod_ids4,\n 'koneksi1_ids':prod_ids5,\n 'koneksi2_ids':prod_ids6, \n })\n self.write(cr, uid, [applicant.id], {'emp_id': emp_id}, context=context)\n self.pool['hr.job'].message_post(\n cr, uid, [applicant.job_id.id],\n body=_('New Employee %s Hired') % applicant.partner_name if applicant.partner_name else applicant.name,\n subtype=\"hr_recruitment.mt_job_applicant_hired\", context=context)\n else:\n raise osv.except_osv(_('Warning!'), _('You must define an Applied Job and a Contact Name for this applicant.'))\n\n action_model, action_id = model_data.get_object_reference(cr, uid, 'hr', 'open_view_employee_list')\n dict_act_window = act_window.read(cr, uid, [action_id], [])[0]\n if emp_id:\n dict_act_window['res_id'] = emp_id\n dict_act_window['view_mode'] = 'form,tree'\n return dict_act_window", "def createEmployee():\n form = CreateEmployeeForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n staff = Staff(first_name=form.first_name.data, last_name=form.last_name.data, password=hashed_password, \n email=form.email.data, role=form.role.data, location=form.location.data)\n db.session.add(staff)\n db.session.commit()\n flash(f'Employee Added To Database', category='Success')\n return redirect(url_for('login'))\n return render_template('new_employee.html', title=\"Register\", form=form)", "def employee_add(request, user_id):\n\n if request.method == \"GET\":\n employee = User.objects.get(pk=user_id)\n orgs = Organization.objects.filter(administrator=request.user)\n\n #desig_list = orgs.values_list('designations', flat=True)\n #designations = Designation.objects.filter(pk__in=desig_list)\n\n return render_to_response('organization/employee_add1.html',\n {'employee':employee, 'orgs': orgs},\n context_instance=RequestContext(request))\n\n user_id = request.POST.get('employee_id')\n org = request.POST.get('add2orgs') # id of selected org\n designations_list = request.POST.getlist('designations2add')#list of ids of selected designation\n\n try:\n user = User.objects.get(pk=user_id)# emp obj to add\n org = Organization.objects.get(pk=org)#selected org objects\n designations = Designation.objects.filter(pk__in=designations_list)#convert desig id in obj\n except Exception, e:\n # log this error\n print str(e)\n messages.error(request, str(e))\n user = org = designations = None\n\n if not (user and designations.count()):\n # add error message.\n messages.error(request, \"Select atleast one Organization and its Designation\")\n # redirect to the same page.\n return HttpResponseRedirect(request.path)\n\n #def send_email():\n # template = get_template('organization/confirmation_email.txt')\n # context = Context({'user':user,'org':o.organization,'desig':desig})\n # subject = u'confirmation email'\n # message = template.render(context)\n # send_mail(subject,message,settings.DEFAULT_FROM_EMAIL,['[email protected]'])\n\n # create visiting card(s) for the employee\n try:\n l=[]\n for desig in designations:\n if desig in org.designations.all():\n o,c = VisitingCards.objects.get_or_create(organization=org,\n designation=desig, user=user)\n\n if c:\n #send_email()\n template = get_template('organization/confirmation_email.txt')\n context = Context({'user':user,'org':o.organization,'desig':desig})\n subject = u'confirmation email'\n message = template.render(context)\n msg = 
(subject,message,settings.DEFAULT_FROM_EMAIL, ['[email protected]'])\n l.append(msg)\n #t=threading.Thread(target=send_email)\n #t.start()\n messages.success(request,\"Employee Added Succefully..!\")\n else:\n messages.info(request,\" %s Already Added ..!\" % user.username)\n else:\n messages.error(request,\"%s Doesnt have selected designation %s .!\" %(orgs, desig))\n raise Exception()\n if c:\n tupl=tuple(l)\n threading.Thread(send_mass_mail(tupl, fail_silently=False)).start()\n messages.success(request,\"Employee Added Succefully..!\")\n\n\n\n except:\n messages.error(request,\"something went wroung \")\n return HttpResponseRedirect(request.path)\n\n return HttpResponseRedirect(\"/org/user/list\")", "def add_employee():\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n form = SignUp_Form()\n \n\n if form.validate_on_submit():\n try: \n employee = Employee.register(\n username = form.username.data,\n password = form.password.data, \n email = form.email.data, \n first_name = form.first_name.data,\n last_name = form.last_name.data,\n hire_date = form.hire_date.data, \n is_admin = form.is_admin.data,\n )\n\n db.session.add(employee)\n\n db.session.commit()\n except IntegrityError:\n flash(\"Email already in use\", \"danger\")\n return render_template(\"/admin/add_user.html\", form = form)\n\n flash(\"Employee Added!\", \"success\")\n return redirect(\"/administrator\")\n else:\n\n return render_template(\"/admin/add_user.html\", form = form)", "def add(request):\n if request.method == 'POST':\n form = CompanyForm(request.POST, request.FILES)\n if form.is_valid():\n form.instance.owner = request.user\n form.save()\n url = reverse('companies_list_all')\n return HttpResponseRedirect(url)\n else:\n form = CompanyForm()\n\n context = dict(form=form)\n return render(request, 'companies/add.html', context)", "def test_website_companies_create(self):\n pass", "def detail(request, company_id):\n company = get_object_or_404(Company, pk=company_id)\n\n company_form = CompanyForm(instance=company)\n contact_form = ContactCreationForm()\n\n return render(request, 'companies/detail.html', {\n 'company_detail': company,\n 'company_form': company_form,\n 'contact_form': contact_form\n })", "def create_company(self):\n self.driver.get(f'{self.base_url}/company-register')\n\n # Fill the company name\n enter_random_string = WebDriverWait(self.driver, 20).until(expected_conditions.presence_of_element_located((By.NAME, 'companyName')))\n enter_random_string.send_keys(self.random_string)\n\n # Press \"Save and Continue\"\n self.driver.find_element_by_xpath('/html/body/div[1]/div/div[3]/div/div[2]/div/div[2]/div[2]/div[2]/div/button').click()\n\n # Wait for the page to load (5 seconds)\n sleep(5)", "def new_computer(request):\n\n if request.method == \"POST\":\n\n try:\n make = request.POST[\"make\"]\n model = request.POST[\"model\"]\n serial_no = request.POST[\"serial_no\"]\n purchase_date = request.POST[\"purchase_date\"]\n employee_id = request.POST[\"employee\"]\n employee = Employee.objects.get(pk=employee_id)\n\n if make is \"\" or model is \"\" or serial_no is \"\" or purchase_date is \"\":\n return render(request, \"agileHR/computer_new.html\", {\n \"error_message\": \"Please fill out all fields\",\n \"make\": make,\n \"model\": model,\n \"serial_no\": serial_no,\n \"purchase_date\": purchase_date\n })\n else:\n now = datetime.datetime.now()\n new_computer = 
Computer(make=make, model=model, serial_no=serial_no, purchase_date=purchase_date)\n new_computer.save()\n join = EmployeeComputer.objects.create(\n computer = new_computer,\n employee = employee,\n date_assigned = now\n )\n join.save()\n\n return HttpResponseRedirect(reverse(\"agileHR:computer_detail\", args=(new_computer.id,)))\n except KeyError:\n return render(request, \"agileHR/computer_new.html\", {\n \"error_message\": \"Please fill out all fields\"\n })\n else:\n # Get all computer assignment history\n computer_assignments = EmployeeComputer.objects.all()\n\n # Get employees who have had a computer but do not currently have one.\n need_computers = Employee.objects.exclude(employeecomputer__date_revoked=None).order_by('last_name')\n\n # Get employees who have never had a computer.\n never_computers = Employee.objects.exclude(employeecomputer__in=computer_assignments).order_by('last_name')\n\n # Combine the two querysets\n final_list = need_computers | never_computers\n\n context = {\n \"employees\": final_list\n }\n\n return render(request, \"agileHR/computer_new.html\", context)", "def action_add(request, employee_id=None):\n if employee_id:\n employee = Employee.objects.get(pk=employee_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n else:\n employee = request.user.employee_user.first()\n if request.method == 'POST':\n form = ActionForm(request.POST)\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/action/%d' % form.instance.pk)\n else:\n form = ActionForm()\n return TemplateResponse(\n request,\n 'mus/action_edit.html',\n dict(\n form=form\n )\n )", "def add_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee(id, name, email, office, extra_info, picture_location, research_group, '\n 'title, is_external, is_admin, is_active) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);',\n (obj.e_id, obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active))\n\n self.dbconnect.commit()\n return obj\n except:\n self.dbconnect.rollback()\n raise", "def test_new_employer_crud_methods(self):\n response = self.client.post(\n '/employers/', self.new_employer_data, format='json')\n self.assertEqual(response.status_code, 201)\n self.assertEqual(len(Employer.objects.all()), 2)\n\n # test one employer retrieve\n response = self.client.get('/employers/1/')\n self.assertEqual(response.status_code, 200)\n self.assertIn('Andela', response.data['name'])\n\n # test one employer update\n response = self.client.put('/employers/1/',\n {'name': 'New Employer'})\n self.assertEqual(response.status_code, 200)\n self.assertIn('New Employer', response.data['name'])", "def edit_employee(request, employee_id):\n employee = Employee.objects.get(pk=int(employee_id))\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n\n assert isinstance(employee, Employee)\n assert isinstance(current_employee, Employee)\n\n # if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n # raise PermissionDenied()\n\n if not current_employee.hasAccessTo(employee):\n raise PermissionDenied()\n\n form = EditEmployeeForm(request.user, employee, {\n 'first_name': employee.user.first_name,\n 'last_name': employee.user.last_name,\n 'email': employee.user.email,\n 'manager': 
employee.manager.id if employee.manager else 0,\n 'language_code': employee.language_code,\n # 'development_plan_type': employee.development_plan_type.id,\n 'is_manager': employee.is_manager\n })\n if 'manager' in form.fields:\n managerQS = Employee.objects.filter(is_manager=True, company__pk=employee.company.pk)\n form.fields['manager'].queryset = managerQS\n # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(\n # Q(company__pk=employee.company.pk) | Q(company__isnull=True)\n # )\n is_me = employee.user.pk == request.user.pk\n return TemplateResponse(\n request,\n 'mus/edit_employee_form.html',\n {\n 'edit_employee_form': form,\n 'employee_id': employee_id,\n 'me': is_me,\n 'name': employee.user.get_full_name()\n }\n )", "def office_view(request, slug, id):\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n office_reference = get_object_or_404(Office, id=id,company=company)\n\n return render_to_response('office_form.html', \n {'details': office_reference,'info':office_reference},\n context_instance=RequestContext(request))", "def acquisition_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n acquisition_form = AcquisitionForm()\n return render_to_response('acquisition_form.html', {'form': acquisition_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n acquisition_form = AcquisitionForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if acquisition_form.is_valid():\n aqf = acquisition_form.save(commit=False)\n aqf.company = company\n aqf.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('acquisition_form.html', \n {'form': acquisition_form, 'form_errors': acquisition_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def create_job_detail(company_name, job_title, application_deadline, job_listing_url, state, city, application_listed, salary):\n\n job_detail = JobDetail(company_name = company_name, job_title = job_title, application_deadline = application_deadline, job_listing_url = job_listing_url, state = state , city = city, application_listed = application_listed, salary = salary)\n db.session.add(job_detail)\n db.session.commit()\n\n return job_detail", "def register():\n add_employee = True\n form = RegistrationForm()\n if form.validate_on_submit():\n employee = Employee(email=form.email.data,\n username=form.username.data,\n glad_id=form.glad_id.data,\n tel_no=form.tel_no.data,\n role_id=2 , ##form.role_id.data,\n password=form.password.data)\n\n # add employee to the database\n db.session.add(employee)\n db.session.commit()\n flash('You have successfully registered! 
You may now login.')\n\n # redirect to the login page\n return redirect(url_for('auth.login'))\n\n # load registration template\n return render_template('auth/register.html', form=form, title='Register')", "def assign_employee(id):\r\n check_admin()\r\n\r\n employee = Employee.query.get_or_404(id)\r\n\r\n # prevent admin from being assigned a department or role\r\n if employee.is_admin:\r\n abort(403)\r\n\r\n form = EmployeeAssignForm(obj=employee)\r\n if form.validate_on_submit():\r\n employee.department = form.department.data\r\n employee.role = form.role.data\r\n db.session.add(employee)\r\n db.session.commit()\r\n flash('You have successfully assigned a department and role.')\r\n\r\n # redirect to the roles page\r\n return redirect(url_for('admin.list_employees'))\r\n\r\n return render_template('admin/employees/employee.html',\r\n employee=employee, form=form,\r\n title='Assign Employee')", "def create_account(request):\n if request.POST:\n try:\n username, password = create_account_form(request, request.POST)\n\n user = authenticate(username=username, password=password)\n if user is None:\n messages.add_message(request, messages.ERROR, 'Oops! Something went wrong.')\n hospitals = Hospital.objects.all()\n return render(request, 'create_account.html', {'hospitals': hospitals})\n login(request, user)\n return redirect('base_dashboard')\n except ValueError:\n pass\n\n hospitals = Hospital.objects.all()\n\n return render(request, 'create_account.html', {'hospitals': hospitals})", "def create(self, request):\n serializer = data_serializers.TeamLeaderOrEmployeeRequestDataSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n try:\n respond_data = self.controller.add_team_employee(request_data=request_data)\n serializer = data_serializers.PresentTeamEmployeeDataSerializer(respond_data)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (\n domain_exceptions.TeamDoesNotExist,\n domain_exceptions.EmployeeDoesNotExist,\n domain_exceptions.EmployeeIsATeamMember\n )as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def create_emp(self, name, pos, dept):\n if pos.upper() == 'MANAGER':\n self.create_manager(name, pos, dept)\n elif pos.upper() == 'SENIOR':\n self.create_senior(name, pos, dept)\n elif pos.upper() == 'JUNIOR':\n self.create_junior(name, pos, dept)\n else:\n self.create_trainee(name, pos, dept)", "def funding_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n funding_form = FundingForm()\n return render_to_response('funding_form.html', {'form': funding_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n funding_form = FundingForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if funding_form.is_valid():\n of = funding_form.save(commit=False)\n of.company = company\n of.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('funding_form.html', \n {'form': funding_form, 'form_errors': funding_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def 
create_view(request, title, modelform, **kwargs):\n instance_form = modelform(request.POST or None)\n if instance_form.is_valid():\n instance = instance_form.save(commit=False)\n for default in kwargs.keys():\n setattr(instance, default, kwargs[default])\n instance.save()\n messages.success(request, _(\"%s was created.\") % instance)\n return redirect(instance.get_absolute_url())\n return form(\n {**kwargs, \"form\": instance_form, \"action_name\": _(\"Create\"), \"title\": title},\n \"deployments/form.html\",\n request,\n )", "def enterprise_edit(request):\r\n action = tool.get_param_by_request(request.GET, 'action', \"add\", str)\r\n career_id = tool.get_param_by_request(request.GET, 'careerId', 0, int)\r\n\r\n enterprise = APIResult()\r\n c = None\r\n if action == \"add\":\r\n c = {\"career_id\": career_id, \"action\": action}\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_edit.html\", c,\r\n context_instance=RequestContext(request))\r\n\r\n if action == \"edit\" and (not career_id):\r\n _id = tool.get_param_by_request(request.GET, 'enterpriseId', 0, int)\r\n enterprise = api_enterprise.get_career_page_enterprise_by_id(_id)\r\n c = {\"enterprises\": enterprise.result()[0], \"action\": action}\r\n if enterprise.is_error():\r\n return render_to_response(\"404.html\", {}, context_instance=RequestContext(request))\r\n else:\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_edit.html\", c,\r\n context_instance=RequestContext(request))\r\n\r\n if action == \"edit\" and career_id:\r\n enterprise = api_enterprise.list_career_page_enterprise_by_career_id(career_id)\r\n c = {\"enterprises\": enterprise.result(), \"action\": action}\r\n\r\n if enterprise.is_error():\r\n return render_to_response(\"404.html\", {}, context_instance=RequestContext(request))\r\n else:\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_list.html\", c,\r\n context_instance=RequestContext(request))", "def award_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n award_form = AwardForm()\n return render_to_response('award_form.html', {'form': award_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n award_form = AwardForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if award_form.is_valid():\n af = award_form.save(commit=False)\n af.company = company\n af.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('award_form.html', \n {'form': award_form, 'form_errors': award_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def dataset(request):\n from trytond.transaction import Transaction\n from trytond.tests.test_tryton import USER, CONTEXT, DB_NAME, POOL\n\n Party = POOL.get('party.party')\n Company = POOL.get('company.company')\n Country = POOL.get('country.country')\n Subdivision = POOL.get('country.subdivision')\n Employee = POOL.get('company.employee')\n Currency = POOL.get('currency.currency')\n User = POOL.get('res.user')\n FiscalYear = POOL.get('account.fiscalyear')\n Sequence = POOL.get('ir.sequence')\n AccountTemplate = 
POOL.get('account.account.template')\n Account = POOL.get('account.account')\n Journal = POOL.get('account.journal')\n PaymentGateway = POOL.get('payment_gateway.gateway')\n AccountCreateChart = POOL.get('account.create_chart', type=\"wizard\")\n\n with Transaction().start(DB_NAME, USER, context=CONTEXT) as transaction:\n # Create company, employee and set it user's current company\n usd, = Currency.create([{\n 'name': 'US Dollar',\n 'code': 'USD',\n 'symbol': '$',\n }])\n\n country_us, = Country.create([{\n 'name': 'United States',\n 'code': 'US',\n }])\n subdivision_florida, = Subdivision.create([{\n 'name': 'Florida',\n 'code': 'US-FL',\n 'country': country_us.id,\n 'type': 'state'\n }])\n subdivision_california, = Subdivision.create([{\n 'name': 'California',\n 'code': 'US-CA',\n 'country': country_us.id,\n 'type': 'state'\n }])\n\n company_party, = Party.create([{\n 'name': 'ABC Corp.',\n 'addresses': [('create', [{\n 'name': 'ABC Corp.',\n 'street': '247 High Street',\n 'zip': '94301-1041',\n 'city': 'Palo Alto',\n 'country': country_us.id,\n 'subdivision': subdivision_california.id,\n }])],\n 'contact_mechanisms': [('create', [{\n 'type': 'phone',\n 'value': '123456789'\n }])]\n }])\n\n employee_party, = Party.create([{\n 'name': 'Prakash Pandey',\n }])\n company, = Company.create([{\n 'party': company_party.id,\n 'currency': usd.id,\n }])\n employee, = Employee.create([{\n 'party': employee_party.id,\n 'company': company.id,\n }])\n User.write(\n [User(USER)], {\n 'main_company': company.id,\n 'company': company.id,\n }\n )\n CONTEXT.update(User.get_preferences(context_only=True))\n\n # Create fiscal year\n date = datetime.date.today()\n\n post_move_sequence, = Sequence.create([{\n 'name': '%s' % date.year,\n 'code': 'account.move',\n 'company': company.id,\n }])\n\n fiscal_year, = FiscalYear.create([{\n 'name': '%s' % date.year,\n 'start_date': date + relativedelta(month=1, day=1),\n 'end_date': date + relativedelta(month=12, day=31),\n 'company': company.id,\n 'post_move_sequence': post_move_sequence.id,\n }])\n FiscalYear.create_period([fiscal_year])\n\n # Create minimal chart of account\n account_template, = AccountTemplate.search([\n ('parent', '=', None),\n ('name', '=', 'Minimal Account Chart')\n ])\n\n session_id, _, _ = AccountCreateChart.create()\n create_chart = AccountCreateChart(session_id)\n create_chart.account.account_template = account_template\n create_chart.account.company = company\n create_chart.transition_create_account()\n\n receivable, = Account.search([\n ('kind', '=', 'receivable'),\n ('company', '=', company.id),\n ])\n payable, = Account.search([\n ('kind', '=', 'payable'),\n ('company', '=', company.id),\n ])\n create_chart.properties.company = company\n create_chart.properties.account_receivable = receivable\n create_chart.properties.account_payable = payable\n create_chart.transition_create_properties()\n\n account_revenue, = Account.search([\n ('kind', '=', 'revenue')\n ])\n account_expense, = Account.search([\n ('kind', '=', 'expense')\n ])\n\n # Create customer\n customer, = Party.create([{\n 'name': 'John Doe',\n 'addresses': [('create', [{\n 'name': 'John Doe',\n 'street': '250 NE 25th St',\n 'zip': '33137',\n 'city': 'Miami, Miami-Dade',\n 'country': country_us.id,\n 'subdivision': subdivision_florida.id,\n }])],\n 'contact_mechanisms': [('create', [{\n 'type': 'phone',\n 'value': '123456789'\n }])]\n }])\n\n cash_journal, = Journal.search(\n [('type', '=', 'cash')], limit=1\n )\n Journal.write([cash_journal], {\n 'debit_account': 
account_expense.id\n })\n\n stripe_gateway = PaymentGateway(\n name='Credit Card - Stripe',\n journal=cash_journal,\n provider='stripe',\n method='credit_card',\n stripe_api_key=\"sk_test_Xw6QdFU31e8mcmcdeMt7DoiE\",\n test=True\n )\n stripe_gateway.save()\n\n result = {\n 'customer': customer,\n 'company': company,\n 'stripe_gateway': stripe_gateway,\n }\n\n transaction.commit()\n\n def get():\n from trytond.model import Model\n\n for key, value in result.iteritems():\n if isinstance(value, Model):\n result[key] = value.__class__(value.id)\n return namedtuple('Dataset', result.keys())(**result)\n\n return get", "def get(self, request, *args, **kwargs):\n organization_form = organization.forms.OrganizationForm()\n user_form = organization.forms.UserForm()\n # print(pet_form, pet_video_form)\n context = {'organization_form': organization_form,'user_form': user_form}\n context.update(django.core.context_processors.csrf(request))\n return django.shortcuts.render_to_response('organization/organization_insert.html', context)", "def add_employee(self, first_name, last_name):\n self.switch_main_menu(\"PIM\")\n self.click_menu(\"Add Employee\")\n self.pim = AddEmployee(self.driver)\n self.pim.add_user_employee(first_name, last_name)", "def edit_employee(employee_id):\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employee = Employee.query.get_or_404(employee_id)\n form = Edit_User_Form(obj = employee)\n \n #form.location.choices = db.session.query(Location.id, Location.site_name).all()\n \n #form.certs.choices = db.session.query(Certs.id , Certs.cert_name).all()\n\n if form.validate_on_submit():\n \n employee.email = form.email.data, \n employee.first_name = form.first_name.data,\n employee.last_name = form.last_name.data,\n employee.hire_date = form.hire_date.data, \n employee.is_admin = form.is_admin.data\n\n \n db.session.commit()\n \n flash(f\"{employee.first_name} {employee.last_name} has been saved\", \"success\")\n return redirect(\"/administrator\")\n else:\n\n return render_template(\"/admin/edit_user.html\", employee = employee, form = form)", "def create_employee(self,personal_identity):\r\n new_emp = Employee(*personal_identity)\r\n registration_str = new_emp.get_registration_str()\r\n\r\n return_value = self.save_object_to_DB(\"employee\",registration_str)\r\n return return_value", "def perform_create(self, serializer):\n if self.request.data.get('user_type', None) == 'employee':\n serializer.save(is_staff=False)\n else:\n serializer.save()", "def profile_detail(request, employee_id):\n current_employee = Employee.objects.filter(user__pk=request.user.pk).first()\n employee = Employee.objects.get(pk=int(employee_id))\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not current_employee.pk == int(employee_id):\n if not current_employee.is_manager or not current_employee.company.pk == employee.company.pk:\n if not current_employee.isCompanySuperUserOrHigher():\n return HttpResponse('unauthorized', status=401)\n\n user_files = get_files_for_employee(employee_id)\n\n if request.method == 'POST':\n\n upload_form = UploadFileToEmployeyForm(request.POST, request.FILES)\n form = EmployeeNoteForm(request.POST, 
instance=employee)\n\n if 'upload' in request.POST:\n if upload_form.is_valid():\n upload_form.handle_upload(employee_id, request.FILES['file'])\n\n return HttpResponseRedirect('/employee/show/{}?upload_status=ok#file-list'.format(employee_id))\n\n else:\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/employee/show/%d' % form.instance.pk)\n\n else:\n form = EmployeeNoteForm(instance=employee)\n upload_form = UploadFileToEmployeyForm()\n data = {}\n data[\"user\"] = employee.user.first_name + \" \" + employee.user.last_name\n data[\"id\"] = str(employee.user.pk)\n data[\"title\"] = employee.title\n data[\"email\"] = employee.user.email\n data[\"phone\"] = employee.phone\n company_dict = {}\n company_dict[\"name\"] = employee.company.name\n company_dict[\"id\"] = str(employee.company.pk)\n\n data[\"company\"] = company_dict\n employee_username = \"\"\n emp = Employee.objects.filter(manager=employee.manager).all()\n for obj in emp:\n employee_username = obj.manager.user.username if obj.manager else \"\"\n employee_first = obj.manager.user.first_name if obj.manager else \"\"\n employee_last = obj.manager.user.last_name if obj.manager else \"\"\n manager_dict = {}\n manager_dict[\"name\"] = employee_username\n manager_dict[\"id\"] = employee_id\n manager_dict[\"first_last_name\"] = employee_first + \" \" + employee_last\n data[\"manager\"] = manager_dict\n data[\"date_of_birth\"] = employee.date_of_birth\n data[\"status_questions\"] = employee.status_questions\n data[\"notes\"] = employee.notes\n employee_role = EmployeeRole.objects.filter(employee=employee).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n data[\"roles\"] = name_role_list\n data[\"potenciale\"] = employee.potenciale\n data[\"date_start\"] = employee.created_at\n data[\"is_manager\"] = employee.is_manager\n data[\"date_finish\"] = \"\"\n data['photo'] = employee.photo.url if employee.photo else ''\n\n return JsonResponse(status=200, data=data)\n # return TemplateResponse(\n # request,\n # 'mus/detail.html',\n # {\n # 'actions': actions,\n # 'employee': employee,\n # # 'development_plans': development_plans,\n # 'form': form,\n # 'upload_form': upload_form,\n # 'user_files': user_files\n # }\n # )", "def test_admin_can_create_a_employee(self):\n\n account_data = {\n \"username\": \"Mike\",\n \"email\": \"[email protected]\",\n \"password\": \"1234567\",\n \"confirm_password\": \"1234567\"\n }\n response = self.client.post(\n reverse('accounts:create-user'),\n account_data,\n format=\"json\")\n \"\"\"Test the api has bucket creation capability.\"\"\"\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertTrue(\"data\" in json.loads(response.content))", "def complaint(request):\n if request.method == 'POST':\n form = NewComplaint(request.POST)\n new_comp = Complaint(title=form.data['title'],\n text=form.data['text'],\n date=datetime.datetime.now(),\n author_id=request.user.id,\n author_name=request.user)\n new_comp.save()\n return redirect('home')\n\n context = get_context()\n context['title'] = u'Создание новой жалобы'\n context['form'] = NewComplaint()\n\n return render(request, 'complaint.html', context)", "def home(request):\n \n if request.method == \"POST\":\n if request.POST['formname'] == 'registration':\n company_detail_form = CompanyForm(request.POST)\n form_login = LoginForm()\n if company_detail_form.is_valid():\n cd = company_detail_form.cleaned_data\n try:\n user = request.user\n company_obj = 
Company.objects.get(email=request.user.email)\n company_obj.name = cd['title']\n company_obj.email = cd['email']\n company_obj.website = cd['website']\n company_obj.street1 = cd['street1']\n company_obj.street2 = cd['street2']\n company_obj.zip_code = cd['post_code']\n company_obj.country = cd['country']\n company_obj.phone_number = cd['phone_number']\n company_obj.category = cd['industry_type']\n company_obj.business_year_start = cd['business_year_start']\n company_obj.business_year_end = cd['business_year_end']\n company_obj.description = cd['description']\n company_obj.save()\n for weekday in cd['weekdays']:\n company_obj.weekdays.add(weekday)\n if user:\n address = request.user.email\n redirect_to = '/home/registration_update?email=%s' % \\\n address\n \n subject='[HRMSystems]'\n message = \"\"\"Hi %s,\n \n You account information has been successfully updated with HRMSystems.\n \n Regards,\n HRMSystems Team\n \n \"\"\" % (user.username)\n email = EmailMessage(subject, message, to=[address])\n email.send()\n \n except:\n user = register_user(cd)\n company_obj = Company.objects.get_or_create(\n name = cd['title'],\n \n email = cd['email'],\n website = cd['website'],\n street1 = cd['street1'],\n street2 = cd['street2'],\n zip_code = cd['post_code'],\n country = cd['country'],\n phone_number = cd['phone_number'],\n category = cd['industry_type'],\n business_year_start = cd['business_year_start'],\n business_year_end = cd['business_year_end'],\n description = cd['description'],\n )\n \n if user:\n address = user.email\n redirect_to = 'registration/registration_confirmation?email=%s' % \\\n address\n \n subject='[HRMSystems]'\n message = \"\"\"Hi %s,\n \n You have successfully registered with HRMSystems.\n \n Please follow the link below to complete your registration:\n \n http://%s/registration/verify_registration/?key=%s&username=%s\n \n Regards,\n HRMSystems Team\n \n \"\"\" % (user.username, BASE_URL, user.profile.key, \n user.username)\n email = EmailMessage(subject, message, to=[address])\n email.send()\n else:\n redirect_to = 'registration/registration_failure'\n return HttpResponseRedirect(redirect_to)\n else:\n print \"Form is not valid\"\n \n if request.POST['formname'] == 'login':\n form_login = LoginForm(request.POST)\n company_detail_form = CompanyForm()\n if form_login.is_valid():\n data = form_login.cleaned_data\n user = authenticate(username=data['username'], \n password=data['password'])\n login(request, user)\n if user.is_staff == True:\n return HttpResponseRedirect('/registration/summary/?user=company&active=summary')\n elif user.userprofile_set.values('is_supervisor')[0].get('is_supervisor') == True:\n return HttpResponseRedirect('/registration/supervisor_detail/')\n else:\n return HttpResponseRedirect('/registration/employee_homepage/')\n\n else:\n company_detail_form = CompanyForm()\n form_login = LoginForm()\n try:\n company_obj = Company.objects.get(email=request.user.email)\n company_detail_form.fields['title'].initial = company_obj.name\n company_detail_form.fields['email'].initial = company_obj.email\n company_detail_form.fields['website'].initial = company_obj.website\n company_detail_form.fields['weekdays'].initial = company_obj.weekdays.all()\n company_detail_form.fields['street1'].initial = company_obj.street1\n company_detail_form.fields['street2'].initial = company_obj.street2\n company_detail_form.fields['post_code'].initial = company_obj.zip_code\n company_detail_form.fields['country'].initial = company_obj.country\n 
company_detail_form.fields['phone_number'].initial = company_obj.phone_number\n company_detail_form.fields['industry_type'].initial = company_obj.category\n company_detail_form.fields['business_year_start'].initial = company_obj.business_year_start\n company_detail_form.fields['business_year_end'].initial = company_obj.business_year_end\n \n except:\n pass\n return render_to_response(\n 'home/home_page.html',\n {\n 'company_detail_form':company_detail_form,\n 'form_login' : form_login,\n 'request':request,\n 'base_url':BASE_URL,\n 'active':'home'\n },\n context_instance = RequestContext(request)\n )", "def test_employee_creation(self):\n helper = EmployeeHelper(name='Andrew', hired_on='2019-10-01T00:00:00', salary=50000, department_id=1)\n\n # Returned result is an OrderedDict\n result = self.client.execute(helper.get_create_employee_query())['data']['createEmployee']['employee']\n\n self.assertEqual(result['name'], helper.name)\n self.assertEqual(result['hiredOn'], helper.hired_on)\n self.assertEqual(result['salary'], helper.salary)\n self.assertEqual(result['departmentId'], helper.department_id)", "def post(self):\n data = EmployeeRegister.parser.parse_args()\n new_employee_id = str(uuid.uuid4())\n\n while EmployeeModel.find_by_id(new_employee_id):\n # if this id is already in use\n new_employee_id = str(uuid.uuid4())\n\n employee = EmployeeModel(**data, employee_id=new_employee_id)\n employee.save_to_db()\n\n return {\"message\": \"Employee successfully added to the system\"}, 201 # 201 - Created", "def create_account_request(request):\n if request.method == \"POST\":\n form = NewAccountForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, \"Creation successful.\")\n return redirect(\"home\")\n messages.error(request, \"Unsuccessful creation. 
Invalid information.\")\n form = NewAccountForm\n customer_list = Customer.objects.all()\n context = {'customer_list': customer_list, 'account_form': form}\n return render(request, \"accounts/account_creation.html\", context)", "def clients_moral_personne_new_view(request):\n # Check authorization\n if not Utils.has_permission(request, request.registry.settings['client_edition']):\n raise exc.HTTPForbidden()\n\n # Get clientMoralPersonne instance\n model = Utils.set_model_record(ClientMoralPersonne(), request.params)\n\n request.dbsession.add(model)\n\n return Utils.get_data_save_response(Constant.SUCCESS_SAVE.format(ClientMoralPersonne.__tablename__))", "def competitors_view(request, slug, id):\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n competitors_reference = get_object_or_404(Competitors, id=id,company=company)\n\n return render_to_response('competitors_form.html', \n {'details': competitors_reference,'info':competitors_reference},\n context_instance=RequestContext(request))", "def employee_login():\n return Response(render_template('admin/login.html'))", "def test_create_company_props_using_post(self):\n pass", "def create(self, values):\n if values.get('country_id', False):\n country = self.env['res.country'].browse(values['country_id'])\n if country.code == 'SA':\n values.update({'is_saudi': True})\n else:\n values.update({'is_saudi': False})\n\n res = super(HrEmployee, self).create(values)\n if values.get('user_id', False):\n self.user_id.write({'employee_id': res})\n return res", "def current_employee(self, request: Request) -> Response:\n serializer = self.get_serializer_class()\n serializer = serializer(request.user, context={'request': request})\n return Response(serializer.data)", "def register():\n\n form=RegistrationForm()\n\n if form.validate_on_submit():\n\n #employee = Employee()\n #employee.email= form.email.data,\n #employee.username = form.username.data, \n # or \n employee = Employee(\n email= form.email.data,\n username= form.username.data,\n first_name= form.first_name.data,\n last_name= form.last_name.data,\n password= form.password.data\n )\n\n # add employee to database\n\n db.session.add(employee)\n db.session.commit()\n flash('You have successfully regstered. You may now login')\n\n # redirect to the Login Page\n # instead of auth.login we can write .login only\n # Here . 
refers to current blueprint\n\n return redirect(url_for('auth.login'))\n\n # if validation fails,return again to Registration page\n\n return render_template('auth/register.html',form=form,title='Register')", "def post(self, request):\n data = request.data\n skill_data = data.pop('skills')\n Department_name = data.pop('department')\n department = Department.objects.get(name=Department_name)\n manager_name = data.pop('manager')\n manager = Manager.objects.get(name=manager_name)\n Employee = EmployeeDetail.objects.create(department=department, manager=manager, **data)\n Employee.save()\n for skill in skill_data:\n skill_add, create = Skill.objects.get_or_create(name=skill)\n Employee.skills.add(skill_add)\n return Response(\n data=request.data\n )", "def setUp(self):\n\n self.user = self.make_user()\n self.employee = Employee.objects.create(\n cpf=\"974.220.200-16\",\n user=self.user,\n departament=Employee.ADMINISTRATION\n )", "def get_create_employee_query(self):\n template = \"\"\"\n mutation createEmployee {{\n createEmployee(input: {{ {params} }}) {{\n employee {{\n name\n hiredOn\n salary\n departmentId\n }}\n }}\n }}\n \"\"\"\n # Add input parameters as needed\n input_params = 'name:\"{}\",'.format(self.name)\n\n if self.hired_on is not None:\n input_params += 'hiredOn: \"{}\", '.format(self.hired_on)\n\n if self.salary is not None:\n input_params += 'salary: {}, '.format(self.salary)\n\n if self.department_id is not None:\n input_params += 'departmentId: {}'.format(self.department_id)\n\n return template.format(params=input_params)", "def create_coll(request):\n form = CollForm(request)\n c = {}\n return render(request, \"browse/curate.html\", c)", "def test_office_creation(self):\n url = '/api/v1/consultorios/'\n data = {\n \"hospital\": \"Angeles Roma\",\n \"office\": \"306\"\n }\n request = self.client.post(url, data)\n\n self.assertEqual(request.status_code, status.HTTP_201_CREATED)", "def test_create_company_1(self):\n company_data = {\n \"_id\": \"sbucks\",\n \"headquarters\": \"Seattle\",\n \"name\": \"Starbucks Inc.\",\n }\n\n resp = self.app.post('/companies', data=json.dumps(company_data),\n content_type='application/json')\n self.assertEqual(resp.status_code, HTTPStatus.CREATED)\n\n # cleanup\n del_resp = self.app.delete(f'/companies/{company_data[\"_id\"]}')\n self.assertEqual(del_resp.status_code, HTTPStatus.OK)", "def post(self):\n return self.get_request_handler(request.headers).create_new_employment_status(request)", "def list(self, request):\n employee = self.controller.retrieve_all_employees()\n serializer = data_serializers.PresentEmployeeDataSerializer(employee, many=True)\n return Response(serializer.data)", "def add_employee(self, empl):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee values(default,%s,%s,%s,%s,%s,%s,%s,%s)',\n (empl.name, empl.email, empl.office, empl.research_group, empl.title, empl.internOrExtern,\n empl.active, empl.promotor))\n cursor.execute('SELECT LASTVAL()')\n eid = cursor.fetchone()[0]\n empl.id = eid\n # get id and return updated object\n self.dbconnect.commit()\n except(Exception, self.dbconnect.get_error()) as error:\n self.dbconnect.rollback()\n raise Exception('\\nUnable to save Employee!\\n(%s)' % (error))", "def add_hospital(request):\n if request.POST:\n post = request.POST\n name = post.get(\"name\")\n address = post.get(\"address\")\n city = post.get(\"city\")\n state = post.get(\"state\")\n zip = post.get(\"zip\")\n hospital = Hospital.objects.create(\n name=name,\n address=address,\n 
city=city,\n state=state,\n zip=zip\n )\n\n if hospital:\n return redirect('add_hospital')\n\n return render(request, 'add_hospital.html')", "def edit(request, company_id=None):\n if company_id:\n company = get_object_or_404(Company, id=company_id)\n if request.POST and company.owner == request.user:\n form = CompanyForm(request.POST, instance=company)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect('/companies')\n if company.owner != request.user:\n return HttpResponseForbidden()\n form = CompanyForm(instance=company)\n context = dict(form=form)\n return render(request, 'companies/edit.html', context)\n else:\n companies = Company.objects.filter(owner=request.user)\n context = dict(companies=companies)\n return render(request, 'companies/companies_by_user.html', context)", "def edit_employee_certifications(employee_id):\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employee = Employee.query.get_or_404(employee_id)\n \n form = Add_Cert_Form(obj = employee)\n \n form.cert.choices = db.session.query(Cert.id , Cert.cert_name).all()\n \n \n if form.validate_on_submit():\n \n cert = Cert.query.get(form.cert.data) \n \n\n if cert.expire:\n received = form.received.data\n year = received.year\n month = received.month\n day = received.day\n\n start_date = datetime(year = year, month = month, day = day)\n change_unit = cert.good_for_unit\n change_time = cert.good_for_time\n \n if change_unit == \"days\": \n delta = timedelta(days = change_time)\n elif change_unit == \"weeks\":\n delta = timedelta(days = change_time * 7)\n elif change_unit == \"months\":\n delta = timedelta(days = change_time * 30)\n else:\n delta = timedelta(days = change_time * 365)\n\n due_date = start_date + delta\n employees = employee_certification(employee_id = employee_id, cert_id = cert.id, received = received, due_date = due_date)\n \n #cert.employees.append(employee))\n #db.session.add(cert)\n #employee.certs.append(dates)\n db.session.add(employees)\n db.session.commit()\n \n flash(f\"{employee.first_name} {employee.last_name} has been saved\", \"success\")\n return redirect(\"/administrator\")\n else:\n\n return render_template(\"/admin/employee_cert.html\", employee = employee, form = form)", "def test_create_company_2(self):\n company_data = {\n \"_id\": \"sbucks\",\n \"headquarters\": \"Seattle\",\n \"name\": \"Starbucks Inc.\",\n }\n\n resp = self.app.post('/companies', data=json.dumps(company_data),\n content_type='application/json')\n self.assertEqual(resp.status_code, HTTPStatus.CREATED)\n\n resp = self.app.post('/companies', data=json.dumps(company_data),\n content_type='application/json')\n self.assertEqual(resp.status_code, HTTPStatus.CONFLICT)\n\n # cleanup\n del_resp = self.app.delete(f'/companies/{company_data[\"_id\"]}')\n self.assertEqual(del_resp.status_code, HTTPStatus.OK)", "def test_access_employee(self):\n # Employee can't see any SO\n with self.assertRaises(AccessError):\n self.order.with_user(self.company_data['default_user_employee']).read()\n # Employee can't edit the SO\n with self.assertRaises(AccessError):\n self.order.with_user(self.company_data['default_user_employee']).write({'team_id': self.company_data['default_sale_team'].id})\n # Employee can't create the SO\n with self.assertRaises(AccessError):\n self.env['sale.order'].with_user(self.company_data['default_user_employee']).create({\n 'partner_id': self.partner_a.id,\n 
})\n # Employee can't delete the SO\n with self.assertRaises(AccessError):\n self.order.with_user(self.company_data['default_user_employee']).unlink()", "def retrieve(self, request, pk=None):\n employee = self.get_employee_object(pk)\n print(F\"Employee: {employee}\")\n serializer = data_serializers.PresentEmployeeDataSerializer(employee)\n return Response(serializer.data)", "def create_tea():\n # Bypass if user is logged in\n\n form = TeaAddForm()\n # Validate login attempt\n if form.validate_on_submit():\n tea = Tea(\n name=form.name.data,\n price_per_gram=form.price_per_gram.data,\n )\n db.session.add(tea)\n db.session.commit() # Create new tea\n return redirect(url_for(\"main_bp.dashboard\"))\n return render_template(\n \"add_tea.jinja2\",\n title=\"Add a Tea\",\n form=form,\n template=\"add_tea-page\",\n body=\"Add a Tea\",\n )", "def create_account():\n\n return render_template('account.html')", "def create(self, validated_data):\n user_data = validated_data.pop('user')\n user = UserSerializer.create(UserSerializer(), validated_data=user_data)\n employee, created = Employee.objects.update_or_create(user=user,\n employee_id=validated_data.pop('employee_id'),\n location=validated_data.pop('location'),\n avail_start_time= str(validated_data.pop('avail_start_time')),\n avail_end_time= str(validated_data.pop('avail_end_time')))\n return employee", "def get(self):\n resultado = EmployeeModel.query.all()\n return resultado", "def create_and_join(request):\n c = {}\n c.update(csrf(request))\n if request.method == 'POST': # If the form has been submitted...\n form = TeamForm(request.POST) # A form bound to the POST data\n if form.is_valid(): # All validation rules pass\n team = form.save()\n member = request.user.member\n member.team = team\n member.save()\n messages.add_message(request, messages.SUCCESS, 'Team info created!')\n return HttpResponseRedirect(reverse('team_details', args=(team.id,)))\n else:\n form = TeamForm() # An unbound form\n\n return render_to_response(\"teams/create_and_join.html\", {'form': form, 'c':c},\n context_instance=RequestContext(request))", "def test_create(self):\n\n responses.add(\n responses.POST,\n self.host + \"/manager\",\n json={\"path\": \"manager?project=ProjectTest\", \"action\": \"redirect\", \"status\": \"success\"},\n status=200\n )\n\n self.azk.create(self.project, self.description)", "def open_create_partner(self, cr, uid, ids, context=None):\n view_obj = self.pool.get('ir.ui.view')\n view_id = view_obj.search(cr, uid, [('model', '=', self._name), \\\n ('name', '=', self._name+'.view')])\n return {\n 'view_mode': 'form',\n 'view_type': 'form',\n 'view_id': view_id or False,\n 'res_model': self._name,\n 'context': context,\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n }", "def create(self, validated_data):\n admins = Group.objects.create(\n name=validated_data['name'] + ' Admins')\n accountants = Group.objects.create(\n name=validated_data['name'] + ' Accountants')\n validated_data['accountants'] = accountants\n validated_data['admins'] = admins\n company = super(CompanySerializer, self).create(validated_data)\n company.save()\n return company", "def create(self, vals):\n if not vals.get('nik_number'):\n vals['nik_number'] = self.generate_nik(vals)\n return super(Employee, self).create(vals)", "def post(self):\r\n piso=self.request.get('piso')\r\n numext=self.request.get('numext')\r\n numint=self.request.get('numint')\r\n piso=self.validonumero(piso)\r\n numext=self.validonumero(numext)\r\n numint=self.validonumero(numint)\r\n \r\n 
empresa=empresas()\r\n empresa.nombre=self.request.get('desc')\r\n empresa.calle=self.request.get('calle')\r\n empresa.numeroExterior=int(numext)\r\n empresa.numeroInterior=int(numint)\r\n empresa.colonia=self.request.get('colonia')\r\n empresa.piso=int(piso)\r\n empresa.andador=self.request.get('andador')\r\n empresa.codigo_postal=int(self.request.get('cp'))\r\n empresa.sitioweb=self.request.get('web')\r\n empresa.correo=self.request.get('mail')\r\n empresa.nombreContacto=\"\"\r\n empresa.paternoContacto=\"\"\r\n empresa.maternoContacto=\"\"\r\n #### \r\n ciudad=self.request.get('ciudad')\r\n query=\"where ciudad='%s'\"%ciudad\r\n cd=ciudades.gql(query)\r\n city=cd.fetch(1)\r\n for lstcd in city:\r\n empresa.id_Ciudad=lstcd.key().id()\r\n empresa.put()\r\n jsondic={}\r\n jsondata=[]\r\n jsondata+=[self.addKey(jsondic,\"Dato\", empresa.key().id())]\r\n self.response.out.write(simplejson.dumps(jsondata))\r\n return False", "def newproject_view(request):\n\n # Use to tell to the template that the user want to creat a new project\n is_new = True\n\n # Get all the user. Everyone may be member of the project\n users = User.objects.all()\n\n # If the view received data, try to creat a project\n if request.method == \"POST\":\n form = ProjectForm(request.user, request.POST)\n if form.is_valid():\n # Save the new project in the database\n form.save(commit=True)\n\n # redirect to the project list display page\n return redirect(\"projects\")\n else:\n # creat an empty form for the template\n form = ProjectForm(request.user)\n\n return render(request, 'newProject.html', locals())", "def new_user(request, **kwargs):\n varz = {}\n varz.update(kwargs)\n c = RequestContext(request, varz)\n t = loader.get_template('family_info/add_edit_user.html')\n return HttpResponse(t.render(c))", "def start_view(request):\n\n if request.user and Employee.objects.filter(user__pk=request.user.pk).exists():\n if Employee.objects.get(user__pk=request.user.pk).is_manager:\n return HttpResponseRedirect('/dashboard')\n else:\n return HttpResponseRedirect('/employee/show/%d/' % request.user.employee_user.first().pk)\n else:\n return HttpResponseRedirect('/login/')", "def new(self, **kw):\n curso = self.get_curso_actual()\n ejercicios = curso.ejercicios_activos\n q_score = Entrega.selectBy(inicio=None, fin=None, entregador=identity.current.user.get_inscripcion(curso)).count()\n if len(ejercicios) == 0:\n flash(_(u'Al momento, no hay ningún ejercicio con instancias de entrega abiertas.'))\n if q_score > 0:\n flash(_(u'Usted tiene un ejercicio en espera de ser aceptado. 
No envíe otro hasta tener la respuesta del primero.'))\n ejercicio_options = [(0, 'Seleccionar')] + [(e.id, e.shortrepr()) for e in ejercicios]\n return dict(name=name, namepl=namepl, form=form, values=kw, options=dict(ejercicio=ejercicio_options))", "def create_new_project(self,\n customer_name,\n contract_date,\n project_info,\n project_datest,\n project_dateend,\n project_budget,\n project_actst=None,\n project_actend=None,\n project_cost=None):\n\n customer_info = self.query_customer(cus_name=customer_name)\n\n if customer_info:\n # Search for project manager in the same region as the customer.\n customer_region_id = customer_info[0][1]\n get_employee_query = \"select employee.emp_id, emp_lname, emp_fname from employee, \" \\\n \"empskill, skill, region where employee.emp_id = \" \\\n \"empskill.emp_id and empskill.skill_id = \" \\\n \"skill.skill_id and skill.skill_descrpt = \" \\\n \"'Project Manager' and region.region_id = \" \\\n \"employee.region_id and region.region_id = '{}' \"\n try:\n self.dbCursor.execute(\n get_employee_query.format(customer_region_id))\n employee_info = self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)\n finally:\n if len(employee_info) == 0:\n ErrorMessageWindow(\"No suitable project manager found!\")\n else:\n if customer_info and employee_info:\n if len(customer_info) > 1:\n MultiRowScreen(customer_info, \"project\")\n else:\n cus_id = customer_info[0][0]\n emp_id = employee_info[0][0]\n optional_inputs = [project_actst, project_actend,\n project_cost]\n\n query = \"insert into project(cus_id, emp_id, proj_date, \" \\\n \"proj_descrpt, proj_estdatest, proj_estdateend, \" \\\n \"proj_estbudget) values ('{}', '{}', '{}', '{}', \" \\\n \"'{}', '{}', '{}') \".format(cus_id,\n emp_id,\n contract_date,\n project_info,\n project_datest,\n project_dateend,\n project_budget)\n\n yes_options = False\n for item in optional_inputs:\n if item != \"\":\n yes_options = True\n\n if yes_options is False:\n try:\n self.dbCursor.execute(query)\n SuccessMessageWindow(\"Insert success!\")\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)\n finally:\n self.dbConnection.commit()\n else:\n option_names = [\"proj_actdatest\",\n \"proj_actdateend\",\n \"proj_actcost\"]\n options_index = []\n filled_options = []\n\n index = 0\n for item in optional_inputs:\n if item != \"\":\n options_index.append(index)\n filled_options.append(item)\n index += 1\n update_query = \"update project set \"\n\n j = 0\n for i in options_index:\n if j < len(filled_options) - 1:\n update_query += \"{}='{}', \".format(\n option_names[i], filled_options[j]\n )\n else:\n update_query += \"{}='{}' \".format(\n option_names[i], filled_options[j]\n )\n j += 1\n\n try:\n try:\n self.dbCursor.execute(query)\n SuccessMessageWindow(\"Insert success!\")\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)\n finally:\n self.dbConnection.commit()\n\n self.dbCursor.execute(update_query)\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)\n finally:\n self.dbConnection.commit()\n else:\n ErrorMessageWindow(\"Customer not found!\")", "def create_team(request):\n if request.method == 'POST':\n email = request.session.get('email', None)\n team_name = request.POST.get('team_name', None)\n team = Team(name=team_name)\n team.save()\n\n message = \"Team created, please use the cool search feature and assign yourself to the team\"\n messages.add_message(request, messages.INFO, message)\n return redirect('teamsapp:teams')\n else:\n raise 
Http404('Not allowed')", "def create(self, values):\n\t\temployee_id = values.get('employee_id', False)\n\t\tprint(\"the val in the dict\", values)\n\t\tif (values.get('date_from') and values.get('date_to')) == False:\n\t\t\tcurrent = datetime.strftime(datetime.today().date(),'%Y-%m-%d')\n\t\t\tvalues.update({'allocate_date': current})\n\t\t\tprint(values)\n\t\tif not values.get('department_id'):\n\t\t\tvalues.update({'department_id': self.env['hr.employee'].browse (employee_id).department_id.id})\n\n\t\tholiday = super (Holidays, self.with_context (mail_create_nolog=True, mail_create_nosubscribe=True)).create(values)\n\t\tholiday.add_follower (employee_id)\n\n\t\t# Trilok code for policies\n\t\tpolicy_id = holiday.env['leaves.policy'].search(\n\t\t\t[('leave_type', '=', holiday.holiday_status_id.id), ('company_id', '=', self.env.user.company_id.id)])\n\t\t# print (\"policy iddddddddddddddd\",policy_id)\n\t\temp_type = holiday.employee_id.employee_type.id\n\t\tfor val in policy_id:\n\t\t\tif val.employee_type.id == emp_type:\n\t\t\t\tfor employee in holiday.employee_id:\n\t\t\t\t\tif holiday.type == 'remove':\n\t\t\t\t\t\tquery = '''select count(*) from hr_holidays where upper(type) = upper('rEMove')and upper(state) = upper('Validate') and create_date::date between to_date(concat(date_part('Year',now()::date),'-01-01'),'yyyy-mm-dd') and now()::date and employee_id = %s''' % employee.id\n\t\t\t\t\t\tholiday.env.cr.execute(query)\n\t\t\t\t\t\tquery_result = holiday.env.cr.dictfetchone()\n\t\t\t\t\t\t# print(\"query_result\", query_result)\n\t\t\t\t\t\tif val.min_app_per_year > 0 and query_result[\"count\"] > val.min_app_per_year:\n\t\t\t\t\t\t\traise ValidationError(\"maximum number of applications per year is {} days\".format(val.min_app_per_year))\n\n\t\t\t\t\t\tquery1 = '''select create_date::date,date_to::date from hr_holidays where upper(type) = \n\t\t\t\t\t\tupper('rEMove') and upper(state) = upper('Validate') and create_date::date between to_date(concat(date_part('Year',now()::date),'-01-01'),'yyyy-mm-dd') \n\t\t and now()::date and employee_id = %s order by create_date desc limit 1'''\\\n\t\t\t\t\t\t\t\t % employee.id\n\t\t\t\t\t\tholiday.env.cr.execute(query1)\n\t\t\t\t\t\tquery_result1 = holiday.env.cr.fetchall()\n\t\t\t\t\t\tif query_result1 is not None:\n\t\t\t\t\t\t\t# print(\"query_resulttttttttttttttttttttttttttt\", query_result1)\n\t\t\t\t\t\t\t# print(\"query_resulttttttttttttttttttttttttttt\", query_result1[0][0], query_result1[0][1])\n\t\t\t\t\t\t\tcre_date = datetime.strptime(query_result1[0][0], '%Y-%m-%d')\n\t\t\t\t\t\t\tdate_to = datetime.strptime(query_result1[0][1], '%Y-%m-%d')\n\t\t\t\t\t\t\t# print(\"cre_date\", cre_date, type(cre_date))\n\t\t\t\t\t\t\tcurrent_dt = fields.Datetime.now()\n\t\t\t\t\t\t\t# cdate=datetime.strptime(current_dt,'%Y-%m-%d')\n\t\t\t\t\t\t\tcurrent_date = datetime.strptime(current_dt.split(\" \")[0], '%Y-%m-%d')\n\t\t\t\t\t\t\tdays = (current_date - date_to).days\n\t\t\t\t\t\t\tif val.min_leave_app_gap > 0 and days > val.min_leave_app_gap:\n\t\t\t\t\t\t\t\traise ValidationError(\n\t\t\t\t\t\t\t\t\t\"Minimum gap between two application should be atleast {} days\".format(\n\t\t\t\t\t\t\t\t\t\tval.min_leave_app_gap))\n\n\t\treturn holiday", "def get(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def create(ctx, name, company, mail, age):\n client = Client(name,company,mail,age)\n client_service = ClientService(ctx.obj['clients_table']) \n 
client_service.create_client(client)", "def create_report():\n\n LocalCreateReportForm = CreateReportForm.get_instance()\n for department in Department.query.all():\n if len(department.fields) > 0:\n LocalCreateReportForm.add_department(department)\n\n form = LocalCreateReportForm()\n form.user_id.data = current_user.id\n if form.validate_on_submit():\n # Add the new report to the database\n db.session.add(form.report)\n db.session.commit()\n\n return redirect(url_for('reports.my_reports'))\n else:\n flash_form_errors(form)\n return render_template('reports/create.html', form=form)" ]
[ "0.6837375", "0.67006433", "0.66819894", "0.6620597", "0.6475171", "0.64458215", "0.6421396", "0.6402227", "0.6344205", "0.6299245", "0.626302", "0.6249063", "0.6230102", "0.6173451", "0.61211884", "0.6115985", "0.60931104", "0.6089694", "0.60648185", "0.60492533", "0.6024815", "0.6018779", "0.60011125", "0.5945085", "0.5935821", "0.5935559", "0.5907307", "0.58863086", "0.5843938", "0.5832742", "0.58273184", "0.5820082", "0.58191174", "0.58040535", "0.5802135", "0.5802118", "0.57591116", "0.57044876", "0.5683875", "0.56830883", "0.56809473", "0.5640301", "0.5603535", "0.55627626", "0.55573297", "0.5555687", "0.55512905", "0.5551239", "0.5548571", "0.55412364", "0.55387497", "0.55355614", "0.55232096", "0.5520027", "0.55127555", "0.5509443", "0.5466416", "0.5464148", "0.5462178", "0.54458326", "0.544169", "0.5434255", "0.5407483", "0.53888625", "0.5380676", "0.5378748", "0.53737295", "0.53695273", "0.5364078", "0.536236", "0.53404", "0.53267235", "0.5296156", "0.52769965", "0.5261777", "0.52617586", "0.52546006", "0.5254237", "0.5242755", "0.52423865", "0.52407", "0.5238995", "0.52385795", "0.5224302", "0.5223507", "0.5220026", "0.521377", "0.5213466", "0.5197204", "0.5196364", "0.51938367", "0.5189096", "0.5179037", "0.51735216", "0.5172467", "0.51682466", "0.5156989", "0.514343", "0.5141377", "0.5140699" ]
0.8076184
0
View for creating many employees in company
def create_many_employees(request, company_id=None): company = Company.objects.get(pk=company_id) current_employee = Employee.objects.get(user__pk=request.user.pk) if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk: raise PermissionDenied() if "upload" in request.POST: form = UploadEmployeesForm(request.POST, request.FILES) if form.is_valid(): data = csv_to_dict(request.FILES['file']) request.session['upload_employees'] = data return JsonResponse(status=201, data=form.cleaned_data) # return TemplateResponse( # request, # 'mus/create_many_employees_uploaded.html', # dict(data=data, company=company) # ) elif "next" in request.POST: data = request.session['upload_employees'] marked_data = list() fields = request.POST.getlist('field[]') for row in data: new_row = dict(is_manager=False) for i, item in enumerate(row): field_id = int(fields[i]) if field_id == 1: new_row['first_name'] = item elif field_id == 2: new_row['last_name'] = item elif field_id == 3: p = item.partition(" ") new_row['first_name'] = p[0] new_row['last_name'] = p[2] elif field_id == 4: new_row['email'] = item elif field_id == 5: new_row['username'] = item marked_data.append(new_row) formset = EmployeeRowFormSet(initial=marked_data) # TypeQS = DevelopmentPlanType.objects.filter(Q(company=company) | Q(company__isnull=True)) # for form in formset: # form.fields['development_plan_type'].queryset = TypeQS return TemplateResponse( request, 'mus/create_many_employees_form.html', dict(formset=formset, company=company) ) elif "next2" in request.POST: formset = EmployeeRowFormSet(request.POST) if formset.is_valid(): data = list()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_employee(request, company_id):\n\n company = Company.objects.get(pk=company_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n logUnauthorizedAccess(\"User tried to create_employee\", request)\n raise PermissionDenied()\n form = EmployeeForm(request, initial=dict(company=company))\n form.fields['manager'].queryset = Employee.objects.filter(is_manager=True, company=company)\n # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(\n # Q(company=company) | Q(company__isnull=True))\n # data = {\n # 'employee_form': form.cleaned_data,\n # 'company': company.cleaned_data[\"name\"]\n # }\n\n return TemplateResponse(\n request,\n 'mus/create_employee_form.html',\n {\n 'employee_form': form,\n }\n )\n # data = {\n # 'employee_form': form.cleaned_data,\n # 'company': company.cleaned_data[\"name\"]\n # }\n # return JsonResponse(status=200, data=data)", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def all_employees(request, company_id=None):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n company_super_user = current_employee.isCompanySuperUserOrHigher()\n if company_id:\n company = Company.objects.get(pk=company_id)\n else:\n company = current_employee.company\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n raise PermissionDenied()\n change_company_form = ChangeCompanyForm(initial=dict(company=company))\n return TemplateResponse(\n request,\n 'all_employees.html',\n {\n 'user': request.user,\n 'company_super_user': company_super_user,\n 'company': company,\n 'change_company_form': change_company_form,\n }\n )", "def create_leader_model(request, company_id):\n\n errors = {'noactions': []}\n company = Company.objects.get(pk=company_id)\n currentEmpl = Employee.objects.get(user__pk=request.user.pk)\n \"\"\":type : Employee \"\"\"\n\n if not currentEmpl.isEnsoUser() and currentEmpl.company.pk != company.pk:\n raise PermissionDenied()\n\n if currentEmpl.isCompanySuperUserOrHigher():\n employeeQS = Employee.objects.filter(\n company__pk=company_id\n )\n else:\n employeeQS = Employee.objects.filter(\n Q(manager=currentEmpl),\n company__pk=company_id\n )\n\n form = MultiLeaderModelForm(request.POST or None)\n form.fields['employees'].queryset = employeeQS\n\n if request.method == 'POST' and form.is_valid():\n\n employees = form.cleaned_data['employees']\n \"\"\":type : list[Employee] \"\"\"\n\n pdf_response = get_leader_model_pdf(currentEmpl, employees)\n\n if isinstance(pdf_response, HttpResponse):\n return pdf_response\n else:\n errors = pdf_response\n\n print(errors)\n\n return TemplateResponse(\n request,\n 'mus/create_leader_model.html', {\n 'form': form,\n 'company': company,\n 'errors': errors\n }\n )", "def employee():\n return Response(render_template('employee/employee.html'))", "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def list(self, request):\n employee = self.controller.retrieve_all_employees()\n serializer = data_serializers.PresentEmployeeDataSerializer(employee, many=True)\n return Response(serializer.data)", "def create_employee_from_applicant(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n hr_employee = self.pool.get('hr.employee')\n model_data = self.pool.get('ir.model.data')\n 
act_window = self.pool.get('ir.actions.act_window')\n emp_id = False\n for applicant in self.browse(cr, uid, ids, context=context):\n address_id = contact_name = False\n if applicant.partner_id:\n address_id = self.pool.get('res.partner').address_get(cr, uid, [applicant.partner_id.id], ['contact'])['contact']\n contact_name = self.pool.get('res.partner').name_get(cr, uid, [applicant.partner_id.id])[0][1]\n if applicant.job_id and (applicant.partner_name or contact_name):\n applicant.job_id.write({'no_of_hired_employee': applicant.job_id.no_of_hired_employee + 1})\n create_ctx = dict(context, mail_broadcast=True)\n\n pes=self.browse(cr,uid,ids)[0]\n coy=pes.partner_name\n\n ##### Susunan Keluarga ayah/ibu #####\n le=self.pool.get('hr_recruit.suskel1')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context)\n prod_ids=[] \n for pr in lele:\n prod_ids.append((0,0, {'name':pr.name,'kelamin':pr.kelamin,'kota_id':pr.kota_id.id,'tgl_lahir':pr.tgl_lahir,'type_id':pr.type_id.id,'pekerjaan':pr.pekerjaan,'susunan':pr.susunan}))\n \n ###### Susunan Keluarga Suami/istri #####\n le=self.pool.get('hr_recruit.suskel2')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids1=[] \n for pr in lele:\n prod_ids1.append((0,0, {'susunan':pr.susunan,'name':pr.name,'kelamin':pr.kelamin,'kota_id':pr.kota_id.id,'tgl_lahir':pr.tgl_lahir,'type_id':pr.type_id.id,'pekerjaan':pr.pekerjaan})) \n \n ###### riwayat Pendidikan #######\n le=self.pool.get('hr_recruit.rwt_pend')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids2=[] \n for pr in lele:\n prod_ids2.append((0,0, {'name':pr.name,'jurusan':pr.jurusan.id,'tempat':pr.tempat,'tahun_msk':pr.tahun_msk,'tahun_klr':pr.tahun_klr,'ijazah':pr.ijazah.id})) \n \n ###### bahasa ######\n le=self.pool.get('hr_recruit.bahasa')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids3=[] \n for pr in lele:\n prod_ids3.append((0,0, {'name':pr.name.id,'tulis':pr.tulis.id,'lisan':pr.lisan.id})) \n \n ##### Riwayat Pekerjaan ####\n le=self.pool.get('hr_recruit.rwt_krj')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids4=[] \n for pr in lele:\n prod_ids4.append((0,0, {'no':pr.no,'name':pr.name,'tempat':pr.tempat,'tahun_msk':pr.tahun_msk,'tahun_klr':pr.tahun_klr,'jabatan':pr.jabatan,'gaji':pr.gaji,'alasan':pr.alasan})) \n \n ###### Koneksi Internal #####\n le=self.pool.get('hr_recruit.kon1')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids5=[] \n for pr in lele:\n prod_ids5.append((0,0, {'employee_id':pr.employee_id.name,'alamat':pr.alamat,'job_id':pr.job_id.id,'telepon':pr.telepon})) \n \n ###### Koneksi Eksternal ####\n le=self.pool.get('hr_recruit.kon2')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids6=[]\n for pr in lele: \n prod_ids6.append((0,0, {'name':pr.name,'alamat':pr.alamat,'jabatan':pr.jabatan,'telepon':pr.telepon})) \n\n ####### create Employee ######## \n emp_id = hr_employee.create(cr, uid, {'name': applicant.partner_name or applicant.name,\n 'job_id': applicant.job_id.id,\n 'department_id' : applicant.department_id.id,\n 'address_id2' : applicant.job_id.address_id.id,\n #### informasi Probadi ####\n 'kelamin':applicant.jen_kel,\n 'blood' : applicant.blood,\n 'agama' : applicant.agama_id.id,\n 
'birthday' : applicant.tgl_lahir,\n 'place_of_birth' : applicant.kota_id.name,\n 'marital':applicant.status,\n 'sjk_tanggal' : applicant.sjk_tanggal,\n 'mobile_phone':applicant.partner_phone,\n 'country_id' : applicant.country_id.id,\n\n #### Pendidikan ####\n 'type_id':applicant.type_id.id,\n 'bid_id':applicant.bidang_id.id,\n 'jurusan_id':applicant.jurusan_id.id,\n 'pt_id':applicant.pt_id.id,\n 'gelar_id':applicant.gelar_id.id,\n\n #### alamat DOmisili ####\n 'country_id1':applicant.country_id1.id,\n 'prov_id':applicant.prov_id.id,\n 'kab_id' : applicant.kab_id.id,\n 'kec_id':applicant.kec_id.id,\n 'alamat1' : applicant.alamat1,\n 'kodepos' :applicant.kode1,\n 'telp1' : applicant.telp1,\n\n #### kartu identitas ####\n 'jenis_id': applicant.jenis_id,\n 'ktp' : applicant.no_id,\n 'tgl_berlaku' : applicant.tgl_berlaku,\n # 'issued_id' : applicant.dikeluarkan.id,\n \n #### Alamat Sesuai KTP #### \n 'country_id2':applicant.country_id2.id,\n 'prov_id2':applicant.prov_id2.id,\n 'kab_id2':applicant.kab_id2.id,\n 'kec_id2':applicant.kec_id2.id,\n 'alamat2' : applicant.alamat2,\n 'kodepos1':applicant.kode2,\n 'telp2' : applicant.telp2,\n \n # 'status': applicant.status,\n #### IDS ####\n 'susunan_kel1_ids' : prod_ids,\n 'susunan_kel2_ids':prod_ids1,\n 'rwt_pend_ids':prod_ids2,\n 'bahasa_ids':prod_ids3,\n 'rwt_krj_ids':prod_ids4,\n 'koneksi1_ids':prod_ids5,\n 'koneksi2_ids':prod_ids6, \n })\n self.write(cr, uid, [applicant.id], {'emp_id': emp_id}, context=context)\n self.pool['hr.job'].message_post(\n cr, uid, [applicant.job_id.id],\n body=_('New Employee %s Hired') % applicant.partner_name if applicant.partner_name else applicant.name,\n subtype=\"hr_recruitment.mt_job_applicant_hired\", context=context)\n else:\n raise osv.except_osv(_('Warning!'), _('You must define an Applied Job and a Contact Name for this applicant.'))\n\n action_model, action_id = model_data.get_object_reference(cr, uid, 'hr', 'open_view_employee_list')\n dict_act_window = act_window.read(cr, uid, [action_id], [])[0]\n if emp_id:\n dict_act_window['res_id'] = emp_id\n dict_act_window['view_mode'] = 'form,tree'\n return dict_act_window", "def company():\n\n company = Company.objects.create(name='Tre G.M.B.H.', country='Germany')\n return company", "def get(self):\n resultado = EmployeeModel.query.all()\n return resultado", "def list(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def create(self, request):\n serializer = data_serializers.CreateEmployeeSerializer(data=request.data)\n\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n print(F\"Request employee Data: {serializer.data}\")\n\n try:\n new_employee = self.controller.create_employee(request_data=request_data)\n serializer = data_serializers.PresentEmployeeDataSerializer(new_employee)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (domain_exceptions.EmployeeIDIsNotUnique,\n domain_exceptions.WorkArrangementPercentageOutOfRange,\n domain_exceptions.TeamDoesNotExist,\n domain_exceptions.TeamHasALeader,\n domain_exceptions.WorkArrangementPercentageNull\n ) as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def show_all_employees(self):\n try:\n employees = self.admin_repository.show_all_employees()\n if employees:\n for employee in employees:\n print(\"Employee Id : {}\".format(employee[0]))\n print(\"Name 
: {}\".format(employee[1]))\n print(\"Email : {}\".format(employee[2]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def test_new_employee_crud_methods(self):\n response = self.client.get(\n '/employees/', kwargs={'employer_id': self.employee.id})\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(Employee.objects.all()), 1)\n\n # Test that a new employee can be added\n response = self.client.post(\n '/employees/',\n {'name': 'MAdtraxx!!', 'employer': self.employer.id},\n kwargs={'pk': self.employer.id})\n self.assertEqual(response.status_code, 201)\n self.assertEqual(Employee.objects.count(), 2)\n\n # Test that employee info may be edited\n response = self.client.put('/employees/1/',\n {'name': 'Ashley',\n 'employer': self.employer.id},\n kwargs={'employer_id': self.employee.id,\n 'pk': self.employee.id})\n self.assertEqual(response.status_code, 200)", "def show_all_employees():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n \n \n ## right now this is ALL users... \n \n return render_template(\"employee_display.html\", employees = employees)", "def get(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def create_employee(self):\n try:\n name = input(\"Enter name: \")\n if not name.isalpha():\n print(\"Invalid data format. Name should contain only alphabets. \")\n return False\n email = input(\"Enter email: \")\n if not InputValidations.validate_email(email):\n return False\n employee = EmployeeModel(name=name, email=email)\n self.admin_repository.create_employee(employee)\n print(\"Employee created successfully!\")\n return True\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def handle(self, *args, **kwargs):\n seeder = Seed.seeder()\n seeder.add_entity(User, 20)\n\n seeder.add_entity(EmployeeMptt, 20, {\n 'user': lambda x: User.objects.filter(employeemptt=None).first(),\n 'parent': lambda x: EmployeeMptt.objects.order_by(\"?\").first(),\n 'level': lambda x: random.randint(0, 4),\n })\n seeder.execute()", "def index(request):\n return render(request, 'companies/index.html', {'companyform': CompanyForm()})", "def get_manager_employees(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n manager_employees = Employee.objects.filter(manager=current_employee, development_plan_type=None).all()\n if manager_employees:\n emp_list=[]\n for emp in manager_employees:\n emp_data={}\n emp_data[\"id\"] = emp.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"manager_id\"] = emp.manager.id\n # emp_data[\"status_questions\"] = emp.status_questions\n # employee_role = EmployeeRole.objects.filter(employee=emp).all()\n # name_role_list = []\n # for obj in employee_role:\n # name_role_list.append(obj.role.name)\n # emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n data = {\"employees:\": emp_list}\n return JsonResponse(status=201, data=data)\n else:\n return JsonResponse(\"The user with id={} isn't a manager for any user\".format(current_employee.user.id),\n 
status=404)", "def populate_employees():\n employees = get_employees()\n\n db.session.bulk_save_objects(employees)\n db.session.commit()", "def get(self):\n employees = self.service.get_employees(strategy=selectinload)\n return self.schema.dump(employees, many=True), 200", "def post(self):\n try:\n employee = self.service.add_employee(self.schema, request.json)\n except ValidationError as error:\n return error.messages, 400\n return self.schema.dump(employee), 201", "def get(self,request,*args,**kwargs):\n\n\t\tsucursal = Sucursal.objects.get(id=kwargs['spk'])\n\n\t\tuser_form = UserForm()\n\t\templeado_form = EmpleadoForm( initial={'sucursal':sucursal.id} )\n\n\t\tforms = [user_form,empleado_form]\n\t\tcontext = {\n\t\t'section_title':'Nuevo Empleado',\n\t\t'button_text':'Crear',\n\t\t'sucursal':sucursal,\n\t\t'user_form':user_form,\n\t\t'empleado_form':empleado_form }\n\n\t\treturn render_to_response(\n\t\t\t'empleado/empleado_form.html',\n\t\t\tcontext,\n\t\t\tcontext_instance=RequestContext(request))", "def add_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee(id, name, email, office, extra_info, picture_location, research_group, '\n 'title, is_external, is_admin, is_active) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);',\n (obj.e_id, obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active))\n\n self.dbconnect.commit()\n return obj\n except:\n self.dbconnect.rollback()\n raise", "def test_website_companies_create(self):\n pass", "def create_emp(self, name, pos, dept):\n if pos.upper() == 'MANAGER':\n self.create_manager(name, pos, dept)\n elif pos.upper() == 'SENIOR':\n self.create_senior(name, pos, dept)\n elif pos.upper() == 'JUNIOR':\n self.create_junior(name, pos, dept)\n else:\n self.create_trainee(name, pos, dept)", "def employees(self, employees: object):\n\n self._employees = employees", "def employee_add(request, user_id):\n\n if request.method == \"GET\":\n employee = User.objects.get(pk=user_id)\n orgs = Organization.objects.filter(administrator=request.user)\n\n #desig_list = orgs.values_list('designations', flat=True)\n #designations = Designation.objects.filter(pk__in=desig_list)\n\n return render_to_response('organization/employee_add1.html',\n {'employee':employee, 'orgs': orgs},\n context_instance=RequestContext(request))\n\n user_id = request.POST.get('employee_id')\n org = request.POST.get('add2orgs') # id of selected org\n designations_list = request.POST.getlist('designations2add')#list of ids of selected designation\n\n try:\n user = User.objects.get(pk=user_id)# emp obj to add\n org = Organization.objects.get(pk=org)#selected org objects\n designations = Designation.objects.filter(pk__in=designations_list)#convert desig id in obj\n except Exception, e:\n # log this error\n print str(e)\n messages.error(request, str(e))\n user = org = designations = None\n\n if not (user and designations.count()):\n # add error message.\n messages.error(request, \"Select atleast one Organization and its Designation\")\n # redirect to the same page.\n return HttpResponseRedirect(request.path)\n\n #def send_email():\n # template = get_template('organization/confirmation_email.txt')\n # context = Context({'user':user,'org':o.organization,'desig':desig})\n # subject = u'confirmation email'\n # message = template.render(context)\n # send_mail(subject,message,settings.DEFAULT_FROM_EMAIL,['[email protected]'])\n\n # create visiting card(s) for 
the employee\n try:\n l=[]\n for desig in designations:\n if desig in org.designations.all():\n o,c = VisitingCards.objects.get_or_create(organization=org,\n designation=desig, user=user)\n\n if c:\n #send_email()\n template = get_template('organization/confirmation_email.txt')\n context = Context({'user':user,'org':o.organization,'desig':desig})\n subject = u'confirmation email'\n message = template.render(context)\n msg = (subject,message,settings.DEFAULT_FROM_EMAIL, ['[email protected]'])\n l.append(msg)\n #t=threading.Thread(target=send_email)\n #t.start()\n messages.success(request,\"Employee Added Succefully..!\")\n else:\n messages.info(request,\" %s Already Added ..!\" % user.username)\n else:\n messages.error(request,\"%s Doesnt have selected designation %s .!\" %(orgs, desig))\n raise Exception()\n if c:\n tupl=tuple(l)\n threading.Thread(send_mass_mail(tupl, fail_silently=False)).start()\n messages.success(request,\"Employee Added Succefully..!\")\n\n\n\n except:\n messages.error(request,\"something went wroung \")\n return HttpResponseRedirect(request.path)\n\n return HttpResponseRedirect(\"/org/user/list\")", "def post(self, request):\n data = request.data\n skill_data = data.pop('skills')\n Department_name = data.pop('department')\n department = Department.objects.get(name=Department_name)\n manager_name = data.pop('manager')\n manager = Manager.objects.get(name=manager_name)\n Employee = EmployeeDetail.objects.create(department=department, manager=manager, **data)\n Employee.save()\n for skill in skill_data:\n skill_add, create = Skill.objects.get_or_create(name=skill)\n Employee.skills.add(skill_add)\n return Response(\n data=request.data\n )", "def employee_detail(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n employee = Employee.objects.get(pk=int(employee_id))\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not current_employee.pk == int(employee_id):\n if not current_employee.is_manager or not current_employee.company.pk == employee.company.pk:\n if not current_employee.isCompanySuperUserOrHigher():\n return HttpResponse('unauthorized', status=401)\n\n user_files = get_files_for_employee(employee_id)\n\n if request.method == 'POST':\n\n upload_form = UploadFileToEmployeyForm(request.POST, request.FILES)\n form = EmployeeNoteForm(request.POST, instance=employee)\n\n if 'upload' in request.POST:\n if upload_form.is_valid():\n upload_form.handle_upload(employee_id, request.FILES['file'])\n\n return HttpResponseRedirect('/employee/show/{}?upload_status=ok#file-list'.format(employee_id))\n\n else:\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/employee/show/%d' % form.instance.pk)\n\n else:\n form = EmployeeNoteForm(instance=employee)\n upload_form = UploadFileToEmployeyForm()\n data = {}\n data[\"first_name\"] = employee.user.first_name\n data[\"last_name\"] = employee.user.last_name\n data[\"email\"] = employee.user.email\n data[\"is_manager\"] = employee.is_manager\n data[\"language_code\"] = employee.language_code\n employee_role = EmployeeRole.objects.filter(employee=employee).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n data[\"roles\"] = name_role_list\n return JsonResponse(status=201, data=data)\n # return TemplateResponse(\n # request,\n # 'mus/detail.html',\n # {\n # 'actions': actions,\n # 'employee': 
employee,\n # # 'development_plans': development_plans,\n # 'form': form,\n # 'upload_form': upload_form,\n # 'user_files': user_files\n # }\n # )", "def list(self, request):\n teams = self.controller.retrieve_all_teams_employees()\n serializer = data_serializers.PresentTeamEmployeeDataSerializer(teams, many=True)\n return Response(serializer.data)", "def office_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n office_form = OfficeForm()\n return render_to_response('office_form.html', {'form': office_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n office_form = OfficeForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if office_form.is_valid():\n of = office_form.save(commit=False)\n of.company = company\n of.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('office_form.html', \n {'form': office_form, 'form_errors': office_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def test_new_employer_crud_methods(self):\n response = self.client.post(\n '/employers/', self.new_employer_data, format='json')\n self.assertEqual(response.status_code, 201)\n self.assertEqual(len(Employer.objects.all()), 2)\n\n # test one employer retrieve\n response = self.client.get('/employers/1/')\n self.assertEqual(response.status_code, 200)\n self.assertIn('Andela', response.data['name'])\n\n # test one employer update\n response = self.client.put('/employers/1/',\n {'name': 'New Employer'})\n self.assertEqual(response.status_code, 200)\n self.assertIn('New Employer', response.data['name'])", "def management_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n management_form = ManagementForm()\n return render_to_response('management_form.html', {'form': management_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n management_form = ManagementForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if management_form.is_valid():\n mf = management_form.save(commit=False)\n mf.company = company\n mf.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('management_form.html', \n {'form': management_form, 'form_errors': management_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def employees_manager(request):\n # current_employee = Employee.objects.get(user__pk=request.user.pk)\n manager_list = Employee.objects.filter(manager=request.user.employee_user, is_manager=True)\n employee = Employee.objects.get(pk=request.user.employee_user.id)\n employee_dict = model_to_dict(employee)\n employee_dict['first_name'] = employee.user.first_name\n employee_dict['last_name'] = employee.user.last_name\n employee_dict['photo'] = 
employee.photo.url if employee.photo else ''\n print employee_dict\n if len(manager_list) > 0:\n result_list = list(manager_list)\n all_managers_list = found_all_managers(manager_list, result_list)\n else:\n data = {\"employee_managers\": employee_dict}\n return JsonResponse(data=data, content_type='application/json', safe=False)\n employees = list()\n for manager in all_managers_list:\n manager_dict = model_to_dict(manager)\n manager_dict['first_name'] = manager.user.first_name\n manager_dict['last_name'] = manager.user.last_name\n manager_dict['photo'] = manager.photo.url if manager.photo else ''\n employees.append(manager_dict)\n employees.append(employee_dict)\n\n data = {\"employee_managers\": employees}\n return JsonResponse(data=data, content_type='application/json', safe=False)", "def employee_list(request):\n response_data = []\n for emp in Employee.objects.all().values(\n 'id', 'first_name', 'last_name', 'age', 'address', 'city',\n 'state', 'country'):\n response_data.append(emp)\n return JsonResponse(response_data, safe=False)", "def get_create_employee_query(self):\n template = \"\"\"\n mutation createEmployee {{\n createEmployee(input: {{ {params} }}) {{\n employee {{\n name\n hiredOn\n salary\n departmentId\n }}\n }}\n }}\n \"\"\"\n # Add input parameters as needed\n input_params = 'name:\"{}\",'.format(self.name)\n\n if self.hired_on is not None:\n input_params += 'hiredOn: \"{}\", '.format(self.hired_on)\n\n if self.salary is not None:\n input_params += 'salary: {}, '.format(self.salary)\n\n if self.department_id is not None:\n input_params += 'departmentId: {}'.format(self.department_id)\n\n return template.format(params=input_params)", "def new_computer(request):\n\n if request.method == \"POST\":\n\n try:\n make = request.POST[\"make\"]\n model = request.POST[\"model\"]\n serial_no = request.POST[\"serial_no\"]\n purchase_date = request.POST[\"purchase_date\"]\n employee_id = request.POST[\"employee\"]\n employee = Employee.objects.get(pk=employee_id)\n\n if make is \"\" or model is \"\" or serial_no is \"\" or purchase_date is \"\":\n return render(request, \"agileHR/computer_new.html\", {\n \"error_message\": \"Please fill out all fields\",\n \"make\": make,\n \"model\": model,\n \"serial_no\": serial_no,\n \"purchase_date\": purchase_date\n })\n else:\n now = datetime.datetime.now()\n new_computer = Computer(make=make, model=model, serial_no=serial_no, purchase_date=purchase_date)\n new_computer.save()\n join = EmployeeComputer.objects.create(\n computer = new_computer,\n employee = employee,\n date_assigned = now\n )\n join.save()\n\n return HttpResponseRedirect(reverse(\"agileHR:computer_detail\", args=(new_computer.id,)))\n except KeyError:\n return render(request, \"agileHR/computer_new.html\", {\n \"error_message\": \"Please fill out all fields\"\n })\n else:\n # Get all computer assignment history\n computer_assignments = EmployeeComputer.objects.all()\n\n # Get employees who have had a computer but do not currently have one.\n need_computers = Employee.objects.exclude(employeecomputer__date_revoked=None).order_by('last_name')\n\n # Get employees who have never had a computer.\n never_computers = Employee.objects.exclude(employeecomputer__in=computer_assignments).order_by('last_name')\n\n # Combine the two querysets\n final_list = need_computers | never_computers\n\n context = {\n \"employees\": final_list\n }\n\n return render(request, \"agileHR/computer_new.html\", context)", "def dataset(request):\n from trytond.transaction import Transaction\n from 
trytond.tests.test_tryton import USER, CONTEXT, DB_NAME, POOL\n\n Party = POOL.get('party.party')\n Company = POOL.get('company.company')\n Country = POOL.get('country.country')\n Subdivision = POOL.get('country.subdivision')\n Employee = POOL.get('company.employee')\n Currency = POOL.get('currency.currency')\n User = POOL.get('res.user')\n FiscalYear = POOL.get('account.fiscalyear')\n Sequence = POOL.get('ir.sequence')\n AccountTemplate = POOL.get('account.account.template')\n Account = POOL.get('account.account')\n Journal = POOL.get('account.journal')\n PaymentGateway = POOL.get('payment_gateway.gateway')\n AccountCreateChart = POOL.get('account.create_chart', type=\"wizard\")\n\n with Transaction().start(DB_NAME, USER, context=CONTEXT) as transaction:\n # Create company, employee and set it user's current company\n usd, = Currency.create([{\n 'name': 'US Dollar',\n 'code': 'USD',\n 'symbol': '$',\n }])\n\n country_us, = Country.create([{\n 'name': 'United States',\n 'code': 'US',\n }])\n subdivision_florida, = Subdivision.create([{\n 'name': 'Florida',\n 'code': 'US-FL',\n 'country': country_us.id,\n 'type': 'state'\n }])\n subdivision_california, = Subdivision.create([{\n 'name': 'California',\n 'code': 'US-CA',\n 'country': country_us.id,\n 'type': 'state'\n }])\n\n company_party, = Party.create([{\n 'name': 'ABC Corp.',\n 'addresses': [('create', [{\n 'name': 'ABC Corp.',\n 'street': '247 High Street',\n 'zip': '94301-1041',\n 'city': 'Palo Alto',\n 'country': country_us.id,\n 'subdivision': subdivision_california.id,\n }])],\n 'contact_mechanisms': [('create', [{\n 'type': 'phone',\n 'value': '123456789'\n }])]\n }])\n\n employee_party, = Party.create([{\n 'name': 'Prakash Pandey',\n }])\n company, = Company.create([{\n 'party': company_party.id,\n 'currency': usd.id,\n }])\n employee, = Employee.create([{\n 'party': employee_party.id,\n 'company': company.id,\n }])\n User.write(\n [User(USER)], {\n 'main_company': company.id,\n 'company': company.id,\n }\n )\n CONTEXT.update(User.get_preferences(context_only=True))\n\n # Create fiscal year\n date = datetime.date.today()\n\n post_move_sequence, = Sequence.create([{\n 'name': '%s' % date.year,\n 'code': 'account.move',\n 'company': company.id,\n }])\n\n fiscal_year, = FiscalYear.create([{\n 'name': '%s' % date.year,\n 'start_date': date + relativedelta(month=1, day=1),\n 'end_date': date + relativedelta(month=12, day=31),\n 'company': company.id,\n 'post_move_sequence': post_move_sequence.id,\n }])\n FiscalYear.create_period([fiscal_year])\n\n # Create minimal chart of account\n account_template, = AccountTemplate.search([\n ('parent', '=', None),\n ('name', '=', 'Minimal Account Chart')\n ])\n\n session_id, _, _ = AccountCreateChart.create()\n create_chart = AccountCreateChart(session_id)\n create_chart.account.account_template = account_template\n create_chart.account.company = company\n create_chart.transition_create_account()\n\n receivable, = Account.search([\n ('kind', '=', 'receivable'),\n ('company', '=', company.id),\n ])\n payable, = Account.search([\n ('kind', '=', 'payable'),\n ('company', '=', company.id),\n ])\n create_chart.properties.company = company\n create_chart.properties.account_receivable = receivable\n create_chart.properties.account_payable = payable\n create_chart.transition_create_properties()\n\n account_revenue, = Account.search([\n ('kind', '=', 'revenue')\n ])\n account_expense, = Account.search([\n ('kind', '=', 'expense')\n ])\n\n # Create customer\n customer, = Party.create([{\n 'name': 'John 
Doe',\n 'addresses': [('create', [{\n 'name': 'John Doe',\n 'street': '250 NE 25th St',\n 'zip': '33137',\n 'city': 'Miami, Miami-Dade',\n 'country': country_us.id,\n 'subdivision': subdivision_florida.id,\n }])],\n 'contact_mechanisms': [('create', [{\n 'type': 'phone',\n 'value': '123456789'\n }])]\n }])\n\n cash_journal, = Journal.search(\n [('type', '=', 'cash')], limit=1\n )\n Journal.write([cash_journal], {\n 'debit_account': account_expense.id\n })\n\n stripe_gateway = PaymentGateway(\n name='Credit Card - Stripe',\n journal=cash_journal,\n provider='stripe',\n method='credit_card',\n stripe_api_key=\"sk_test_Xw6QdFU31e8mcmcdeMt7DoiE\",\n test=True\n )\n stripe_gateway.save()\n\n result = {\n 'customer': customer,\n 'company': company,\n 'stripe_gateway': stripe_gateway,\n }\n\n transaction.commit()\n\n def get():\n from trytond.model import Model\n\n for key, value in result.iteritems():\n if isinstance(value, Model):\n result[key] = value.__class__(value.id)\n return namedtuple('Dataset', result.keys())(**result)\n\n return get", "def add_employee(self, first_name, last_name):\n self.switch_main_menu(\"PIM\")\n self.click_menu(\"Add Employee\")\n self.pim = AddEmployee(self.driver)\n self.pim.add_user_employee(first_name, last_name)", "def create(self, vals):\n if not vals.get('nik_number'):\n vals['nik_number'] = self.generate_nik(vals)\n return super(Employee, self).create(vals)", "def create_job_detail(company_name, job_title, application_deadline, job_listing_url, state, city, application_listed, salary):\n\n job_detail = JobDetail(company_name = company_name, job_title = job_title, application_deadline = application_deadline, job_listing_url = job_listing_url, state = state , city = city, application_listed = application_listed, salary = salary)\n db.session.add(job_detail)\n db.session.commit()\n\n return job_detail", "def employees_json(request):\n # current_employee = Employee.objects.get(user__pk=request.user.pk)\n employee_list = Employee.objects.filter(manager=request.user.employee_user)\n employees = list()\n for employee in employee_list:\n manager_dict = model_to_dict(employee)\n manager_dict['first_name'] = employee.user.first_name\n manager_dict['last_name'] = employee.user.last_name\n employees.append(manager_dict)\n data = {\"employees\": employees}\n return JsonResponse(data=data, content_type='application/json', safe=False)", "def get_employees(self):\n return self.employees", "def competitors_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n competitors_form = CompetitorsForm()\n return render_to_response('competitors_form.html', {'form': competitors_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n competitors_form = CompetitorsForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if competitors_form.is_valid():\n cf = competitors_form.save(commit=False)\n\n #verify if other companies with the same info exists anywhere\n try: \n comparison = Competitors.objects.get(name=cf.name,company= company)\n \n if str(comparison.name) != str(cf.name):\n cf.company = company\n cf.save()\n \n else:\n form_errors = {\"Name - The competitor \" + str(comparison.name).capitalize() + 
\" has been already created for \"+ str(company.name).capitalize() + \".\"}\n return render_to_response('competitors_form.html', \n {'form': competitors_form, 'form_errors': form_errors, 'company':company},\n context_instance=RequestContext(request))\n\n except Competitors.DoesNotExist :\n cf.company = company\n cf.save()\n\n\n \n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('competitors_form.html', \n {'form': competitors_form, 'form_errors': competitors_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def add(request):\n if request.method == 'POST':\n form = CompanyForm(request.POST, request.FILES)\n if form.is_valid():\n form.instance.owner = request.user\n form.save()\n url = reverse('companies_list_all')\n return HttpResponseRedirect(url)\n else:\n form = CompanyForm()\n\n context = dict(form=form)\n return render(request, 'companies/add.html', context)", "def detail(request, company_id):\n company = get_object_or_404(Company, pk=company_id)\n\n company_form = CompanyForm(instance=company)\n contact_form = ContactCreationForm()\n\n return render(request, 'companies/detail.html', {\n 'company_detail': company,\n 'company_form': company_form,\n 'contact_form': contact_form\n })", "def getEmployees(self):\n return self.employees", "def list_all(request):\n companies = Company.objects.order_by('-created')\n context = dict(companies=companies)\n return render(request, 'companies/all.html', context)", "def index(request):\n\n context = {'employees': User.objects.select_related('profile').filter(is_staff=True).order_by('first_name')}\n return render(request, 'Employees/index.html', context)", "def test_employee_creation(self):\n helper = EmployeeHelper(name='Andrew', hired_on='2019-10-01T00:00:00', salary=50000, department_id=1)\n\n # Returned result is an OrderedDict\n result = self.client.execute(helper.get_create_employee_query())['data']['createEmployee']['employee']\n\n self.assertEqual(result['name'], helper.name)\n self.assertEqual(result['hiredOn'], helper.hired_on)\n self.assertEqual(result['salary'], helper.salary)\n self.assertEqual(result['departmentId'], helper.department_id)", "def setEmployees(self, employees):\n self.employees = employees", "def create_calendar(request):\n if request.method == 'POST':\n\n form = CalendarForm(request.POST)\n \n if form.is_valid():\n calendar = form.save(commit=False) # prvent form from saving since we need to link company\n calendar.company = request.user.company\n calendar.save()\n return redirect('appointment:calendar_list')\n else:\n form = CalendarForm()\n return render(request, 'calendar_form.html', {'form': form})", "def get_employees(self):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('select * from employee')\n\n employees = list()\n for row in cursor:\n employee = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])\n employees.append(employee)\n return employees", "def generateEmployees(self):\r\n\r\n # Name\r\n maleNames = ['Perry Lovan', 'Horacio Arvidson', 'Gale Skipworth', 'Joshua Lodge', 'Noble Shutter', 'Kristopher Talor', 'Jarod Harrop', 'Joan Henrichs', 'Wilber Vitiello', 'Clayton Brannum', 'Joel Sennett', 'Wiley Maffei', 'Clemente Flore', 'Cliff Saari', 'Miquel Plamondon', 'Erwin Broadus', 'Elvin Defibaugh', 'Ramon Vaquera', 'Roberto Koval', 'Micah Sumter', 'Wyatt Cambareri', 'Jamal Delarosa', 'Franklyn 
Hayles', 'Riley Haslett', 'Robt Fincher', 'Abraham Denzer', 'Darius Jude', 'Phillip Sunderman', 'August Kindel', 'Jospeh Mawson', 'Damion Postma', 'Gregorio Pasco', 'Rosendo Downing', 'Chance Plascencia', 'Jewell Pankratz', 'Jerrell Tarrance', 'Michal Bliss', 'Josue Larocque', 'Aaron Harpster', 'Zack Hildebrant', 'Frank Souders', 'Lindsay Bechard', 'Agustin Marks', 'Mathew Fredericksen', 'Ivan Hanline', 'Michael Otto', 'Max Oberlander', 'Ricky Mckellar', 'Bernard Friedt', 'King Lorentzen']\r\n femaleNames = ['Lorretta Vansickle', 'Loura Steimle', 'Neomi Fritz', 'Vernie Vanderveen', 'Dede Poehler', 'Margarete Espinoza', 'Leda Leonardo', 'Fae Strand', 'Nichol Winford', 'Danika Ridgeway', 'Elvira Balentine', 'Sharell Xie', 'Sheree Booker', 'Emely Conine', 'Justina Kleve', 'Pia Maxton', 'Sophia Lark', 'Nilsa Albee', 'Felipa Seman', 'Jeraldine Watkins', 'Susann Sowards', 'Asha Irion', 'Shay Koran', 'Rosio Jahn', 'Rachal Slaven', 'Beryl Byron', 'Jona Lira', 'Margert Strite', 'Talia Beauregard', 'Jacqueline Vella', 'Rolande Mccready', 'Margret Hickerson', 'Precious Confer', 'Evita Nicolai', 'Fredda Groner', 'Laquanda Bracken', 'Alana Saddler', 'Melania Harring', 'Shae Everette', 'Marlyn Mcfalls', 'Madeline Nicols', 'Fonda Webster', 'Fumiko Steffy', 'Virginia Sprinkle', 'Lula Frisch', 'Mari Mulherin', 'Alecia Remillard', 'Jeanna Halderman', 'Ocie Waldrep', 'Theresa Knouse']\r\n\r\n for i in range(self.num_of_employees):\r\n\r\n # Clock in an hour before opening, 6 hours after, or 12 hours after\r\n clockIn = random.choice([7, 13, 19])\r\n\r\n # Clock out after 5 hours, 10 hours, or 15 hours\r\n clockOut = random.choice([13, 19, 23])\r\n while clockOut <= clockIn:\r\n clockOut = random.choice([13, 19, 23])\r\n\r\n # Hourly wage\r\n wage = random.choice([8, 9, 10, 12, 20])\r\n\r\n gender = random.choice(['M', 'F'])\r\n if gender == 'M':\r\n name = random.choice(maleNames)\r\n else:\r\n name = random.choice(femaleNames)\r\n\r\n self.c.execute(\"INSERT INTO Employee (Name, ClockIn, ClockOut, Wage) VALUES (?, ?, ?, ?)\", (name, clockIn, clockOut, wage))\r\n self.conn.commit()\r\n\r\n if self.print_employees:\r\n print(\"\\nName:\", name)\r\n print(\"Clock in:\", clockIn)\r\n print(\"Clock out:\", clockOut)\r\n print(\"Wage:\", wage)", "def employees_json_id(request, employee_id):\n curent_employee = Employee.objects.get(pk=int(employee_id))\n if curent_employee.is_manager:\n employee_list = Employee.objects.filter(manager=curent_employee)\n employees = list()\n for employee in employee_list:\n manager_dict = model_to_dict(employee)\n manager_dict['first_name'] = employee.user.first_name\n manager_dict['last_name'] = employee.user.last_name\n manager_dict['photo'] = employee.photo.url if employee.photo else ''\n employees.append(manager_dict)\n data = {\"employees\": employees}\n else:\n return JsonResponse(status=400, data={\"error\": \"Employee with id={} not is_manager\".format(int(employee_id))})\n return JsonResponse(data=data, content_type='application/json', safe=False)", "def clients_moral_personne_new_view(request):\n # Check authorization\n if not Utils.has_permission(request, request.registry.settings['client_edition']):\n raise exc.HTTPForbidden()\n\n # Get clientMoralPersonne instance\n model = Utils.set_model_record(ClientMoralPersonne(), request.params)\n\n request.dbsession.add(model)\n\n return Utils.get_data_save_response(Constant.SUCCESS_SAVE.format(ClientMoralPersonne.__tablename__))", "def create_company(list_of_data):\n ID = common.generate_random(list_of_data)\n user_input = 
ui.get_inputs(list_labels, title)\n user_input.insert(0, ID)\n list_of_data.append(user_input)\n with open(\"company/company_data.csv\",\"w\") as f:\n for i in range(len(list_of_data)):\n row = ','.join(list_of_data[i])\n f.write(row + '\\n')", "def add_employee():\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n form = SignUp_Form()\n \n\n if form.validate_on_submit():\n try: \n employee = Employee.register(\n username = form.username.data,\n password = form.password.data, \n email = form.email.data, \n first_name = form.first_name.data,\n last_name = form.last_name.data,\n hire_date = form.hire_date.data, \n is_admin = form.is_admin.data,\n )\n\n db.session.add(employee)\n\n db.session.commit()\n except IntegrityError:\n flash(\"Email already in use\", \"danger\")\n return render_template(\"/admin/add_user.html\", form = form)\n\n flash(\"Employee Added!\", \"success\")\n return redirect(\"/administrator\")\n else:\n\n return render_template(\"/admin/add_user.html\", form = form)", "def create_coll(request):\n form = CollForm(request)\n c = {}\n return render(request, \"browse/curate.html\", c)", "def edit_employee(request, employee_id):\n employee = Employee.objects.get(pk=int(employee_id))\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n\n assert isinstance(employee, Employee)\n assert isinstance(current_employee, Employee)\n\n # if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n # raise PermissionDenied()\n\n if not current_employee.hasAccessTo(employee):\n raise PermissionDenied()\n\n form = EditEmployeeForm(request.user, employee, {\n 'first_name': employee.user.first_name,\n 'last_name': employee.user.last_name,\n 'email': employee.user.email,\n 'manager': employee.manager.id if employee.manager else 0,\n 'language_code': employee.language_code,\n # 'development_plan_type': employee.development_plan_type.id,\n 'is_manager': employee.is_manager\n })\n if 'manager' in form.fields:\n managerQS = Employee.objects.filter(is_manager=True, company__pk=employee.company.pk)\n form.fields['manager'].queryset = managerQS\n # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(\n # Q(company__pk=employee.company.pk) | Q(company__isnull=True)\n # )\n is_me = employee.user.pk == request.user.pk\n return TemplateResponse(\n request,\n 'mus/edit_employee_form.html',\n {\n 'edit_employee_form': form,\n 'employee_id': employee_id,\n 'me': is_me,\n 'name': employee.user.get_full_name()\n }\n )", "def post(self):\n data = EmployeeRegister.parser.parse_args()\n new_employee_id = str(uuid.uuid4())\n\n while EmployeeModel.find_by_id(new_employee_id):\n # if this id is already in use\n new_employee_id = str(uuid.uuid4())\n\n employee = EmployeeModel(**data, employee_id=new_employee_id)\n employee.save_to_db()\n\n return {\"message\": \"Employee successfully added to the system\"}, 201 # 201 - Created", "def create(self, values):\n if values.get('country_id', False):\n country = self.env['res.country'].browse(values['country_id'])\n if country.code == 'SA':\n values.update({'is_saudi': True})\n else:\n values.update({'is_saudi': False})\n\n res = super(HrEmployee, self).create(values)\n if values.get('user_id', False):\n self.user_id.write({'employee_id': res})\n return res", "def employees(self) -> object:\n return self._employees", "def 
employees(employee_id=None):\n\tif not employee_id:\n\t\temployee_data = _serialize_list(Employee.query.all())\n\telse:\n\t\temployee_data = _serialize_model(Employee.query.filter_by(id=employee_id).first())\n\n\tresp = jsonify(employee_data)\n\treturn resp", "def create_employee(self,personal_identity):\r\n new_emp = Employee(*personal_identity)\r\n registration_str = new_emp.get_registration_str()\r\n\r\n return_value = self.save_object_to_DB(\"employee\",registration_str)\r\n return return_value", "def createEmployee():\n form = CreateEmployeeForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n staff = Staff(first_name=form.first_name.data, last_name=form.last_name.data, password=hashed_password, \n email=form.email.data, role=form.role.data, location=form.location.data)\n db.session.add(staff)\n db.session.commit()\n flash(f'Employee Added To Database', category='Success')\n return redirect(url_for('login'))\n return render_template('new_employee.html', title=\"Register\", form=form)", "def show_employee(emp_id, fields=None):\n ret = {}\n if fields is None:\n fields = \",\".join(\n (\n \"canUploadPhoto\",\n \"department\",\n \"displayName\",\n \"firstName\",\n \"id\",\n \"jobTitle\",\n \"lastName\",\n \"location\",\n \"mobilePhone\",\n \"nickname\",\n \"photoUploaded\",\n \"photoUrl\",\n \"workEmail\",\n \"workPhone\",\n \"workPhoneExtension\",\n )\n )\n\n status, result = _query(action=\"employees\", command=emp_id, args={\"fields\": fields})\n\n root = ET.fromstring(result)\n\n ret = {\"id\": emp_id}\n for item in root:\n ret[next(iter(item.values()))] = item.text\n return ret", "def show_all_information():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n all_certs = employee_certification.query.all()\n \n return render_template(\"admin.html\", employees = employees, all_certs = all_certs)", "def create(self, request):\n serializer = data_serializers.TeamLeaderOrEmployeeRequestDataSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n try:\n respond_data = self.controller.add_team_employee(request_data=request_data)\n serializer = data_serializers.PresentTeamEmployeeDataSerializer(respond_data)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (\n domain_exceptions.TeamDoesNotExist,\n domain_exceptions.EmployeeDoesNotExist,\n domain_exceptions.EmployeeIsATeamMember\n )as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def current_employee(self, request: Request) -> Response:\n serializer = self.get_serializer_class()\n serializer = serializer(request.user, context={'request': request})\n return Response(serializer.data)", "def test_employee_model(self):\n self.assertEqual(Employee.query.count(), 2)", "def action_add(request, employee_id=None):\n if employee_id:\n employee = Employee.objects.get(pk=employee_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n else:\n employee = request.user.employee_user.first()\n if request.method == 'POST':\n form = ActionForm(request.POST)\n if form.is_valid():\n 
form.save(request.user, employee)\n return HttpResponseRedirect('/action/%d' % form.instance.pk)\n else:\n form = ActionForm()\n return TemplateResponse(\n request,\n 'mus/action_edit.html',\n dict(\n form=form\n )\n )", "def create(self, validated_data):\n admins = Group.objects.create(\n name=validated_data['name'] + ' Admins')\n accountants = Group.objects.create(\n name=validated_data['name'] + ' Accountants')\n validated_data['accountants'] = accountants\n validated_data['admins'] = admins\n company = super(CompanySerializer, self).create(validated_data)\n company.save()\n return company", "def retrieve(self, request, pk=None):\n employee = self.get_employee_object(pk)\n print(F\"Employee: {employee}\")\n serializer = data_serializers.PresentEmployeeDataSerializer(employee)\n return Response(serializer.data)", "def setUp(self):\n\n self.user = self.make_user()\n self.employee = Employee.objects.create(\n cpf=\"974.220.200-16\",\n user=self.user,\n departament=Employee.ADMINISTRATION\n )", "def _check_employee(self):\n\n for record in self:\n\n if record.nik_number:\n # find duplicate nik\n employee_ids = self.search([('id', 'not in', self.ids), ('nik_number', '=', record.nik_number)])\n if employee_ids:\n error_msg = _(\"There is duplicate of Employee Identity Number.\")\n raise ValidationError(error_msg)\n\n # check nik format. it required base_indonesia\n if not record._check_nik(record):\n error_msg = _(\"NIK did not match with Company Code.\")\n raise ValidationError(error_msg)\n\n if record.identification_id:\n employee_ids = self.search([('id', 'not in', self.ids), ('identification_id', '=', record.identification_id)])\n if employee_ids:\n error_msg = _(\"There is duplicate of Identification Number.\")\n raise ValidationError(error_msg)\n\n return True", "def perform_create(self, serializer):\n if self.request.data.get('user_type', None) == 'employee':\n serializer.save(is_staff=False)\n else:\n serializer.save()", "def office_view(request, slug, id):\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n office_reference = get_object_or_404(Office, id=id,company=company)\n\n return render_to_response('office_form.html', \n {'details': office_reference,'info':office_reference},\n context_instance=RequestContext(request))", "def create(self, values):\n\t\temployee_id = values.get('employee_id', False)\n\t\tprint(\"the val in the dict\", values)\n\t\tif (values.get('date_from') and values.get('date_to')) == False:\n\t\t\tcurrent = datetime.strftime(datetime.today().date(),'%Y-%m-%d')\n\t\t\tvalues.update({'allocate_date': current})\n\t\t\tprint(values)\n\t\tif not values.get('department_id'):\n\t\t\tvalues.update({'department_id': self.env['hr.employee'].browse (employee_id).department_id.id})\n\n\t\tholiday = super (Holidays, self.with_context (mail_create_nolog=True, mail_create_nosubscribe=True)).create(values)\n\t\tholiday.add_follower (employee_id)\n\n\t\t# Trilok code for policies\n\t\tpolicy_id = holiday.env['leaves.policy'].search(\n\t\t\t[('leave_type', '=', holiday.holiday_status_id.id), ('company_id', '=', self.env.user.company_id.id)])\n\t\t# print (\"policy iddddddddddddddd\",policy_id)\n\t\temp_type = holiday.employee_id.employee_type.id\n\t\tfor val in policy_id:\n\t\t\tif val.employee_type.id == emp_type:\n\t\t\t\tfor employee in holiday.employee_id:\n\t\t\t\t\tif holiday.type == 'remove':\n\t\t\t\t\t\tquery = '''select count(*) from hr_holidays where upper(type) = upper('rEMove')and upper(state) = 
upper('Validate') and create_date::date between to_date(concat(date_part('Year',now()::date),'-01-01'),'yyyy-mm-dd') and now()::date and employee_id = %s''' % employee.id\n\t\t\t\t\t\tholiday.env.cr.execute(query)\n\t\t\t\t\t\tquery_result = holiday.env.cr.dictfetchone()\n\t\t\t\t\t\t# print(\"query_result\", query_result)\n\t\t\t\t\t\tif val.min_app_per_year > 0 and query_result[\"count\"] > val.min_app_per_year:\n\t\t\t\t\t\t\traise ValidationError(\"maximum number of applications per year is {} days\".format(val.min_app_per_year))\n\n\t\t\t\t\t\tquery1 = '''select create_date::date,date_to::date from hr_holidays where upper(type) = \n\t\t\t\t\t\tupper('rEMove') and upper(state) = upper('Validate') and create_date::date between to_date(concat(date_part('Year',now()::date),'-01-01'),'yyyy-mm-dd') \n\t\t and now()::date and employee_id = %s order by create_date desc limit 1'''\\\n\t\t\t\t\t\t\t\t % employee.id\n\t\t\t\t\t\tholiday.env.cr.execute(query1)\n\t\t\t\t\t\tquery_result1 = holiday.env.cr.fetchall()\n\t\t\t\t\t\tif query_result1 is not None:\n\t\t\t\t\t\t\t# print(\"query_resulttttttttttttttttttttttttttt\", query_result1)\n\t\t\t\t\t\t\t# print(\"query_resulttttttttttttttttttttttttttt\", query_result1[0][0], query_result1[0][1])\n\t\t\t\t\t\t\tcre_date = datetime.strptime(query_result1[0][0], '%Y-%m-%d')\n\t\t\t\t\t\t\tdate_to = datetime.strptime(query_result1[0][1], '%Y-%m-%d')\n\t\t\t\t\t\t\t# print(\"cre_date\", cre_date, type(cre_date))\n\t\t\t\t\t\t\tcurrent_dt = fields.Datetime.now()\n\t\t\t\t\t\t\t# cdate=datetime.strptime(current_dt,'%Y-%m-%d')\n\t\t\t\t\t\t\tcurrent_date = datetime.strptime(current_dt.split(\" \")[0], '%Y-%m-%d')\n\t\t\t\t\t\t\tdays = (current_date - date_to).days\n\t\t\t\t\t\t\tif val.min_leave_app_gap > 0 and days > val.min_leave_app_gap:\n\t\t\t\t\t\t\t\traise ValidationError(\n\t\t\t\t\t\t\t\t\t\"Minimum gap between two application should be atleast {} days\".format(\n\t\t\t\t\t\t\t\t\t\tval.min_leave_app_gap))\n\n\t\treturn holiday", "def test_create_bulk_academic(self):\n pass", "def register():\n add_employee = True\n form = RegistrationForm()\n if form.validate_on_submit():\n employee = Employee(email=form.email.data,\n username=form.username.data,\n glad_id=form.glad_id.data,\n tel_no=form.tel_no.data,\n role_id=2 , ##form.role_id.data,\n password=form.password.data)\n\n # add employee to the database\n db.session.add(employee)\n db.session.commit()\n flash('You have successfully registered! 
You may now login.')\n\n # redirect to the login page\n return redirect(url_for('auth.login'))\n\n # load registration template\n return render_template('auth/register.html', form=form, title='Register')", "def action_list(request, employee_id=None):\n if employee_id:\n employee = Employee.objects.get(pk=employee_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n else:\n employee = request.user.employee_user.first()\n actions = employee.action_set.all()\n return TemplateResponse(\n request,\n 'mus/action_list.html',\n dict(\n actions=actions,\n employee=employee\n )\n )", "def test_access_employee(self):\n # Employee can't see any SO\n with self.assertRaises(AccessError):\n self.order.with_user(self.company_data['default_user_employee']).read()\n # Employee can't edit the SO\n with self.assertRaises(AccessError):\n self.order.with_user(self.company_data['default_user_employee']).write({'team_id': self.company_data['default_sale_team'].id})\n # Employee can't create the SO\n with self.assertRaises(AccessError):\n self.env['sale.order'].with_user(self.company_data['default_user_employee']).create({\n 'partner_id': self.partner_a.id,\n })\n # Employee can't delete the SO\n with self.assertRaises(AccessError):\n self.order.with_user(self.company_data['default_user_employee']).unlink()", "def add_employee(self, empl):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee values(default,%s,%s,%s,%s,%s,%s,%s,%s)',\n (empl.name, empl.email, empl.office, empl.research_group, empl.title, empl.internOrExtern,\n empl.active, empl.promotor))\n cursor.execute('SELECT LASTVAL()')\n eid = cursor.fetchone()[0]\n empl.id = eid\n # get id and return updated object\n self.dbconnect.commit()\n except(Exception, self.dbconnect.get_error()) as error:\n self.dbconnect.rollback()\n raise Exception('\\nUnable to save Employee!\\n(%s)' % (error))", "def test_new_model_creation(self):\n initial_count = Employer.objects.count()\n self.new_employer.save()\n new_count = Employer.objects.count()\n self.assertNotEqual(initial_count, new_count)\n\n self.name2 = 'employe223'\n self.new_employee = Employee(\n name=self.name2, employer=self.new_employer)\n self.new_employee.save()\n self.assertEqual(len(Employee.objects.all()), 1)", "def assign_employee(id):\r\n check_admin()\r\n\r\n employee = Employee.query.get_or_404(id)\r\n\r\n # prevent admin from being assigned a department or role\r\n if employee.is_admin:\r\n abort(403)\r\n\r\n form = EmployeeAssignForm(obj=employee)\r\n if form.validate_on_submit():\r\n employee.department = form.department.data\r\n employee.role = form.role.data\r\n db.session.add(employee)\r\n db.session.commit()\r\n flash('You have successfully assigned a department and role.')\r\n\r\n # redirect to the roles page\r\n return redirect(url_for('admin.list_employees'))\r\n\r\n return render_template('admin/employees/employee.html',\r\n employee=employee, form=form,\r\n title='Assign Employee')", "def test_create_company_3(self):\n companies_data = [\n {\n \"_id\": \"sbucks\",\n \"headquarters\": \"Seattle\",\n \"name\": \"Starbucks Inc.\",\n },\n {\n \"_id\": \"salesforce\",\n \"headquarters\": \"Toronto\",\n \"name\": \"Salesforce Inc.\",\n },\n ]\n\n resp = self.app.post('/companies', data=json.dumps(companies_data),\n content_type='application/json')\n self.assertEqual(resp.status_code, HTTPStatus.CREATED)\n\n # cleanup\n 
for company in companies_data:\n del_resp = self.app.delete(f'/companies/{company[\"_id\"]}')\n self.assertEqual(del_resp.status_code, HTTPStatus.OK)", "def test_create_company_props_using_post(self):\n pass", "def get_employees(self, active_only):\n cursor = self.dbconnect.get_cursor()\n\n if active_only:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee WHERE is_active = TRUE')\n else:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee')\n\n employees = list()\n for row in cursor:\n obj = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n employees.append(obj)\n return employees", "def test_api_can_get_all_employees(self):\n res = self.client().get(service_url_emp)\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))\n self.assertIn('name2', str(res.data))", "def profile_detail(request, employee_id):\n current_employee = Employee.objects.filter(user__pk=request.user.pk).first()\n employee = Employee.objects.get(pk=int(employee_id))\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not current_employee.pk == int(employee_id):\n if not current_employee.is_manager or not current_employee.company.pk == employee.company.pk:\n if not current_employee.isCompanySuperUserOrHigher():\n return HttpResponse('unauthorized', status=401)\n\n user_files = get_files_for_employee(employee_id)\n\n if request.method == 'POST':\n\n upload_form = UploadFileToEmployeyForm(request.POST, request.FILES)\n form = EmployeeNoteForm(request.POST, instance=employee)\n\n if 'upload' in request.POST:\n if upload_form.is_valid():\n upload_form.handle_upload(employee_id, request.FILES['file'])\n\n return HttpResponseRedirect('/employee/show/{}?upload_status=ok#file-list'.format(employee_id))\n\n else:\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/employee/show/%d' % form.instance.pk)\n\n else:\n form = EmployeeNoteForm(instance=employee)\n upload_form = UploadFileToEmployeyForm()\n data = {}\n data[\"user\"] = employee.user.first_name + \" \" + employee.user.last_name\n data[\"id\"] = str(employee.user.pk)\n data[\"title\"] = employee.title\n data[\"email\"] = employee.user.email\n data[\"phone\"] = employee.phone\n company_dict = {}\n company_dict[\"name\"] = employee.company.name\n company_dict[\"id\"] = str(employee.company.pk)\n\n data[\"company\"] = company_dict\n employee_username = \"\"\n emp = Employee.objects.filter(manager=employee.manager).all()\n for obj in emp:\n employee_username = obj.manager.user.username if obj.manager else \"\"\n employee_first = obj.manager.user.first_name if obj.manager else \"\"\n employee_last = obj.manager.user.last_name if obj.manager else \"\"\n manager_dict = {}\n manager_dict[\"name\"] = employee_username\n manager_dict[\"id\"] = employee_id\n manager_dict[\"first_last_name\"] = employee_first + \" \" + employee_last\n data[\"manager\"] = manager_dict\n data[\"date_of_birth\"] = employee.date_of_birth\n data[\"status_questions\"] = employee.status_questions\n data[\"notes\"] = employee.notes\n employee_role = 
EmployeeRole.objects.filter(employee=employee).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n data[\"roles\"] = name_role_list\n data[\"potenciale\"] = employee.potenciale\n data[\"date_start\"] = employee.created_at\n data[\"is_manager\"] = employee.is_manager\n data[\"date_finish\"] = \"\"\n data['photo'] = employee.photo.url if employee.photo else ''\n\n return JsonResponse(status=200, data=data)\n # return TemplateResponse(\n # request,\n # 'mus/detail.html',\n # {\n # 'actions': actions,\n # 'employee': employee,\n # # 'development_plans': development_plans,\n # 'form': form,\n # 'upload_form': upload_form,\n # 'user_files': user_files\n # }\n # )", "def action_invoice_create(self, cr, uid, ids, context=None):\n res = False\n\n journal_obj = self.pool.get('account.journal')\n inv_obj = self.pool.get('account.invoice')\n inv_line_obj = self.pool.get('account.invoice.line')\n fiscal_obj = self.pool.get('account.fiscal.position')\n\n for order in self.browse(cr, uid, ids, context=context):\n# pay_acc_id = order.partner_id.property_account_payable.id\n #use a new method to get the account_id\n pay_acc_id = self._get_inv_pay_acc_id(cr,uid,order) \n journal_ids = journal_obj.search(cr, uid, [('type', '=','purchase'),('company_id', '=', order.company_id.id)], limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error!'),\n _('Define purchase journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n\n # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line\n inv_lines = []\n for po_line in order.order_line:\n #check if this line have quantity to generate invoice, by johnw\n if po_line.product_qty <= po_line.invoice_qty:\n continue \n# if po_line.product_id:\n# acc_id = po_line.product_id.property_account_expense.id\n# if not acc_id:\n# acc_id = po_line.product_id.categ_id.property_account_expense_categ.id\n# if not acc_id:\n# raise osv.except_osv(_('Error!'), _('Define expense account for this company: \"%s\" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))\n# else:\n# acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category').id \n #use a new method to get the account_id, by johnw \n acc_id = self._get_inv_line_exp_acc_id(cr,uid,order,po_line)\n fpos = order.fiscal_position or False\n acc_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)\n\n inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)\n #update the quantity to the quantity, by johnw\n inv_line_data.update({'quantity':(po_line.product_qty - po_line.invoice_qty)})\n inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)\n inv_lines.append(inv_line_id)\n\n po_line.write({'invoiced':True, 'invoice_lines': [(4, inv_line_id)]}, context=context)\n \n #if no lines then return direct, by johnw\n if len(inv_lines) == 0:\n continue\n \n # get invoice data and create invoice\n inv_data = {\n 'name': order.partner_ref or order.name,\n 'reference': order.partner_ref or order.name,\n 'account_id': pay_acc_id,\n 'type': 'in_invoice',\n 'partner_id': order.partner_id.id,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'journal_id': len(journal_ids) and journal_ids[0] or False,\n 'invoice_line': [(6, 0, inv_lines)],\n 'origin': order.name,\n 'fiscal_position': order.fiscal_position.id or False,\n 'payment_term': order.payment_term_id.id or False,\n 'company_id': order.company_id.id,\n }\n inv_id = inv_obj.create(cr, 
uid, inv_data, context=context)\n\n # compute the invoice\n inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)\n\n # Link this new invoice to related purchase order\n order.write({'invoice_ids': [(4, inv_id)]}, context=context)\n res = inv_id\n return res", "def development_plan_details(request, development_plan_id): #, employee_id ):\n # employee = Employee.objects.get(user__pk=request.user.pk)\n # employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n current_employee = Employee.objects.filter(user__pk=request.user.pk).first()\n all_employees = development_plan.employee_relation.all()\n\n try:\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = development_plan.id\n dev_plan[\"deleted\"] = development_plan.deleted\n if development_plan.type:\n dev_plan[\"type\"] = development_plan.type.name\n # dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects.get(development_plan = development_plan)\\\n # .finished_at\n\n dev_plan[\"created_at\"] = development_plan.created_at\n dev_plan[\"created_by\"] = development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n emp_data[\"dev_plan_finished_at\"] = DevelopmentPlanToEmployeeRelation\\\n .objects.get(employee=emp,\n development_plan = development_plan)\\\n .finished_at\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = 
question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id).first() #employee=current_employee\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for slider in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id).first() #employee=current_employee\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n except DevelopmentPlan.DoesNotExist:\n return JsonResponse(data={\"details\":\"Development Plan with this id doesn't exist\"}, status=404)", "def test_get_all_companies(self):\n create_company()\n res = self.client.get(ALL_COMPANIES_LIST)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def create_person(self):", "def test_consultants_created(self):\n # Currently, there is just 1 Organization in the database, the org_existing\n org_existing = OrganizationFactory(name='Existing Organization')\n self.assertEqual(Organization.objects.count(), 1)\n\n self.call_command(filename='power_plant_import/tests/data/six_rows.csv')\n\n # Get the Projects that were created during the import\n (project_ouessant1, project_ouessant2, project_liaoning) = self.get_created_projects()\n # The CSV file mentions 1 consultant for the project_liaoning\n self.assertEqual(set(project_liaoning.consultants.all()), set([org_existing]))\n for project in [project_ouessant1, project_ouessant2]:\n self.assertEqual(project.consultants.count(), 0)", "def create_company(self):\n self.driver.get(f'{self.base_url}/company-register')\n\n # Fill the company name\n enter_random_string = WebDriverWait(self.driver, 20).until(expected_conditions.presence_of_element_located((By.NAME, 'companyName')))\n enter_random_string.send_keys(self.random_string)\n\n # Press \"Save and Continue\"\n self.driver.find_element_by_xpath('/html/body/div[1]/div/div[3]/div/div[2]/div/div[2]/div[2]/div[2]/div/button').click()\n\n # Wait for 
the page to load (5 seconds)\n sleep(5)", "def get_companies(request):\n companies = Company.objects.all()\n context={'user_id': request.user.id}\n serializer = CompanySerializers(companies, context=context)\n return Response(serializer.data)" ]
[ "0.72610486", "0.6829904", "0.6690067", "0.6635526", "0.6073183", "0.60264933", "0.6014927", "0.5973413", "0.5951959", "0.5936964", "0.59301126", "0.578499", "0.57461524", "0.5707354", "0.56899834", "0.5680803", "0.56750655", "0.56400555", "0.5560561", "0.55447835", "0.55424845", "0.55176455", "0.55141896", "0.5499269", "0.5493136", "0.54692084", "0.5468984", "0.545608", "0.544112", "0.54403776", "0.54360574", "0.5435608", "0.5414263", "0.5351661", "0.534456", "0.5337683", "0.5334408", "0.53251547", "0.53179127", "0.53114253", "0.5307227", "0.52977586", "0.5289643", "0.5275061", "0.52655655", "0.524389", "0.5233132", "0.5208989", "0.52043855", "0.52013654", "0.5200853", "0.5189117", "0.5185018", "0.51836246", "0.5179382", "0.5158975", "0.5150256", "0.5146443", "0.5122766", "0.51175183", "0.510071", "0.5085301", "0.50760466", "0.50634027", "0.5055401", "0.5041521", "0.50407606", "0.5040443", "0.50391966", "0.5036953", "0.50327545", "0.5018312", "0.5015283", "0.49942437", "0.49851015", "0.49850887", "0.49780467", "0.4963211", "0.49530244", "0.49491492", "0.49472818", "0.4944084", "0.4941188", "0.49403805", "0.4922321", "0.4918772", "0.49142382", "0.49111414", "0.4908037", "0.49067014", "0.48956642", "0.48885483", "0.4888194", "0.48860955", "0.48826247", "0.48824325", "0.48765534", "0.4871511", "0.48705098", "0.48658463" ]
0.67414385
2
View for editing employee
def edit_employee(request, employee_id):
    employee = Employee.objects.get(pk=int(employee_id))
    current_employee = Employee.objects.get(user__pk=request.user.pk)

    assert isinstance(employee, Employee)
    assert isinstance(current_employee, Employee)

    # if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:
    #     raise PermissionDenied()

    if not current_employee.hasAccessTo(employee):
        raise PermissionDenied()

    form = EditEmployeeForm(request.user, employee, {
        'first_name': employee.user.first_name,
        'last_name': employee.user.last_name,
        'email': employee.user.email,
        'manager': employee.manager.id if employee.manager else 0,
        'language_code': employee.language_code,
        # 'development_plan_type': employee.development_plan_type.id,
        'is_manager': employee.is_manager
    })
    if 'manager' in form.fields:
        managerQS = Employee.objects.filter(is_manager=True, company__pk=employee.company.pk)
        form.fields['manager'].queryset = managerQS
    # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(
    #     Q(company__pk=employee.company.pk) | Q(company__isnull=True)
    # )
    is_me = employee.user.pk == request.user.pk
    return TemplateResponse(
        request,
        'mus/edit_employee_form.html',
        {
            'edit_employee_form': form,
            'employee_id': employee_id,
            'me': is_me,
            'name': employee.user.get_full_name()
        }
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def edit_employee(employee_id):\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employee = Employee.query.get_or_404(employee_id)\n form = Edit_User_Form(obj = employee)\n \n #form.location.choices = db.session.query(Location.id, Location.site_name).all()\n \n #form.certs.choices = db.session.query(Certs.id , Certs.cert_name).all()\n\n if form.validate_on_submit():\n \n employee.email = form.email.data, \n employee.first_name = form.first_name.data,\n employee.last_name = form.last_name.data,\n employee.hire_date = form.hire_date.data, \n employee.is_admin = form.is_admin.data\n\n \n db.session.commit()\n \n flash(f\"{employee.first_name} {employee.last_name} has been saved\", \"success\")\n return redirect(\"/administrator\")\n else:\n\n return render_template(\"/admin/edit_user.html\", employee = employee, form = form)", "def enterprise_edit(request):\r\n action = tool.get_param_by_request(request.GET, 'action', \"add\", str)\r\n career_id = tool.get_param_by_request(request.GET, 'careerId', 0, int)\r\n\r\n enterprise = APIResult()\r\n c = None\r\n if action == \"add\":\r\n c = {\"career_id\": career_id, \"action\": action}\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_edit.html\", c,\r\n context_instance=RequestContext(request))\r\n\r\n if action == \"edit\" and (not career_id):\r\n _id = tool.get_param_by_request(request.GET, 'enterpriseId', 0, int)\r\n enterprise = api_enterprise.get_career_page_enterprise_by_id(_id)\r\n c = {\"enterprises\": enterprise.result()[0], \"action\": action}\r\n if enterprise.is_error():\r\n return render_to_response(\"404.html\", {}, context_instance=RequestContext(request))\r\n else:\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_edit.html\", c,\r\n context_instance=RequestContext(request))\r\n\r\n if action == \"edit\" and career_id:\r\n enterprise = api_enterprise.list_career_page_enterprise_by_career_id(career_id)\r\n c = {\"enterprises\": enterprise.result(), \"action\": action}\r\n\r\n if enterprise.is_error():\r\n return render_to_response(\"404.html\", {}, context_instance=RequestContext(request))\r\n else:\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_list.html\", c,\r\n context_instance=RequestContext(request))", "def edit_person(self, pk):", "def action_edit(request, action_id):\n employee = request.user.employee_user.first()\n action = Action.objects.get(pk=action_id)\n if not employee.isEnsoUser() and employee.company.pk != action.employee.company.pk:\n raise PermissionDenied()\n # if request.method == 'POST':\n form = ActionForm(request.POST, instance=action)\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/action/%d' % form.instance.pk)\n # else:\n # form = ActionForm(instance=action)\n # return TemplateResponse(\n # request,\n # 'mus/action_edit.html',\n # dict(\n # form=form,\n # edit=True\n # )\n # )\n\n # return JsonResponse(status=200, data={\"data\": form.instance.title, \"edit\": True})", "def employee_detail(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n employee = Employee.objects.get(pk=int(employee_id))\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not 
current_employee.pk == int(employee_id):\n if not current_employee.is_manager or not current_employee.company.pk == employee.company.pk:\n if not current_employee.isCompanySuperUserOrHigher():\n return HttpResponse('unauthorized', status=401)\n\n user_files = get_files_for_employee(employee_id)\n\n if request.method == 'POST':\n\n upload_form = UploadFileToEmployeyForm(request.POST, request.FILES)\n form = EmployeeNoteForm(request.POST, instance=employee)\n\n if 'upload' in request.POST:\n if upload_form.is_valid():\n upload_form.handle_upload(employee_id, request.FILES['file'])\n\n return HttpResponseRedirect('/employee/show/{}?upload_status=ok#file-list'.format(employee_id))\n\n else:\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/employee/show/%d' % form.instance.pk)\n\n else:\n form = EmployeeNoteForm(instance=employee)\n upload_form = UploadFileToEmployeyForm()\n data = {}\n data[\"first_name\"] = employee.user.first_name\n data[\"last_name\"] = employee.user.last_name\n data[\"email\"] = employee.user.email\n data[\"is_manager\"] = employee.is_manager\n data[\"language_code\"] = employee.language_code\n employee_role = EmployeeRole.objects.filter(employee=employee).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n data[\"roles\"] = name_role_list\n return JsonResponse(status=201, data=data)\n # return TemplateResponse(\n # request,\n # 'mus/detail.html',\n # {\n # 'actions': actions,\n # 'employee': employee,\n # # 'development_plans': development_plans,\n # 'form': form,\n # 'upload_form': upload_form,\n # 'user_files': user_files\n # }\n # )", "def edit(self, **kwargs):\n ...", "def edit(self):\n\n pass", "def edit_employee_hours(employee_id):\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employee = Employee.query.get_or_404(employee_id)\n form = Edit_Hours_Form(obj = employee)\n\n if form.validate_on_submit():\n \n employee.completed = form.completed.data, \n employee.required = form.required.data,\n \n db.session.commit()\n \n flash(f\"{employee.first_name} {employee.last_name} has been saved\", \"success\")\n return redirect(\"/administrator\")\n else:\n\n return render_template(\"/admin/edit_hours.html\", employee = employee, form = form)", "def task_edit(request, pk):\n task_manager = TaskManager.objects.get(id=pk)\n task = task_manager.task\n if request.method == 'POST':\n \ttask_form = TaskForm(request.POST)\n \ttask_owner = request.user\n\n \tif task_form.is_valid():\n \t\ttask_name = task_form.cleaned_data.get('task_name')\n \t\ttask_description = task_form.cleaned_data.get('task_description')\n\n \t\tif task_manager.task_owner == task_owner:\n \t\t\ttask.task_name = task_name\n \t\t\ttask.task_description = task_description\n \t\t\ttask.save()\n \t\t\treturn redirect('task_list')\n else:\n \tform = TaskForm(instance=task)\n\n context = {'form': form, 'task_manager':task_manager}\n return render(request, 'tasker/task_edit.html', context)", "def edit_form():\n return template (\"edit\")", "def editar_empresa(id):\n cadastrando_empresa = False\n\n empresa = Empresa.query.get_or_404(id)\n form = EditarEmpresaForm(obj=empresa)\n\n if form.validate_on_submit():\n empresa.nome = form.nome.data\n empresa.simbolo = form.simbolo.data\n empresa.regiao = form.regiao.data\n empresa.tipo = form.tipo.data\n empresa.abertura = form.abertura.data\n 
empresa.fechamento = form.fechamento.data\n empresa.zona = form.zona.data\n empresa.moeda = form.moeda.data\n db.session.commit()\n flash('Empresa editada com sucesso!')\n\n return redirect(url_for('home.listar_empresas'))\n\n form.nome.data = empresa.nome\n form.simbolo.data = empresa.abertura \n form.regiao.data = empresa.regiao\n form.tipo.data = empresa.tipo\n form.abertura = empresa.abertura\n form.fechamento = empresa.fechamento\n form.zona.data = empresa.zona\n form.moeda.data = empresa.moeda\n\n\n return render_template('home/empresa.html', action=\"Edit\",\n cadastrando_empresa=cadastrando_empresa, form=form,\n empresa=empresa, title=\"Editar empresa\")", "def UpdateEmployee():\n staff = current_user\n form = UpdateEmployeeForm()\n if form.validate_on_submit():\n staff.first_name=form.first_name.data.lower()\n staff.last_name=form.last_name.data.lower()\n staff.email=form.email.data\n staff.location=form.location.data\n db.session.commit()\n flash(f'Employee Updated', category='Success')\n elif request.method == 'GET':\n form.first_name.data=staff.first_name.capitalize()\n form.last_name.data=staff.last_name.capitalize()\n form.email.data=staff.email\n form.role.choices=[staff.role]\n form.location.data=staff.location\n return render_template('update_employee.html', title=\"Update Employee\", form=form)", "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def employee():\n return Response(render_template('employee/employee.html'))", "def edit_register(id):\n add_employee = False\n\n employee = Employee.query.get_or_404(id) #from table\n print('----update 1----')\n form = UpdateForm(obj=employee) #if not 404\n print('----update 2----')\n if form.validate_on_submit():\n employee.email = email=form.email.data\n employee.username=form.username.data\n employee.glad_id=form.glad_id.data\n employee.tel_no=form.tel_no.data\n employee.role_id=form.role_id.data\n employee.password=form.password.data\n\n # UPDATE employee to the database\n print('----update----',employee.role_id)\n db.session.commit()\n flash('You have successfully updated! 
')\n\n # # redirect to the login page\n # return redirect(url_for('auth.login'))\n\n # load registration template\n return render_template('auth/register.html', form=form, title='Update')", "def edit_view(request, title, modelform, instance=None, **kwargs):\n instance_form = modelform(request.POST or None, instance=instance)\n if instance_form.is_valid():\n instance = instance_form.save()\n messages.success(request, _(\"%s was edited.\") % instance)\n return redirect(instance.get_absolute_url())\n return form(\n {**kwargs, \"form\": instance_form, \"action_name\": _(\"Edit\"), \"title\": title},\n \"deployments/form.html\",\n request,\n )", "def profile_detail(request, employee_id):\n current_employee = Employee.objects.filter(user__pk=request.user.pk).first()\n employee = Employee.objects.get(pk=int(employee_id))\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not current_employee.pk == int(employee_id):\n if not current_employee.is_manager or not current_employee.company.pk == employee.company.pk:\n if not current_employee.isCompanySuperUserOrHigher():\n return HttpResponse('unauthorized', status=401)\n\n user_files = get_files_for_employee(employee_id)\n\n if request.method == 'POST':\n\n upload_form = UploadFileToEmployeyForm(request.POST, request.FILES)\n form = EmployeeNoteForm(request.POST, instance=employee)\n\n if 'upload' in request.POST:\n if upload_form.is_valid():\n upload_form.handle_upload(employee_id, request.FILES['file'])\n\n return HttpResponseRedirect('/employee/show/{}?upload_status=ok#file-list'.format(employee_id))\n\n else:\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/employee/show/%d' % form.instance.pk)\n\n else:\n form = EmployeeNoteForm(instance=employee)\n upload_form = UploadFileToEmployeyForm()\n data = {}\n data[\"user\"] = employee.user.first_name + \" \" + employee.user.last_name\n data[\"id\"] = str(employee.user.pk)\n data[\"title\"] = employee.title\n data[\"email\"] = employee.user.email\n data[\"phone\"] = employee.phone\n company_dict = {}\n company_dict[\"name\"] = employee.company.name\n company_dict[\"id\"] = str(employee.company.pk)\n\n data[\"company\"] = company_dict\n employee_username = \"\"\n emp = Employee.objects.filter(manager=employee.manager).all()\n for obj in emp:\n employee_username = obj.manager.user.username if obj.manager else \"\"\n employee_first = obj.manager.user.first_name if obj.manager else \"\"\n employee_last = obj.manager.user.last_name if obj.manager else \"\"\n manager_dict = {}\n manager_dict[\"name\"] = employee_username\n manager_dict[\"id\"] = employee_id\n manager_dict[\"first_last_name\"] = employee_first + \" \" + employee_last\n data[\"manager\"] = manager_dict\n data[\"date_of_birth\"] = employee.date_of_birth\n data[\"status_questions\"] = employee.status_questions\n data[\"notes\"] = employee.notes\n employee_role = EmployeeRole.objects.filter(employee=employee).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n data[\"roles\"] = name_role_list\n data[\"potenciale\"] = employee.potenciale\n data[\"date_start\"] = employee.created_at\n data[\"is_manager\"] = employee.is_manager\n data[\"date_finish\"] = \"\"\n data['photo'] = employee.photo.url if employee.photo else ''\n\n return 
JsonResponse(status=200, data=data)\n # return TemplateResponse(\n # request,\n # 'mus/detail.html',\n # {\n # 'actions': actions,\n # 'employee': employee,\n # # 'development_plans': development_plans,\n # 'form': form,\n # 'upload_form': upload_form,\n # 'user_files': user_files\n # }\n # )", "def view_edit(\n request: HttpRequest,\n pk: Optional[int] = None,\n workflow: Optional[Workflow] = None,\n view: Optional[View] = None,\n) -> JsonResponse:\n # Form to read/process data\n form = ViewAddForm(request.POST or None, instance=view, workflow=workflow)\n\n return save_view_form(\n request,\n form,\n 'table/includes/partial_view_edit.html')", "def office_view(request, slug, id):\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n office_reference = get_object_or_404(Office, id=id,company=company)\n\n return render_to_response('office_form.html', \n {'details': office_reference,'info':office_reference},\n context_instance=RequestContext(request))", "def show_edit_form(user_id):\n user = User.query.get_or_404(user_id)\n return render_template('edit.html', user=user)", "def team_edit(team_id):\n if request.method == 'GET':\n team = Team.query.filter_by(team_id=team_id).one()\n return render_template('edit_team.html', team=team)", "def edit(request, company_id=None):\n if company_id:\n company = get_object_or_404(Company, id=company_id)\n if request.POST and company.owner == request.user:\n form = CompanyForm(request.POST, instance=company)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect('/companies')\n if company.owner != request.user:\n return HttpResponseForbidden()\n form = CompanyForm(instance=company)\n context = dict(form=form)\n return render(request, 'companies/edit.html', context)\n else:\n companies = Company.objects.filter(owner=request.user)\n context = dict(companies=companies)\n return render(request, 'companies/companies_by_user.html', context)", "def edit(self, *args, **kw):\n\t\t\ttmpl_context.widget = self.edit_form\n\t\t\tpks \t\t= self.provider.get_primary_fields(self.model)\n\t\t\tkw \t\t\t= {}\n\n\t\t\tfor i, pk in enumerate(pks):\n\t\t\t\tkw[pk] \t\t= args[i]\n\n\t\t\tvalue \t\t= self.edit_filler.get_value(kw)\n\t\t\tvalue['_method'] \t= 'PUT'\n\n\t\t\treturn dict(value = value, model = self.model.__name__, pk_count = len(pks))", "def edit_employee_locations(employee_id):\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employee = Employee.query.get_or_404(employee_id)\n \n form = Add_Loc_Form(obj = employee)\n \n form.location.choices = db.session.query(Location.id, Location.site_name).all()\n \n \n if form.validate_on_submit():\n \n location = Location.query.get(form.location.data) \n employee.locations.append(location)\n db.session.add(employee)\n \n db.session.commit()\n\n \n\n flash(f\"{employee.first_name} {employee.last_name} has been saved\", \"success\")\n return redirect(\"/administrator\")\n else:\n \n return render_template(\"/admin/employee_cert.html\", employee = employee, form = form)", "def put(self, request, pk):\n data = request.data\n data.pop('skills')\n Department_name = data.pop('department')\n department = Department.objects.get(name=Department_name)\n manager_name = data.pop('manager')\n manager = Manager.objects.get(name=manager_name)\n EmployeeDetail.objects.filter(pk=pk).update(department=department, 
manager=manager, **data)\n return Response(\n data=\"request.data\"\n )", "def show_edit_form(user_id):\n\n user = User.query.get_or_404(user_id)\n\n return render_template(\"users/edit_user.html\", user=user)", "def edit(request, pk):\n template_var = base_template_vals(request)\n user = template_var[\"u\"]\n event = Event.objects.get(id=pk)\n template_var[\"e\"] = event\n if user.is_superuser or user.is_moderator:\n if request.method == 'POST':\n title = request.POST['title']\n refer = request.POST['refer']\n date = request.POST['date']\n time = request.POST['time']\n loc = request.POST['loc']\n body = request.POST['body']\n \n # Deal with time field\n try:\n event_datetime = date + ' ' + time\n print event_datetime\n event_datetime = datetime.strptime(event_datetime, '%m/%d/%Y %H:%M')\n except ValueError:\n print \"Error when processing time field\"\n \n # Deal with tags checkbox list\n tags = request.POST.getlist(\"tags\") \n if len(tags) == 0:\n event.tags.add(\"untagged\")\n else:\n taglist = list(tags)\n for t in taglist:\n event.tags.add(t)\n \n event.title = title\n event.refer = refer\n event.event_time = event_datetime\n event.location = loc\n event.body = body\n event.save() \n return single(request, pk)\n return render_to_response(\"event/event_edit.html\", template_var,\n context_instance=RequestContext(request))\n else :\n return redirect('index')", "def edit_user(user_id):\n\n user = User.query.get_or_404(user_id)\n return render_template('users/edit.html', user=user)", "def edit(self, id, *args, **kw):\n atras = \"/rolesplantilla/\"\n if (not kw['contexto']):\n redirect('../')\n elif (kw['contexto'] == \"proyecto\"):\n selector = SelectorPermisosPlantillaProy\n elif (kw['contexto'] == \"fase\"):\n selector = SelectorPermisosPlantillaFase\n elif (kw['contexto'] == \"ti\"):\n kw[\"contexto\"] = u\"Tipo de Ítem\"\n selector = SelectorPermisosPlantillaTi\n \n self.edit_form = RolPlantillaEditForm(DBS=DBSession, selector=selector) \n tmpl_context.widget = self.edit_form\n rol_plantilla_edit_form = self.edit_form\n \n \n page=u\"Editar Rol Plantilla de {contexto}\".format(contexto=kw['contexto'])\n \n value = self.edit_filler.get_value(values={'id_rol': int(id)})\n \n #agregado\n if value[\"tipo\"].find(\"Plantilla\") < 0:\n page=u\"Editar Rol de {contexto}\".format(contexto=kw['contexto'])\n atras = \"/roles/\"\n \n return dict(value=value, page=page, atras=atras)", "def edit_user(user_id):\n user = User.query.get_or_404(user_id)\n return render_template('/users/edit_page.html', user=user)", "def edit():", "def document_edit_view(document_id):\n\n doc = Document.query.filter(Document.id == document_id).first_or_404()\n return render_template('admin/documents/edit.html', document=doc, path='/admin/documents')", "def get_edit_form(self, data):\n self.add_success(data)\n rv = self.get((data[self.id_field], self.edit_url))\n assert not is_404(rv)\n assert in_response(rv, 'Edit {}'.format(data[self.name_field]))\n for field, name in self.fields:\n assert in_response(rv, name)\n return rv", "def action_add(request, employee_id=None):\n if employee_id:\n employee = Employee.objects.get(pk=employee_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n else:\n employee = request.user.employee_user.first()\n if request.method == 'POST':\n form = ActionForm(request.POST)\n if form.is_valid():\n form.save(request.user, employee)\n return 
HttpResponseRedirect('/action/%d' % form.instance.pk)\n else:\n form = ActionForm()\n return TemplateResponse(\n request,\n 'mus/action_edit.html',\n dict(\n form=form\n )\n )", "def edit_user(request, username):\n context = {}\n detail = IMPUser.objects.all().filter(username = username)\n if detail:\n context = {'username':username,\n 'display_name':detail[0].display_name,\n 'tel':detail[0].tel,\n 'mobile':detail[0].mobile,\n 'office':detail[0].office,\n 'num':detail[0].num}\n return render(request, \"account/edit_user.html\", context)", "def edit_user(user_id):\n user = User.query.get_or_404(user_id)\n return render_template(\"users/edit_user.html\", user=user)", "def office_update(request, slug, id):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n office_reference = get_object_or_404(Office, id=id,company=company)\n office_form = OfficeForm(instance=office_reference)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents info, \n if request.method == 'GET':\n return render_to_response('office_form.html',{'form':office_form, 'info': office_reference},context_instance=RequestContext(request))\n else:\n office_form = OfficeForm(request.POST, instance=office_reference)\n #if is POST Validates the form is well filled and save it redirecting to the company page \n if office_form.is_valid():\n office_form.save(commit = False)\n\n return HttpResponseRedirect('/company/'+str(slug))\n #if not well filled redirect to the original update page and display error\n else:\n return render_to_response('office_form.html', \n {'form': office_form, 'form_errors': office_form.errors, 'info': office_reference},\n context_instance=RequestContext(request))", "def update(self, request, pk):\n serializer = data_serializers.UpdateEmployeeRequestSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n try:\n new_employee_entity = self.controller.update_employee(request_data=request_data)\n serializer = data_serializers.PresentEmployeeDataSerializer(new_employee_entity)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (domain_exceptions.EmployeeIDIsNotUnique,\n domain_exceptions.WorkArrangementPercentageOutOfRange,\n domain_exceptions.TeamHasALeader,\n domain_exceptions.ObjectEntityDoesNotExist\n ) as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def assign_employee(id):\r\n check_admin()\r\n\r\n employee = Employee.query.get_or_404(id)\r\n\r\n # prevent admin from being assigned a department or role\r\n if employee.is_admin:\r\n abort(403)\r\n\r\n form = EmployeeAssignForm(obj=employee)\r\n if form.validate_on_submit():\r\n employee.department = form.department.data\r\n employee.role = form.role.data\r\n db.session.add(employee)\r\n db.session.commit()\r\n flash('You have successfully assigned a department and role.')\r\n\r\n # redirect to the roles page\r\n return redirect(url_for('admin.list_employees'))\r\n\r\n return render_template('admin/employees/employee.html',\r\n employee=employee, form=form,\r\n title='Assign Employee')", "def edit(request, pk):\n\n try:\n object = User.objects.get(pk=pk)\n except:\n object = User()\n\n if request.method == 'POST': # If the form has been submitted...\n form = UserForm(request.POST, 
instance=object)\n\n if form.is_valid(): # If the form is valid\n object = form.save()\n\n messages.success(request, _('The user has been saved.'))\n\n return redirect('users.views.list')\n else:\n form = UserForm(instance=object)\n\n return render(request, 'users/users/edit.html', {'form': form})", "def careerCatagory_edit(request):\r\n action = tool.get_param_by_request(request.GET, 'action', \"add\", str)\r\n page_index = tool.get_param_by_request(request.GET, 'page_index', 1, int)\r\n\r\n careerCatagory = None\r\n if action == \"edit\" or action == \"show\":\r\n _id = tool.get_param_by_request(request.GET, 'id', 0, int)\r\n careerCatagory = api_careerCatagory.get_career_catagory_by_id(_id)\r\n\r\n if careerCatagory.is_error():\r\n return render_to_response(\"404.html\", {}, context_instance=RequestContext(request))\r\n\r\n careerCatagory = careerCatagory.result()[0]\r\n\r\n c = {\"careerCatagory\": careerCatagory, \"action\": action, \"page_index\": page_index}\r\n\r\n return render_to_response(\"mz_course/careerCatagory_save.html\", c, context_instance=RequestContext(request))", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def change_employee(self, employee):\n cursor = self.dbconnect.get_cursor()\n try:\n if employee.id == None:\n raise Exception('no id given')\n cursor.execute('select * from employee where employeeID=%s', (str(employee.id),))\n if cursor.rowcount == 0:\n raise Exception('no employee found with that id')\n cursor.execute(\n 'update employee set name= %s,email= %s,office= %s,title= %s,INTernORextern= %s,active= %s,promotor= %s where employeeID=%s',\n (employee.name, employee.email, employee.office, employee.title,\n employee.internOrExtern, employee.active, employee.promotor, employee.id))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise Exception('unable to change employee')", "def profile_page(cls, employee_id, logger=None):\n if logger is None:\n logger = cls._logger\n\n database_connection = DatabaseConnection(f\"employees.csv\")\n table = database_connection.table\n employee = Employee(employee_id)\n\n view = table[(table['employee_id']==employee.get_employee_id())]\n logger.log(view)\n\n while True:\n\n choice = input(\n \"Please choose: \"\n \"(1) check data, \"\n \"(2) update first name, \"\n \"(3) update last name, \"\n \"(4) save changes, \"\n \"(5) exit without saving \"\n )\n if choice not in ('1', '2', '3', '4', '5'):\n logger.log(\"Please pick a valid choice\")\n elif choice=='1':\n view = table[(table['employee_id']==employee.get_employee_id())]\n logger.log(view)\n elif choice=='2':\n first_name = input(\"Enter your first name: \")\n employee.set_first_name(first_name)\n elif choice=='3':\n last_name = input(\"Enter your last name: \")\n employee.set_last_name(last_name)\n elif choice=='4':\n table[\n (table['employee_id']==employee.get_employee_id())\n ] = pd.Series(\n {'employee_id': employee.get_employee_id(),\n 'first_name': employee.get_first_name(),\n 'last_name': employee.get_last_name(),\n }\n )\n database_connection.overwrite()\n logger.log(\"Information saved!\")\n else:\n break", "def edit_user(user_id):\n user = User.query.get_or_404(user_id)\n\n return render_template('edit-user.html', user=user)", "def edit_attendance(request, attendance_id):\n\n\temp = models.Teacher.objects.get(user=request.user)\n\tif not emp.student_permit:\n\t\traise Http404\n\tattendance = models.Attendance.objects.filter(\n\t\tpk=attendance_id, 
soft_delete=False\n\t).first()\n\tprint(\"1\")\n\tcontext_dict = {\n\t\t\"all_subjects\": context_helper.subject_helper(),\n\t\t'attendance_id': attendance_id,\n\t}\n\tif request.method == 'POST':\n\t\tupdate_fields = []\n\t\tactivity = ''\n\t\troll = request.POST.get('roll')\n\t\tsubject = request.POST.get('subject_picker')\n\t\tobtained = request.POST.get('attendance')\n\t\ttotal = request.POST.get('total')\n\t\tstudent = models.Student.objects.filter(\n\t\t\troll_no=roll\n\t\t).first()\n\t\tif not student:\n\t\t\tcontext_dict[\"message\"] = 'Student at does not exist / Roll number has not been alloted.'\n\t\t\treturn render(request, \"editAttendance.html\", context_dict)\n\t\ttry:\n\t\t\tif attendance.student != student:\n\t\t\t\tattendance.student = student\n\t\t\t\tupdate_fields.append('student')\n\t\t\t\tactivity += 'Changed student to ' + str(student) + '.\\n'\n\t\t\tif attendance.total_attendance != total:\n\t\t\t\tattendance.total_attendance = total\n\t\t\t\tupdate_fields.append('total_attendance')\n\t\t\t\tactivity += 'Changed total attendance to ' + str(total) + '.\\n'\n\t\t\tif attendance.obtained_attendance != obtained:\n\t\t\t\tattendance.obtained_attendance = obtained\n\t\t\t\tupdate_fields.append('obtained_attendance')\n\t\t\t\tactivity += 'Changed obtained attendance to' + str(obtained) + '.\\n'\n\t\t\tif str(attendance.subject.pk) != str(subject):\n\t\t\t\tattendance.subject = models.Subject.objects.get(pk=subject)\n\t\t\t\tupdate_fields.append('subject')\n\t\t\t\tactivity += 'Changed subject to ' + str(subject) + '.\\n'\n\t\t\tattendance.save(update_fields=update_fields)\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity=activity,\n\t\t\t\tactivity_type=\"edit attendance\"\n\t\t\t)\n\t\t\thistory.save()\n\t\t\tcontext_dict[\"message\"] = 'Successfully updated Attendance.'\n\t\t\tcontext_dict[\"success\"] = True\n\t\texcept Exception as e:\n\t\t\tcontext_dict[\"message\"] = str(e)\n\t\t\tcontext_dict[\"success\"] = False\n\t\t\tprint(e)\n\tcontext_dict.update(context_helper.get_attendance_info(attendance))\n\tfor i in context_dict['subjects']:\n\t\t# use for dynamic\n\t\ttry: del context_dict['all_subjects'][i]\n\t\texcept: pass\n\tif context_dict.get('success', False):\n\t\treturn HttpResponseRedirect('/view-attendance')\n\treturn render(\n\t\trequest, \"editAttendance.html\", context_dict\n\t)", "def show_employee_edit_form(self, staff_ob, number):\n\n print(self.LENGTH_STAR * \"*\")\n print(f\"EDIT {staff_ob.role.upper()}\\n\")\n\n if number == 1:\n print(self.LENGTH_STAR * \"*\")\n print(f\"You are changing {staff_ob.name}´s address\\nThe current address is: {staff_ob.address}\")\n new_address = self.get_address()\n while new_address == False:\n new_address = self.get_address()\n self.check_action_edit_form(staff_ob, number, new_address)\n\n elif number == 2:\n print(self.LENGTH_STAR * \"*\")\n print(f\"You are changing {staff_ob.name}´s mobile number\\nThe current mobile number is: {staff_ob.mobile_number}\")\n new_mobile_number = self.get_mobile_number()\n while new_mobile_number == False:\n new_mobile_number = self.get_mobile_number\n self.check_action_edit_form(staff_ob, number, new_mobile_number)\n \n elif number == 3:\n print(self.LENGTH_STAR * \"*\")\n print(f\"You are changing {staff_ob.name}´s email\\nThe current the email is: {staff_ob.email}\")\n new_email = self.get_email()\n while new_email == False:\n new_email = self.get_email()\n self.check_action_edit_form(staff_ob, number, new_email)\n \n print(f\"\\n{staff_ob.name}'s information 
successfully changed!\\n\")\n \n return", "def delete_employee():\r\n id = request.args.get('id', \"\")\r\n return render_template(\"delete_employee.html\", id=id)", "def edit_task_page(request):\n data = {}\n try:\n tasklist = request.GET.get(\"tasklist\")\n task = request.GET.get(\"task\")\n data[\"tasklist\"] = tasklist\n\n task_obj = Todo.objects.get(title=task)\n data[\"data\"] = task_obj\n\n return render(request, \"pages/update-task.html\", data)\n except Exception as ex:\n return HttpResponse(ex)", "def get(self,request,*args,**kwargs):\n\t\tuser_form = UserUpdateForm(instance=request.user)\n\t\tpersona_form = PersonaUpdateForm(instance=request.user.persona)\n\t\tuser_password_update_form = UserPasswordUpdateForm(user=request.user)\n\n\t\tcontext = {\n\t\t'user_form':user_form,\n\t\t'persona_form':persona_form,\n\t\t'user_password_update_form':user_password_update_form\n\t\t}\n\t\treturn render(request, 'cuenta/editar.html', context)", "def edittask_view(request, task_id):\n\n # Use to tell to the template tha the user want to edit an already existing task\n is_new = False\n\n # Retrieve the task, raise an error if the task does not exist\n task = get_object_or_404(Task, id=task_id)\n project = task.projet\n # Check if logged in user is allowed to modify the task\n if request.user.has_perm('taskmanager.{}_project_permission'.format(project.id)):\n # Check if the form has been submitted\n if request.method == \"POST\":\n form = TaskForm(project, request.POST)\n if form.is_valid():\n task = form.save(commit=False)\n # Manually set the project id. Otherwise a new task would be created\n task.id = task_id\n task.last_modification = datetime.datetime.now()\n task.save()\n\n return redirect(\"task\", task_id=task.id)\n else:\n # Initialize the form with the task\n form = TaskForm(project, instance=task)\n else:\n return redirect(\"projects\")\n return render(request, \"newtask.html\", locals())", "def edit(request, article_id):\n try:\n article = Article.objects.get(pk=article_id)\n except Article.DoesNotExist:\n raise Http404(\"Article does not exist\")\n if request.method == 'POST': # フォームが提出された\n form = ArticleForm(request.POST, instance = article) # POST データの束縛フォーム\n if form.is_valid(): # バリデーションを通った\n article = form.save(commit=False)\n if form.cleaned_data['no_expired_at'] is True:\n article.expired_at = None\n article.save()\n return HttpResponseRedirect(reverse('article_list')) # POST 後のリダイレクト\n else:\n no_expired_at = False\n if article.expired_at is None:\n no_expired_at = True\n article.expired_at = datetime.now() + timedelta(days=1)\n form = ArticleForm(instance = article, initial = {'no_expired_at': no_expired_at, }) # 非束縛フォーム\n article_list = Article.objects.order_by('-released_at')[:5]\n auth_form = AuthenticationForm(None, request.POST or None)\n return render(request, 'app/article_edit.html', { \n 'form': form,\n 'title':'ニュース記事の編集',\n 'year':datetime.now().year,\n 'articles':article_list,\n 'blogs':EntryView.get_entry_list('-posted_at',-1, request.user.pk )[:5],\n 'submit_title':'更新する',\n 'article_pk':article.pk,\n 'auth_form':auth_form,\n 'current_user':request.user,\n })", "def edit(request,entry_id):\n assert isinstance(request, HttpRequest)\n try:\n entry = Entry.objects.get(pk=entry_id)\n except Entry.DoesNotExist:\n raise Http404(\"指定されたブログが存在しません。\")\n if not request.user or request.user.pk != entry.member.pk: # ブログ作成者以外は編集できない\n return HttpResponseForbidden() #アドレスをコピペしなければ通常は起こらないため例外処理で済ませておく。\n\n if request.method == 'POST': # フォームが提出された\n form = 
EntryForm(request.POST, instance = entry) # POST データの束縛フォーム\n if form.is_valid(): # バリデーションを通った\n form.save()\n return HttpResponseRedirect(reverse('entry_list')) # POST 後のリダイレクト\n else:\n form = EntryForm(instance = entry) # 非束縛フォーム\n article_list = Article.objects.order_by('-released_at')[:5]\n return render(request, 'app/entry_edit.html', { \n 'form': form,\n 'title':'ブログ記事の編集',\n 'year':datetime.now().year,\n 'articles':article_list,\n 'blogs':EntryView.get_entry_list('-posted_at',-1, request.user.pk )[:5],\n 'submit_title':'更新',\n 'entry_pk':entry.pk,\n 'current_user':request.user,\n })", "def edit(request):\n if 'form.submitted' in request.params:\n # delete old post\n title = request.params['title']\n name = title_to_name(title)\n\n if not name or DBSession.query(Post).filter(Post.name==name).count():\n # this should be a popup ajaxy box\n return Response(\"Name %s is in use, choose a different title\" % name, content_type='text/plain', status_int=500)\n\n body = request.params['body']\n post = Post(title, body, name)\n DBSession.add(post)\n return HTTPFound(location = request.route_url('view_post', postname=name))\n\n save_url = request.route_url('edit_post')\n post = DBSession.query(Post).filter(Post.name==name).first()\n return environment_factory(post=post, save_url=save_url)", "def update_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('UPDATE employee '\n 'SET name = %s, email = %s, office = %s, extra_info = %s, picture_location = %s, '\n 'research_group = %s, title = %s, is_external = %s, is_admin = %s, is_active = %s '\n 'WHERE id = %s;',\n (obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active, obj.e_id))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise", "def delete(self, request, pk):\n employee = EmployeeDetail.objects.get(pk=pk)\n employee.delete()\n return Response(\n data=' Entry deleted',\n status=status.HTTP_400_BAD_REQUEST\n )", "def edit(self, *args, **kw):\n tmpl_context.widget = self.edit_form\n #pks = self.provider.get_primary_fields(self.model)\n \n log.debug(\"soyRomperLB= %s\" %kw)\n\n ###########################################\n pks = self.provider.get_primary_fields(self.model)\n \n ###########################################\n kw = {}\n for i, pk in enumerate(pks):\n kw[pk] = args[i]\n value = self.edit_filler.get_value(kw)\n value['_method'] = 'PUT'\n return dict(value=value, model=self.model.__name__, pk_count=len(pks))", "def edit(tesserae, tessera_id):\n try:\n return tesserae.edit(tessera_id)\n except TesseraError, e:\n sys.stderr.write(\"Error: %s\\n\", str(e))\n return False", "def edit_parterre(id):\n parterre = get_parterre(id)\n form = ParterreForm(parterre)\n return render_template(\"create-parterre.html\",\n title= parterre.get_name()+\" - edit\",\n form = form,\n parterre = parterre,\n param = \"modif\")", "def edit(user_id):\n if user_id != current_user.id:\n return abort(403)\n\n user = get_user(user_id)\n form = EditForm(obj=user)\n form.email.data = user.email\n\n if form.validate_on_submit():\n password = form.password.data\n username = form.username.data\n\n save_result = edit_user(user_id, password, username, user.active)\n user = save_result['entry']\n form = EditForm(request.form, obj=save_result['entry'])\n form.email.data = user.email\n return redirect(url_for('.index'))\n \n return render_template('users/edit.html'\n ,form=form\n ,user=user\n ,t=t\n ,m=m)", "def 
show_edit_user_form(user_id):\n\n user = User.query.get_or_404(user_id)\n return render_template('edit_user.html', user=user)", "def edit(request, observation_id, summary_id):\n\n if request.method == 'POST':\n if observation_id and summary_id:\n o = get_object_or_404(models.Observations, pk=observation_id)\n o.summary_id = summary_id\n form = Observation(request.POST,instance=o)\n else:\n form = Observation(request.POST)\n if form.is_valid():\n form.save()\n return render_to_response(\"obsform_form.html\",\n {'form': form,\n 'success' : 'Your observation was saved'},\n context_instance=RequestContext(request))\n else:\n o = get_object_or_404(models.Observations, pk=observation_id)\n o.summary_id = summary_id\n\n form = Observation(instance=o)\n\n return render_to_response('obsform_form.html', {'form' : form},\n context_instance=RequestContext(request))", "def edit_entry():\n if not check_admin_logged() :\n abort(403)\n\n title = request.form['title']\n category = request.form['category']\n buydate = request.form['buydate']\n ssid = decrypt_book_record(request.form['ssid'])\n\n if not check_items_in_form(title, category, buydate):\n return redirect(url_for('show_entries_admin'))\n\n edited_entry = Entries.query.filter_by(\n id=ssid, title=title, category=category, \\\n buydate=buydate).first()\n\n if edited_entry is not None :\n edited_entry.introduction = request.form['introduction']\n if db.session.is_modified(edited_entry) :\n # commit only if something is modified\n try :\n db.session.commit()\n except IntegrityError as e :\n log_error('error when edit:')\n log_error(e.message)\n flash(u'数据库操作失败导致更新失败!请看后台日志')\n flash(u'成功更新条目')\n\n return redirect(url_for('show_entries_admin'))", "def edit(article_id):\r\n response = table.get_item(\r\n Key={'article_id': article_id}\r\n )\r\n data = response.get('Item')\r\n\r\n if data is None:\r\n flash('Unable to get Article')\r\n return redirect(url_for('article.list'))\r\n\r\n form = ArticleForm(title=data.get('title'), description=data.get('description'))\r\n\r\n # Check request method and validate form\r\n if request.method == 'POST' and form.validate():\r\n data = {}\r\n data['article_id'] = article_id\r\n data['title'] = form.title.data\r\n data['description'] = form.description.data\r\n\r\n data = dict((k, v) for k, v in data.items() if v)\r\n\r\n # Save data in DynamoDb to update table\r\n response = table.put_item(Item=data)\r\n\r\n if response:\r\n flash('Article is successfully updated')\r\n return redirect(url_for('article.list'))\r\n \r\n return render_template('article/form.html', add_article=False,\r\n form=form, title='Edit Article', article_id=article_id)", "def pet_detail_edit(pet_id):\n\n pet = Pet.query.get_or_404(pet_id)\n form = PetEditForm(obj=pet)\n\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = form.notes.data\n pet.available = form.available.data\n\n db.session.commit()\n flash(f\"Pet{pet_id} updated!\")\n return redirect(f\"/{pet_id}\")\n\n else:\n return render_template(\"pet_detail.html\", form=form, pet=pet)", "def show_edit_user_form(user_id):\r\n user = User.query.get_or_404(user_id)\r\n\r\n return render_template('edit-user.html', user=user)", "def object_edit(request, simulation, object_name):\n # Object is either 'centroid', 'crossing', 'link' or 'function'.\n # Create a formset to edit the objects.\n formset = gen_formset(object_name, simulation)\n context = {\n 'simulation': simulation,\n 'object': object_name,\n 'formset': formset,\n }\n return render(request, 
'metro_app/object_edit.html', context)", "def edit_product(request, pk):\n\n products = get_object_or_404(Product, pk=pk)\n if request.method == 'POST':\n form = ProductPostForm(request.POST, instance=products)\n if form.is_valid():\n product = form.save()\n return redirect(product_details, product.pk)\n else:\n form = ProductPostForm(instance=products)\n return render(request, 'editproduct.html', {'form': form})", "def edit(self, *args, **kw):\n tmpl_context.widget = self.edit_form\n pks = self.provider.get_primary_fields(self.model)\n kw = {}\n for i, pk in enumerate(pks):\n kw[pk] = args[i]\n\n value = self.edit_filler.get_value(kw)\n value['_method'] = 'PUT'\n \n return dict(value=value, model=self.model.__name__, pk_count=len(pks))", "def task_excellent_works_edit(request):\r\n action = tool.get_param_by_request(request.GET, 'action', \"add\", str)\r\n task_id = tool.get_param_by_request(request.GET, 'taskId', 0, int)\r\n\r\n c = None\r\n if \"edit\" in action and task_id:\r\n task_excellent_works = api_taskExcellentWorks.select_task_excellent_works_by_task_id(task_id)\r\n c = {\"taskExcellentWorks\": task_excellent_works.result(), \"action\": action}\r\n\r\n if task_excellent_works.is_error():\r\n return render_to_response(\"404.html\", {}, RequestContext(request))\r\n else:\r\n return render_to_response(\"mz_course/task/taskExcellentWorks_list.html\", c, RequestContext(request))\r\n\r\n if \"edit\" in action and (not task_id):\r\n _id = tool.get_param_by_request(request.GET, 'id', 0, int)\r\n task_excellent_works = api_taskExcellentWorks.select_task_excellent_works_by_id(_id)\r\n c = {\"taskExcellentWorks\": task_excellent_works.result()[0], \"action\": action}\r\n\r\n if task_excellent_works.is_error():\r\n return render_to_response(\"404.html\", {}, RequestContext(request))\r\n\r\n if 'add' in action:\r\n c = {\"task_id\": task_id, \"action\": action}\r\n\r\n return render_to_response(\"mz_course/task/taskExcellentWorks_edit.html\", c, RequestContext(request))", "def edit_employee_certifications(employee_id):\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employee = Employee.query.get_or_404(employee_id)\n \n form = Add_Cert_Form(obj = employee)\n \n form.cert.choices = db.session.query(Cert.id , Cert.cert_name).all()\n \n \n if form.validate_on_submit():\n \n cert = Cert.query.get(form.cert.data) \n \n\n if cert.expire:\n received = form.received.data\n year = received.year\n month = received.month\n day = received.day\n\n start_date = datetime(year = year, month = month, day = day)\n change_unit = cert.good_for_unit\n change_time = cert.good_for_time\n \n if change_unit == \"days\": \n delta = timedelta(days = change_time)\n elif change_unit == \"weeks\":\n delta = timedelta(days = change_time * 7)\n elif change_unit == \"months\":\n delta = timedelta(days = change_time * 30)\n else:\n delta = timedelta(days = change_time * 365)\n\n due_date = start_date + delta\n employees = employee_certification(employee_id = employee_id, cert_id = cert.id, received = received, due_date = due_date)\n \n #cert.employees.append(employee))\n #db.session.add(cert)\n #employee.certs.append(dates)\n db.session.add(employees)\n db.session.commit()\n \n flash(f\"{employee.first_name} {employee.last_name} has been saved\", \"success\")\n return redirect(\"/administrator\")\n else:\n\n return render_template(\"/admin/employee_cert.html\", employee = 
employee, form = form)", "def edit_record(request, slug, pk):\n # Try except to make sure the user is a member of this project\n try:\n ProjectMember.objects.get(user=request.user, project=Project.objects.get(slug=slug))\n except ObjectDoesNotExist:\n # User is not a member.\n return HttpResponse(\"You're trying to access a project you're not a member of or a project that does not exist.\")\n else:\n # User is a member,\n project = get_object_or_404(models.Project, slug=slug)\n record = get_object_or_404(models.Record, pk=pk)\n pm = ProjectMember.objects.get(user=request.user, project=project)\n\n # Access control.. if not owner or editor - access denied.\n if pm.is_owner or pm.is_editor:\n # User has access\n if request.method == 'POST':\n # User submits data\n form1 = forms.GeneralRecordForm(request.POST)\n form2 = forms.SpecificRecordForm(request.POST, entry=request.POST['entry_type'])\n context = {\n 'form1':form1,\n 'project':project,\n 'form':form2,\n }\n if form2.is_valid() and form1.is_valid():\n fields = [f.name for f in models.Record._meta.get_fields()]\n data1 = form1.clean()\n data2 = form2.clean()\n # Additional form validation.\n if data1['entry_type'] == 'book':\n if data2['author']== '' and data2['editor'] == '':\n context['err'] = True\n context['errmessage'] = \"Fill in either Author or Editor\"\n return render(request, 'records/record_edit.html', context)\n elif data1['entry_type'] == 'inbook':\n if data2['author'] == '' and data2['editor'] == '':\n context['err'] = True\n context['errmessage'] = \"Fill in either Author or Editor\"\n return render(request, 'records/record_edit.html', context)\n elif data2['chapter'] == '' and data2['pages'] == '':\n context['err'] = True\n context['errmessage'] = \"Fill in either Chapter or Pages\"\n return render(request, 'records/record_edit.html', context)\n # Form is valid .. 
save into new record\n # making sure no one has edited the record while session is running\n if record.last_edited.__str__() == request.COOKIES.get('last_edited'):\n # No conflict, go on save changes.\n record.entry_type = data1['entry_type']\n record.cite_key = data1['cite_key']\n record.project = project\n for fieldname in fields:\n if fieldname in data2:\n setattr(record, fieldname, data2[fieldname])\n record.last_edited = timezone.now()\n record.save()\n # Send user back to project detail, the overview of all records in the project.\n return redirect('projects:single', slug=slug)\n else:\n # someone changed the record before the user managed to save\n data = forms.ShowRecordForm(data=model_to_dict(record), entry=record.entry_type)\n context = {\n 'old_record':record,\n 'form1':form1,\n 'project':project,\n 'form':form2,\n 'data':data\n }\n # send user to the conflict page.\n return render(request, 'records/record_conflict.html', context)\n\n else:\n # Form is not valid\n context = {\n 'form1':form1,\n 'project':project,\n 'form':form2,\n 'err':True\n }\n return render(request, 'records/record_edit.html', context)\n else:\n # User hasn't submitted any data yet\n # Form filled in with data for selected record.\n form1 = forms.GeneralRecordForm(data=model_to_dict(record))\n form2 = forms.SpecificRecordForm(data=model_to_dict(record),entry=record.entry_type)\n context = {\n 'form1':form1,\n 'form2':form2,\n 'project':project,\n 'record':record\n }\n # Create response in order to set cookie\n response = render(request, 'records/record_edit.html', context)\n # set cookie to enable later check for conlfict\n response.set_cookie('last_edited', record.last_edited.__str__())\n return response\n else:\n # Access denied.\n return HttpResponse(\"You don't have the permission to do this\")", "def timesheet_edit_form(request, type, id):\r\n if type == 'timesheet':\r\n timesheet = TimeSheet.objects.get(pk=int(id))\r\n editForm = TimeSheetForm(\r\n initial = {\r\n 'dueDate':timesheet.DueDate,\r\n 'hours':timesheet.Hours,\r\n 'partner':timesheet.Partner,\r\n 'project':timesheet.Project,\r\n 'phase':timesheet.Phase,\r\n 'activity':timesheet.Activity\r\n })\r\n else:\r\n timesheet = InternalTimeSheet.objects.get(pk=int(id))\r\n editForm = InternalForm(\r\n initial = {\r\n 'dueDate':timesheet.InternalDueDate,\r\n 'hours':timesheet.Hours,\r\n 'internal':timesheet.Internal,\r\n 'activity':timesheet.Activity\r\n })\r\n return render(\r\n request,\r\n 'timesheet/forms/edit.html',\r\n {\r\n 'editForm':editForm,\r\n 'type':type,\r\n 'timesheet':timesheet\r\n })", "def create_employee(request, company_id):\n\n company = Company.objects.get(pk=company_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n logUnauthorizedAccess(\"User tried to create_employee\", request)\n raise PermissionDenied()\n form = EmployeeForm(request, initial=dict(company=company))\n form.fields['manager'].queryset = Employee.objects.filter(is_manager=True, company=company)\n # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(\n # Q(company=company) | Q(company__isnull=True))\n # data = {\n # 'employee_form': form.cleaned_data,\n # 'company': company.cleaned_data[\"name\"]\n # }\n\n return TemplateResponse(\n request,\n 'mus/create_employee_form.html',\n {\n 'employee_form': form,\n }\n )\n # data = {\n # 'employee_form': form.cleaned_data,\n # 'company': company.cleaned_data[\"name\"]\n # }\n # return 
JsonResponse(status=200, data=data)", "def edit(request):\n if 'image_id' not in request.GET:\n return HttpResponseRedirect('/imgmanip')\n image_id = request.GET['image_id']\n image = get_object_or_404(Image, pk=image_id)\n return render(request, 'imgmanip/edit.html', {'image': image, 'image_id': image_id})", "def getEditForm( self ):\n return \"listc_edit\"", "def show_employee(emp_id, fields=None):\n ret = {}\n if fields is None:\n fields = \",\".join(\n (\n \"canUploadPhoto\",\n \"department\",\n \"displayName\",\n \"firstName\",\n \"id\",\n \"jobTitle\",\n \"lastName\",\n \"location\",\n \"mobilePhone\",\n \"nickname\",\n \"photoUploaded\",\n \"photoUrl\",\n \"workEmail\",\n \"workPhone\",\n \"workPhoneExtension\",\n )\n )\n\n status, result = _query(action=\"employees\", command=emp_id, args={\"fields\": fields})\n\n root = ET.fromstring(result)\n\n ret = {\"id\": emp_id}\n for item in root:\n ret[next(iter(item.values()))] = item.text\n return ret", "def edit_car_view():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n car_id = request.args.get('car-id', None)\n edit_mode_string = request.args.get('edit', None)\n if edit_mode_string == 'true':\n edit_mode = True\n else:\n edit_mode = False\n car = get_car_identified_by_id(car_id)\n if check_authentication(session_id, user_id) and is_admin_user(user_id):\n return render_template('cars_manager.html', user=user_id, session_id=session_id, car=car, edit_mode=edit_mode,\n current_year=get_current_year())\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)", "def view_user_edit(self):\n\n logged_in = authenticated_userid(self.request)\n message = ''\n form = Form(self.request, schema=UserEditSchema,\n state=State(request=self.request))\n if form.validate():\n password = self.request.params['password']\n if self.context.validate_password(password):\n if self.request.params['new_password']:\n password = self.request.params['new_password']\n message = 'Successfully saved'\n email = self.request.params['email']\n self.context.edit(password, email)\n else:\n message = msg['password_invalid']\n return {\n 'message': message,\n 'project': '',\n 'username': self.context.username,\n 'logged_in': logged_in,\n 'form': FormRenderer(form),\n 'email': self.context.email\n }", "def register_edit_view(self, blueprint):\n view = apply_decorators(self.edit_view, self.edit_decorators)\n blueprint.add_url_rule(\n self.edit_rule, self.edit_endpoint, view, methods=['GET', 'POST'])", "def edit(self, name=UNSPECIFIED, extraParams={}):\n import labstep.entities.resource.repository as resourceRepository\n\n return resourceRepository.editResource(self, name, extraParams=extraParams)", "def edit_user(self, username, employee, role, status, change_pwd=False, *password):\n self.click(self.user_edit_save_btn)\n self.set_combox_value(role, self.user_role_select)\n self.input_text(employee, self.emp_name_input)\n self.input_text(username, self.user_name_input)\n self.set_combox_value(status, self.user_status_select)\n if change_pwd:\n self.click(self.change_password)\n self.input_text(password, self.user_password_input)\n self.input_text(password, self.user_confirm_password)\n self.click(self.user_edit_save_btn)\n self.wait_unit_el_present(self.user_table)\n Log.info(\"User is edited and saved.\")", "def showEditContact(self):", "def edit_task(self,tid, **kwargs):\n 
self.task_controller.edit(tid, **kwargs)", "def view_edit_pet(id):\n pet = Pet.query.get_or_404(id)\n form = PetEditForm(obj=pet)\n if form.validate_on_submit():\n form.populate_obj(pet)\n db.session.commit()\n\n flash(f\"Updated {pet.species} named {pet.name}\")\n return redirect(f'/{id}')\n else:\n return render_template(\"pet_edit_form.html\", form=form, pet=pet)", "def edit_plante(id):\n plante = get_plante(id)\n form = PlanteForm(plante)\n return render_template(\n \"create-plante.html\",\n title = plante.get_name()+\" - edit\",\n form = form,\n plante = plante,\n param = \"modif\")", "def edit_announcement():\n # Implement me!\n\n announcement = get_announcement(request.vars.announcement_id, auth.user.email)\n\n announcement.description = request.vars.description\n announcement.name = request.vars.name\n announcement.updated_on = datetime.datetime.utcnow()\n announcement.update_record()\n return response.json(announcement)", "def edit(self, *args, **kw):\n pp = PoseePermiso('modificar rol')\n if not pp.is_met(request.environ):\n flash(pp.message % pp.nombre_permiso, 'warning')\n redirect(self.action)\n tmpl_context.widget = self.edit_form\n value = self.edit_filler.get_value(values={'id_rol': int(args[0])})\n page = \"Rol {nombre}\".format(nombre=value[\"nombre_rol\"])\n atras = self.action\n return dict(value=value, page=page, atras=atras)", "def show_post_edit(post_id):\n\n post = Post.query.get_or_404(post_id)\n tags = Tag.query.all()\n\n return render_template(\"posts/edit_post.html\", post=post, tags=tags)", "def edit(self):\n if not self.context.model.is_editable():\n raise Unauthorized(\"Editing is not allowed\")\n\n title = self.request.get('title')\n if not title:\n return JSONResponse(self.request).error(\n _('agenda_item_update_empty_string',\n default=u\"Agenda Item title must not be empty.\")).proceed().dump()\n\n title = title.decode('utf-8')\n if self.agenda_item.has_proposal:\n if len(title) > ISubmittedProposal['title'].max_length:\n return JSONResponse(self.request).error(\n _('agenda_item_update_too_long_title',\n default=u\"Agenda Item title is too long.\")\n ).proceed().dump()\n\n self.agenda_item.set_title(title)\n return JSONResponse(self.request).info(\n _('agenda_item_updated',\n default=u\"Agenda Item updated.\")).proceed().dump()", "def test_new_employee_crud_methods(self):\n response = self.client.get(\n '/employees/', kwargs={'employer_id': self.employee.id})\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(Employee.objects.all()), 1)\n\n # Test that a new employee can be added\n response = self.client.post(\n '/employees/',\n {'name': 'MAdtraxx!!', 'employer': self.employer.id},\n kwargs={'pk': self.employer.id})\n self.assertEqual(response.status_code, 201)\n self.assertEqual(Employee.objects.count(), 2)\n\n # Test that employee info may be edited\n response = self.client.put('/employees/1/',\n {'name': 'Ashley',\n 'employer': self.employer.id},\n kwargs={'employer_id': self.employee.id,\n 'pk': self.employee.id})\n self.assertEqual(response.status_code, 200)", "def update_employee(emp_id, key=None, value=None, items=None):\n if items is None:\n if key is None or value is None:\n return {\"Error\": \"At least one key/value pair is required\"}\n items = {key: value}\n elif isinstance(items, str):\n items = salt.utils.yaml.safe_load(items)\n\n xml_items = \"\"\n for pair in items:\n xml_items += '<field id=\"{}\">{}</field>'.format(pair, items[pair])\n xml_items = \"<employee>{}</employee>\".format(xml_items)\n\n status, result = 
_query(\n action=\"employees\",\n command=emp_id,\n data=xml_items,\n method=\"POST\",\n )\n\n return show_employee(emp_id, \",\".join(items.keys()))", "def edit_post(post_id):\n post = Post.query.get_or_404(post_id)\n tags = Tag.query.all()\n\n return render_template(\"posts/edit_post.html\", post=post, tags=tags)", "def edit_show_user(user_id):\n edited_user = User.query.get_or_404(user_id)\n\n edited_user.first_name = request.form['first_name']\n edited_user.last_name = request.form['last_name']\n edited_user.image_url = request.form['image_url']\n\n db.session.add(edited_user)\n db.session.commit()\n\n return redirect('/')", "def edit(request, pageName):\n \n if request.method == \"POST\":\n form = EditForm(request.POST)\n \n if form.is_valid(): \n content = form.cleaned_data[\"content\"]\n title = form.cleaned_data[\"title\"]\n \n util.save_entry(title, content)\n return HttpResponseRedirect(reverse(\"encyclopedia:visit_entry\", args=(title, )))\n \n else:\n\n form = EditForm({'title': pageName, 'content': util.get_entry(pageName) })\n return render(request, \"encyclopedia/edit_page.html\", {\n \"form\": EditForm(),\n \"pageName\": pageName\n })\n \n \n return render(request, \"encyclopedia/edit_page.html\", {\n \"form\": EditForm({'title': pageName, 'content': util.get_entry(pageName) }),\n \"pageName\": pageName\n })", "def all_employees(request, company_id=None):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n company_super_user = current_employee.isCompanySuperUserOrHigher()\n if company_id:\n company = Company.objects.get(pk=company_id)\n else:\n company = current_employee.company\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n raise PermissionDenied()\n change_company_form = ChangeCompanyForm(initial=dict(company=company))\n return TemplateResponse(\n request,\n 'all_employees.html',\n {\n 'user': request.user,\n 'company_super_user': company_super_user,\n 'company': company,\n 'change_company_form': change_company_form,\n }\n )", "def show_edit_pet(id):\r\n pet = Pet.query.get_or_404(id)\r\n form = EditPetForm(obj=pet)\r\n\r\n if form.validate_on_submit():\r\n pet.photo_url = form.photo_url.data\r\n pet.notes = form.notes.data\r\n pet.available = form.available.data\r\n db.session.commit()\r\n\r\n return redirect('/')\r\n\r\n else:\r\n return render_template(\"pet_profile.html\", form=form, pet=pet)", "def restaurants_edit(restaurant_id):\n # If the user isn't logged in, send to the login page\n if helper.handle_login(login_session) is False:\n return redirect('/login')\n # Find the restaurant\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n if request.method == 'POST':\n # Only edit if the entry was re-written\n if len(request.form['address']) > 0:\n restaurant.address = request.form['address']\n if len(request.form['phone']) > 0:\n restaurant.phone = request.form['phone']\n if len(request.form['web']) > 0:\n restaurant.web = helper.check_restaurant_URL(request.form['web'])\n if len(request.form['tag_line']) > 0:\n tag_line = request.form['tag_line']\n tag_list = tag_line.split(',')\n helper.delete_restaurant_tag_pairs(restaurant.id)\n for tag in tag_list:\n helper.add_tag_if_not_exists(tag, restaurant.id)\n if len(request.form['description']) > 0:\n restaurant.description = request.form['description']\n\n restaurant.last_update = datetime.utcnow()\n\n session.add(restaurant)\n session.commit()\n flash(\"Restaurant {} edited!\".format(restaurant.name))\n return 
redirect(url_for('restaurants_page'))\n else:\n # Get user info if the user is signed in to render edit form\n user_info = helper.get_user_if_exists(login_session)\n tag_rest_list = session.query(RestaurantTags).filter_by(restaurant_id=restaurant.id).all()\n tag_line = ''\n # Create a tag line - by compiling the string tag_name for each tag\n for pair in tag_rest_list:\n tag = session.query(Tags).filter_by(id=pair.tag_id).first()\n tag_line += tag.tag_name + ', '\n return render_template('editrestaurant.html',\n restaurant=restaurant,\n tag_line=tag_line,\n user_info=user_info)", "def edit(self, *args, **kw):\n id_tipo_item = UrlParser.parse_id(request.url, \"tipositems\")\n url_action = \"../\"\n \n pp = PoseePermiso('redefinir tipo item',\n id_tipo_item=id_tipo_item)\n if not pp.is_met(request.environ):\n flash(pp.message % pp.nombre_permiso, 'warning')\n redirect(url_action)\n tmpl_context.widget = self.edit_form\n value = self.edit_filler.get_value( \\\n values={'id_atributos_por_tipo_item': int(args[0])})\n value['_method'] = 'PUT'\n page = \"Atributo {nombre}\".format(nombre=value[\"nombre\"])\n return dict(value=value, \n page=page, \n atras=url_action)", "def show_edit_form(self, obj_pk=None):\n obj = self.model.objects.get(pk=obj_pk)\n # if there is no edit permission then does not show the form\n if not self.has_view_permissions(obj): return\n\n\n # create the edit form a add it to the empty widget details\n # override the function hide_form to make sure the list is shown after the user close the edition form\n params = {\n 'title':'Edit',\n 'model':self.model,\n 'pk':obj.pk,\n 'parent_model':self.parent_model,\n 'parent_pk':self.parent_pk,\n 'parent_win': self\n }\n\n if self.INLINES: params.update({'inlines': self.INLINES} )\n if self.FIELDSETS: params.update({'fieldsets':self.FIELDSETS})\n if self.READ_ONLY: params.update({'readonly': self.READ_ONLY})\n\n editmodel_class = self.get_editmodel_class(obj)\n editform = editmodel_class(**params)\n\n if hasattr(self, '_details') and self.USE_DETAILS_TO_EDIT:\n self._details.value = editform\n self._list.hide()\n self._details.show()\n\n # only if the button exists:\n toolbar = [self.toolbar] if isinstance(self.toolbar, str) else self.toolbar\n if toolbar:\n for o in toolbar:\n if o and hasattr(self, o): getattr(self, o).hide()\n\n else:\n self._list.show()\n if hasattr(self, '_details'):\n self._details.hide()" ]
[ "0.7485564", "0.6978975", "0.6902276", "0.6840151", "0.6783405", "0.6756372", "0.66953164", "0.66938245", "0.66192317", "0.65741867", "0.65598595", "0.65310377", "0.6502672", "0.6400611", "0.6391832", "0.6368707", "0.63276947", "0.6300049", "0.6295493", "0.62654054", "0.6208546", "0.61726767", "0.612466", "0.6097169", "0.60935307", "0.60688716", "0.6059109", "0.60383636", "0.60363305", "0.60226", "0.5994653", "0.5963026", "0.5960732", "0.5960345", "0.59505135", "0.5939778", "0.5937479", "0.59166807", "0.590668", "0.59066594", "0.5895124", "0.5894537", "0.5888926", "0.5883049", "0.587729", "0.5875536", "0.58752984", "0.5864216", "0.586189", "0.58608603", "0.58534545", "0.58370405", "0.5814373", "0.58103514", "0.58078444", "0.58021176", "0.57754695", "0.575377", "0.57504547", "0.57320106", "0.5731802", "0.57061154", "0.56967896", "0.5689032", "0.56888235", "0.568697", "0.5680074", "0.5679539", "0.56764007", "0.5666772", "0.5666127", "0.5663753", "0.5661273", "0.56497574", "0.56480974", "0.5638638", "0.56358075", "0.5626291", "0.56168205", "0.5602326", "0.5589686", "0.5573791", "0.5568207", "0.556752", "0.5548574", "0.5548346", "0.5544907", "0.55442804", "0.5540486", "0.5536612", "0.55349", "0.55232525", "0.5515783", "0.5509622", "0.55068165", "0.549196", "0.5485232", "0.5484495", "0.5482955", "0.5475598" ]
0.7715588
0
View of dashboard containing overview of relevant information
def dashboard(request): employee = request.user.employee_user.first() widgets = list() # development_plans = employee.getDevelopmentPlans() if employee.is_manager: widgets.append(dict( # template="mus/_widget_waiting_developmentplans.html", data=employee.getMyEmployees(), # title=_('Expecting preparation guides from') )) widgets.append(dict( # template="mus/_widget_todo_developmentplans.html", data=employee.getMyEmployees(), # title=_('Preparation guides to do') )) # widgets.append(dict( # template = "mus/_widget_my_developmentplans.html", # data = development_plans, # title = _('My development plans') # )) return JsonResponse(status=200,data={ # 'widgets': model_to_dict(widgets), 'employee': model_to_dict(employee), # 'development_plans': development_plans })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dashboard():", "def dashboard():\n # Get current user\n user = current_user\n # Get tip of the day\n tip = gdb.gettipofday()\n # Get current user Leaderboard Status\n leaderboard, current_user_info = gdb.getleaderboard(current_user.userID)\n weektopgainers, monthtopgainers = gdb.gettopgainers()\n # Render template\n return render_template('dashboard.html', user=user,\n leaderboard=leaderboard,\n userbalance=current_user.balance, tip=tip,\n current_user_info=current_user_info)", "def dashboard(self):\r\n return {}", "def dashboard():\n return render_template(\"admin/dashboard.html\", title=\"Dashboard\")", "def dashboard():\n return render_template('home/dashboard.html',title='SycliQ Dashboard')", "def dashboard(request, template_name=\"admin/dashboard.html\"):\n return render_to_response(template_name, RequestContext(request, {\n 'user_count': User.objects.count(),\n 'reviewgroup_count': Group.objects.count(),\n 'defaultreviewer_count': DefaultReviewer.objects.count(),\n 'repository_count': Repository.objects.accessible(request.user).count(),\n 'has_cache_stats': get_has_cache_stats(),\n 'title': _(\"Dashboard\"),\n 'root_path': settings.SITE_ROOT + \"admin/db/\"\n }))", "def get_overview():\n from app.core.api_views import Api\n from app.modules.overview import inc\n sar = inc.main()\n api = Api()\n return render_template(\"index.html\",\n sar=sar,\n )", "def show_dashboard():\n script, div = plots.make_plot()\n script_tab, div_tab = plots.make_tabs()\n script_trend, div_trend = plots.make_trend()\n\n return render_template('layout.html',\n script=script,\n div=div,\n script_trend=script_trend,\n div_trend=div_trend,\n script_tab=script_tab,\n div_tab=div_tab)", "def dashboard():\n return render_template(\"home/dashboard.html\", title=\"Dashboard\")", "def dashboard():\n return render_template('home/dashboard.html', title=\"Dashboard\")", "def dashboard_page():\n log.info('Load dashboard!')\n navigation = render_template('navigation.html', babyName=firstName)\n return render_template('index.html', babyName=firstName, nav=navigation)", "def dashboard():\n logger.debug(\"User: %s\" % (current_user.get_id()))\n users = mongo.db[app.config['USERS_COLLECTION']]\n user = users.find_one({'username': current_user.get_id()})\n monitors = mongo.db[app.config['MONITORS_COLLECTION']]\n results = list(monitors.find({'username': current_user.get_id()}))\n results.sort(key=lambda x: x['checked'], reverse=True)\n return render_template('dashboard.html', name=user.get('first_name'),\n monitors=results)", "def overview():\n return render_template('api/api.html', title='API Overview')", "def dashboard_showall():\n tasks = Task.query.all()\n return render_template('home/taskshowall/dashboard_showall.html',\n tasks=tasks, title=\"Tasks\")", "def get(self, request):\n return Response(\"Dashboard Listing Page\", status=status.HTTP_200_OK)", "def dashboard():\r\n return render_template('{}/dashboard.html'.format(MODULE_DIR))", "def admin_dash():\n if session['user_admin'] == False:\n abort(403)\n\n yesterday = datetime.utcnow() - timedelta(days=1)\n last_week = datetime.utcnow() - timedelta(days=7)\n # Retrieve all Users\n sqa_sess = sqa_session()\n total_users = sqa_sess.query(User).count()\n new_users_yesterday = sqa_sess.query(User).filter(User.Create_Date > yesterday).count()\n new_users_lastweek = sqa_sess.query(User).filter(User.Create_Date > last_week).count()\n\n active_users_yesterday = sqa_sess.query(User).filter(User.Last_Login_Date > yesterday).count()\n active_users_lastweek = 
sqa_sess.query(User).filter(User.Last_Login_Date > last_week).count()\n\n total_flights = sqa_sess.query(FlightPlan).count()\n new_flights_yesterday = sqa_sess.query(FlightPlan).filter(FlightPlan.Import_Date >= yesterday).count()\n new_flights_lastweek = sqa_sess.query(FlightPlan).filter(FlightPlan.Import_Date >= last_week).count()\n \n\n return render_template('admin/dashboard.html', total_users=total_users, new_users_yesterday=new_users_yesterday, new_users_lastweek=new_users_lastweek,\n active_users_lastweek=active_users_lastweek, active_users_yesterday=active_users_yesterday,\n total_flights=total_flights, new_flights_lastweek=new_flights_lastweek, new_flights_yesterday=new_flights_yesterday)", "def ViewDashboardView(request, dashboard_id):\n \n dashboard = Dashboard.objects.get(id=dashboard_id)\n \n return Response(DashboardSerializer(dashboard).data)", "def dashboard(request):\n return render(request, \"cells_home.html\", {})", "def my_dashboard(request):\n #Get the associated contact for our user\n user_con = request.user.contact\n qs_proj_assoc, qs_task_assoc = get_tiered_upcoming(user_con)\n\n #Get the projects associated with the user\n user_proj_table = table_proj.ProjectAssocAjaxTable(qs_proj_assoc)\n #Get the tasks associated with the user\n user_task_table = table_task.TaskAssocAjaxTable(qs_task_assoc)\n\n # Render the HTML template index.html with the data in the context variable\n return render(\n request,\n 'my_dashboard.html',\n context={\n 'user_con':user_con,\n 'user_proj_table':user_proj_table,\n 'user_task_table':user_task_table,\n 'project_source' : 'data-dashboard-project-upcoming',\n 'task_source' : 'data-dashboard-task-upcoming',\n 'input_id' : user_con.pk,\n 'print_url':reverse_lazy('my-dashboard-print'),\n },\n )", "def dashboard():\n return render_template('home/dashboard.html')", "def dashboard():\n # TODO: Optionally, old proposals should be shown in a read-only mode.\n talks = Talk.query.current.filter(Talk.user == current_user)\n return render_template(\n 'profile/dashboard.html', talks=talks)", "def dashboard(request):\r\n sources = (models.Source.objects.all().prefetch_related('metric_set')\r\n .order_by('name'))\r\n metrics = SortedDict([(src, src.metric_set.all()) for src in sources])\r\n no_source_metrics = models.Metric.objects.filter(source__isnull=True)\r\n if no_source_metrics:\r\n metrics[''] = no_source_metrics\r\n\r\n if request.META.get('HTTP_X_PJAX', False):\r\n parent_template = 'pjax.html'\r\n else:\r\n parent_template = 'base.html'\r\n return render(request, 'metrics/dashboard.html', {\r\n 'source_metrics': metrics,\r\n 'parent_template': parent_template\r\n })", "def show_dashboard(self):\n secret_cmd = f\"kubectl --kubeconfig {self.kubeconfig} -n kube-system get secret | grep eks-admin | awk '{{print $1}}'\"\n ps_secret = subprocess.Popen(secret_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n secret = ps_secret.communicate()[0].decode(\"utf-8\").strip()\n token_cmd = f\"kubectl --kubeconfig {self.kubeconfig} -n kube-system describe secret {secret} | grep -E '^token' | cut -f2 -d':' | tr -d \\\" \\\"\"\n ps_token = subprocess.Popen(token_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n token = ps_token.communicate()[0].decode(\"utf-8\").strip()\n print(f'{Fore.GREEN}HERE IS YOUR KUBERNETES DASHBOARD TOKEN: {Fore.BLUE}{token}{Style.RESET_ALL}')\n proxy_cmd = f\"kubectl --kubeconfig {self.kubeconfig} proxy -p 8001\"\n subprocess.Popen(\"open 
http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes\"\n \"-dashboard:/proxy/\", shell=True)\n subprocess.run(proxy_cmd, shell=True)", "def dashboard(request):\n\n c = RequestContext(request, {})\n t = loader.get_template('family_info/dashboard.html')\n return HttpResponse(t.render(c))", "def dashboard():\n\n # get the directories in the data folder\n # (each directory represents another repo)\n repos = os.listdir(DATA)\n\n for repo in repos:\n # remove it if it's not a directory\n if not os.path.isdir(DATA + repo):\n repos.remove(repo)\n\n return render_template('home/dashboard.html', title=\"Dashboard\", repos=repos)", "def show(self, name):\n\t\t# default page content and template\n\t\ttry:\n\t\t\tdata ={}\n\t\t\ttemplate = \"home\"\n\t\t\tget_item = request.args.get(\"get\")\n\n\t\t\tif get_item==\"blog-feed\":\n\t\t\t\tfeeds = []\n\t\t\t\td = feedparser.parse(\"http://blog.flaav.com/feed\")\n\t\t\t\tfeeds.append({\"blog_title\": d[\"feed\"][\"title\"]})\n\t\t\t\tfor entry in d[\"entries\"]:\n\t\t\t\t\tfeeds.append({\"title\": entry[\"title\"], \"link\": entry[\"link\"], \"author\": entry[\"author_detail\"][\"name\"], \"published\": entry[\"published\"]})\n\t\t\t\treturn self.json_out(feeds)\n\t\t\telif get_item==\"active-plugins\":\n\t\t\t\tactive_plugins = []\n\t\t\t\tPA = PluginActions()\n\t\t\t\tfor p_name in PA.get_active_plugins():\n\t\t\t\t\tplugin = PA.get_pluginbyname(p_name=p_name)\n\t\t\t\t\tif plugin:\n\t\t\t\t\t\tactive_plugins.append({\"url\": url_for(\"pluginloader.index\", plugin_id=plugin.id, action=\"view\"), \"name\": plugin.name, \"plugin_id\": plugin.id})\n\t\t\t\treturn self.json_out(active_plugins)\n\n\t\t\t# statistics \n\t\t\tif name==\"home\":\n\t\t\t\tDA = DashboardActions()\n\t\t\t\tdata.update({\"media_stat\": DA.get_statistics()})\n\t\t\t# ===\n\t\t\tdata.update({\"dashboard_page\": template})\n\t\t\treturn self.render(\"dashboard/\"+template+\".html\", data=data)\n\t\texcept Exception as e:\n\t\t\tself.error_handle.get_error(error=str(e), occurred_at=\"mad.modules.DashboardView.show()\")\n\t\t\tabort(500)", "def overview():\n pages_list = g.db.pages.find().sort('name')\n return render_template('{}/index.html'.format(MODULE_DIR), **locals() )", "def my_dashboard_print(request):\n #Get the associated contact for our user\n user_con = request.user.contact\n qs_proj_assoc, qs_task_assoc = get_tiered_upcoming(user_con)\n\n #Get the projects associated with the user\n user_proj_table = table_assoc.ProjectAssocTable_Printable(qs_proj_assoc)\n #Get the tasks associated with the user\n user_task_table = table_assoc.TaskAssocTable_Printable(qs_task_assoc)\n\n # Render the HTML template index.html with the data in the context variable\n return render(\n request,\n 'my_dashboard_printable.html',\n context={\n 'user_con':user_con,\n 'user_proj_table':user_proj_table,\n 'user_task_table':user_task_table,\n },\n )", "def dashboard(self) -> api.Dashboard:\n return self._get_model(model=api.Dashboard)", "def get_dashboard_ui(self):\n vmem = psutil.virtual_memory()\n megabyte = 1024 * 1024\n gigabyte = megabyte * 1024\n mfree = vmem.available/megabyte\n mmax = vmem.total/megabyte\n\n disk = psutil.disk_usage('/')\n dmax = disk.total/gigabyte\n dfree = disk.free/gigabyte\n\n return self.loader.load(\"system_dashboard.html\").generate(\n freemem=mfree,\n maxmem=mmax,\n freedisk=dfree,\n maxdisk=dmax,\n partition=partition\n )", "def dashboard(self, slug):\r\n return resources.Dashboard(self, slug)", "def home(request):\n\n swms = 
Swms.objects.all()\n\n context = {\n 'swms': swms,\n }\n\n return render(request, 'dashboard.html', context)", "def show(self) -> None:", "def dashboard_view(self):\n return AttrDict({\n 'file_histogram': [h for h in self.file_histogram.values()],\n 'files': [f for f in self.files.values()],\n 'nodes': [\n {\n \"type\": \"Project\",\n \"count\": 1\n },\n {\n \"type\": \"Subject\",\n \"count\": len(self.subjects)\n },\n {\n \"type\": \"Samples\",\n # samples is a dict keyed by subject id, sum the len of each subject's sample list\n \"count\": sum([len(sl) for sl in list(self.samples.values())])\n },\n ],\n 'size': sum([f['size']for f in self.files.values()]),\n 'project_id': self.name,\n 'public': self.attributes['public'],\n 'createdDate': self.attributes.workspace.createdDate,\n 'lastModified': self.attributes.workspace.lastModified,\n 'data_type': self.data_type,\n 'data_category': self.data_category,\n 'problems': self.problems\n })", "def AdminDashboard(user=None):\n\n\tif user == None:\n\t\tuser= defaultUser\n\n\ttable = user.htmlTable()\n\n\treturn render_template('adminDash.html', table=table, user = user.name)", "def dashboard(request):\r\n profile = get_object_or_404(Profile, user=request.user)\r\n wallet = Wallet.objects.get(user=request.user)\r\n history = History.objects.get(pk=1)\r\n referrals = Referral.objects.filter(referee=request.user).count()\r\n invoices = Invoice.objects.filter(issuer=request.user).count()\r\n return render(request, 'coin/dashboard.html', {'profile': profile, \r\n 'wallet': wallet, 'history': history, 'referrals': referrals, \r\n 'invoices': invoices})", "async def dashboard(request):\n return [\n {'name': 'application config', 'value': {k: str(v) for k, v in app.cfg}},\n {'name': 'request headers', 'value': dict(request.headers)},\n ]", "def index(self):\n return self.render(\"admin/index.html\")", "def dashboard(request):\r\n if not request.user.is_staff:\r\n raise Http404\r\n\r\n # results are passed to the template. The template knows how to render\r\n # two types of results: scalars and tables. 
Scalars should be represented\r\n # as \"Visible Title\": Value and tables should be lists of lists where each\r\n # inner list represents a single row of the table\r\n results = {\"scalars\":{},\"tables\":{}}\r\n\r\n # count how many users we have\r\n results[\"scalars\"][\"Unique Usernames\"]=User.objects.filter().count()\r\n results[\"scalars\"][\"Activated Usernames\"]=User.objects.filter(is_active=1).count()\r\n\r\n # count how many enrollments we have\r\n results[\"scalars\"][\"Total Enrollments Across All Courses\"] = CourseEnrollment.objects.filter(is_active=1).count()\r\n\r\n # establish a direct connection to the database (for executing raw SQL)\r\n cursor = connection.cursor()\r\n\r\n # define the queries that will generate our user-facing tables\r\n # table queries need not take the form of raw SQL, but do in this case since\r\n # the MySQL backend for django isn't very friendly with group by or distinct\r\n table_queries = {}\r\n table_queries[\"course registrations (current enrollments)\"] = \"\"\"\r\n select\r\n course_id as Course,\r\n count(user_id) as Students\r\n from student_courseenrollment\r\n where is_active=1\r\n group by course_id\r\n order by students desc;\"\"\"\r\n table_queries[\"number of students in each number of classes\"] = \"\"\"\r\n select registrations as 'Registered for __ Classes' ,\r\n count(registrations) as Users\r\n from (select count(user_id) as registrations\r\n from student_courseenrollment\r\n where is_active=1\r\n group by user_id) as registrations_per_user\r\n group by registrations;\"\"\"\r\n\r\n # add the result for each of the table_queries to the results object\r\n for query in table_queries.keys():\r\n cursor.execute(table_queries[query])\r\n results[\"tables\"][query] = SQL_query_to_list(cursor, table_queries[query])\r\n\r\n context={\"results\":results}\r\n\r\n return render_to_response(\"admin_dashboard.html\",context)", "def show(self):\n\n pass", "def dashboards(self):\r\n return resources.Dashboards(self)", "def view(self):", "def index(self):\n\n\t\tself.db = DB()\n\t\tactivityTuple = self.db.select_all_from(\"activity\")[1]\n\t\ttmpl = lookup.get_template(\"index.html\")\n\t\treturn (tmpl.render(activity=activityTuple))", "def overview():\r\n # Update the list of languages allowed on the site, \r\n # except for the language used by your users at that time.\r\n if request.method == 'POST':\r\n lan_object = Languages()\r\n data = lan_object.update()\r\n message = lan_object.message\r\n status = lan_object.status\r\n \r\n # Gets documents from the collections of all languages \r\n languages_list = g.languages_object.get_languages(1)\r\n language_chosen = g.languages_object.get_languages(2)\r\n return render_template( '{}/index.html'.format(MODULE_DIR), **locals())", "def home(request):\n\n context = {\n \"resource_id\": request.GET.get(\"resource_id\"),\n \"aggregation_id\": request.GET.get(\"aggregation_path\"),\n \"geoserver_url\": app.get_custom_setting(\"geoserver_url\"),\n \"hydroserver_url\": app.get_custom_setting(\"hydroserver_url\"),\n \"max_layers\": app.get_custom_setting(\"max_layers\")\n }\n\n return render(request, 'hydroshare_data_viewer/home.html', context)", "def explainerdashboard_cli(ctx):", "def test_dashboards_v2_show(self):\n pass", "def page_dashboard(state):\n\n st.title(\":chart_with_upwards_trend: Prediction Results Dashboard\")\n\n st.markdown(\"# Select Stocks to View Results:\")\n if state.finalized_data:\n for stock_data in state.finalized_data:\n st.write(\"---\")\n st.markdown(\"## \" + 
stock_data[\"stock\"])\n if st.checkbox(\"View Results for \" + stock_data[\"stock\"]):\n\n ############################################\n\n st.markdown(\"### Historical Predictions:\")\n\n df2 = pd.DataFrame.from_dict(stock_data[\"prev_predictions\"])\n\n select_lbl = (\n \"Enter the names of models for \" + stock_data[\"stock\"] + \":\"\n )\n models_selections = st.multiselect(\n label=select_lbl,\n options=df2.columns,\n ) # allow users to display specific model results on dataframe graph\n\n if not models_selections: # if nothing is selected show all models!\n st.line_chart(df2)\n else:\n st.line_chart(df2[models_selections])\n\n st.markdown(\n \"*Note:* 'Prices' are the actual prices for those days. The rest are model predictions for those days.\\nPrices (in USD) are on the y-axis, the day number in the data is on the x-axis.\"\n )\n\n ############################################\n\n st.markdown(\"### Future (Next-Day) Predictions:\")\n\n df = pd.DataFrame()\n df = df.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"swing_predictions\"]]\n )\n )\n df = df.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"next_day_predictions\"]]\n )\n )\n df = df.append(\n pd.DataFrame([stock_data[\"prediction_results\"][\"model_scores\"]])\n )\n\n df.index = [\n \"Swing Predicton\",\n \"Price Prediction ($)\",\n \"Model Fit Score\",\n ]\n df = df.transpose()\n df # display chart\n\n st.markdown(\n \"- The current price of the stock is *$\"\n + str(\n round(stock_data[\"prediction_results\"][\"current_prev_close\"], 2)\n )\n + \"*.\"\n )\n\n if state.period == \"1mo\":\n st.markdown(\"- *Recommended Model (for 1mo):* SVR-RBF\")\n st.markdown(\n \"- *View the homescreen for more model & dataset size combination recommendations.*\"\n )\n elif state.period == \"6mo\":\n st.markdown(\n \"- *Recommended Model (for 6mo):* SVR-Poly (most recommended), LR, EN, or Lasso.\"\n )\n st.markdown(\n \"- *View the homescreen for more model & dataset size combination recommendations.*\"\n )\n elif state.period == \"1y\":\n st.markdown(\"- *Recommended Model (for 1yr):* SVR-Poly\")\n st.markdown(\n \"- *View the homescreen for more model & dataset size combination recommendations.*\"\n )\n else:\n st.markdown(\n \"- *Note:* View the home screen for information about the best models and training data size combinations.\"\n )\n\n ############################################\n st.markdown(\"### View Other Information:\")\n\n if st.checkbox(\n \"View \" + stock_data[\"stock\"] + \"'s Model Efficiency Timings\"\n ):\n st.markdown(\"#### Model Efficiencies:\")\n st.markdown(\n \"Shows the time in seconds it took models to complete specific tasks:\"\n )\n df3 = pd.DataFrame()\n df3 = df3.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"training_times\"]]\n )\n )\n df3 = df3.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"testing_times\"]]\n )\n )\n df3 = df3.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"new_predictions_times\"]]\n )\n )\n df3 = df3.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"prev_predictions_times\"]]\n )\n )\n df3.index = [\n \"Training\",\n \"Testing/Scoring\",\n \"Future Predictions\",\n \"Historical Predictions\",\n ]\n df3 = df3.transpose()\n df3\n\n ############################################\n\n if st.checkbox(\"View \" + stock_data[\"stock\"] + \"'s Information\"):\n st.markdown(\"#### Company Information:\")\n for key in stock_data[\"stock_info\"].keys():\n st.write(\"*\", key + \":\", 
stock_data[\"stock_info\"][key])\n else:\n st.markdown(\n \"## Generate data to populate and initialize this page by going to the 'Settings' page and running the tool!\"\n )", "def get(self):\n accounts = self.get_account_data()\n transactions = self.get_transaction_data()\n return render_template(\n \"index.html\", page_name=\"Main\", accounts=accounts, transactions=transactions\n )", "def overview():\n # TODO: fix ajax https://groups.google.com/d/msg/web2py/YyVilc2ywdg/ZLtN3Gg3Ft0J\n # TODO: fix ?plain link in results\n from plugin_introspect import get_task_code\n lesson = request.args[0] # controller with lesson contents\n # lesson = request.vars.lesson_controller # controller with lesson contents\n fun_names = exposed_functions_names( controller=lesson )\n exposed_functions = generate_exposed_functions_info( controller=lesson )\n examples_codes = [ get_task_code(code=exposed_functions[f]['code'], task_key=lesson+'/'+f, decorate=True) for f in fun_names ]\n results_urls = [ URL(lesson, f, vars=dict(plain=1)) for f in fun_names ]\n return response.render('tutor.html', dict(lesson=lesson, fun_names=fun_names, examples_codes=examples_codes, results_urls=results_urls) )", "def show(self):\n pass", "def index():\n graphs = [\n message_genre_bar_chart(df),\n category_bar_chart(df),\n top_words_bar_chart(df)\n ]\n \n # encode plotly graphs in JSON\n ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n \n # render web page with plotly graphs\n return render_template('master.html', ids=ids, graphJSON=graphJSON)", "def index():\n title = \"Application process \"\n links = {'mentors': 'mentors',\n 'schools': 'all-school',\n 'mentors_by_country': 'mentors-by-country',\n 'contacts': 'contacts',\n 'applicants': 'applicants',\n 'applicants_and_mentors': 'applicants-and-mentors'}\n menu = ['Show mentors and schools',\n 'Show mentors and all schools',\n 'Show mentors by country',\n 'Show contacts',\n 'Show applicants',\n 'Show applicants and mentors']\n return render_template('index.html', links=links, menu=menu, title=title)", "def index():\n context = make_context()\n\n # Nav needs to be a list of lists.\n # The inner list should only have four objects max.\n # Because of reasons.\n context['nav'] = []\n contents = list(context['COPY']['content'])\n not_yet_four = []\n\n for idx, row in enumerate(contents):\n row = dict(zip(row.__dict__['_columns'], row.__dict__['_row']))\n row_title = row.get('data_panel', None)\n\n if row_title:\n if row_title not in ['_', 'introduction', 'data_panel', 'about']:\n not_yet_four.append(row)\n\n if len(not_yet_four) == 4:\n context['nav'].append(not_yet_four)\n not_yet_four = []\n\n if (idx + 1) == len(contents):\n if len(not_yet_four) > 0:\n context['nav'].append(not_yet_four)\n\n return render_template('index.html', **context)", "def Dashboard(user=None):\n\n\tif user == None:\n\t\tuser= defaultUser\n\n\ttable = user.htmlTable(head=5)\n\t\n\n\tphysics_score = user.subjectAccuracy(\"Physics\")\n\tbiology_score = user.subjectAccuracy(\"Biology\")\n\n\tbiology_numerator = biology_score[1]\n\tbiology_denominator = biology_score[0]\n\tbiology_accuracy = int(np.round(biology_score[2], 2) * 100)\n\n\tphysics_numerator = physics_score[1]\n\tphysics_denominator = physics_score[0]\n\tphysics_accuracy = int(np.round(physics_score[2], 2) * 100)\n\n\ttotal_questions = biology_denominator + physics_denominator\n\n\n\twikifier_results = {}\n\twikifier_results[\"Oski\"] = 
\"https://en.wikipedia.org/wiki/Oski_the_Bear\"\n\twikifier_results[\"Mitosis\"] = \"https://en.wikipedia.org/wiki/Mitosis\"\n\twikifier_results[\"Gravity\"] = \"https://en.wikipedia.org/wiki/Gravity\"\n\n\treturn render_template('indexStudent.html', user=user.name, table=table, wikifier_results=wikifier_results, \n\t\tphysics_numerator = physics_numerator, physics_denominator = physics_denominator, physics_accuracy = physics_accuracy, \n\t\tbiology_accuracy = biology_accuracy, biology_numerator = biology_numerator, biology_denominator = biology_denominator, total_questions=total_questions)", "def index():\n \n currentDateTime = current_datetime()\n fromDateTime = calc_day(currentDateTime, -3)\n\n # Adjust if any graphs should be shown in index page\n # Temperatur=XML(render_graph(3, 5, fromDateTime, currentDateTime, show_dots=False))\n # Procent_smoke=XML(render_graph(3, 6, fromDateTime, currentDateTime, show_dots=False))\n # Kitchen_Stove=XML(render_graph(2, 3, fromDateTime, currentDateTime, show_dots=False))\n # Humid=XML(render_graph(3, 4, fromDateTime, currentDateTime, show_dots=False))\n # Brightness=XML(render_graph(3, 7, fromDateTime, currentDateTime, show_dots=False))\n # Hall_motions=XML(render_graph(1, 1, fromDateTime, currentDateTime, show_dots=False, hits=True))\n # Hall_door=XML(render_graph(1, 2, fromDateTime, currentDateTime, show_dots=False, on_off=['Open', 'Close']))\n\n # return dict(test=locals())\n # return dict(test=device_monitoring)\n return dict()", "def index():\n try:\n # Retrieve a list of active clients from the BancBox API for \n # the right side bar.\n active_clients = api.get_active_clients()\n except Exception, e:\n active_clients = []\n logger.error('Error retrieving active clients: %s', e)\n return render_template('index.html', active_clients=active_clients)", "def index():\r\n if current_user.is_authenticated():\r\n user_id = current_user.id\r\n else:\r\n user_id = 'anonymous'\r\n top_users = cached_users.get_leaderboard(current_app.config['LEADERBOARD'],\r\n user_id=user_id)\r\n\r\n return render_template('/stats/index.html', title=\"Community Leaderboard\",\r\n top_users=top_users)", "def show_activities(self): \n database = Database('data/database.db')\n activities = database.read_activities()\n view = Template(filename=\"view/template.html\", lookup=lookup)\n \n \n return view.render(\n rows = [[item.number, item.name] for item in activities],\n pageTitle = \"Activités\",\n tableTitle = \"Liste de toutes les activités\",\n ths = [\"Numéro\", \"Nom\"]\n )", "def index():\n\tbusiness_news = get_news(\"business\")\n\tsports_news= get_news(\"sports\")\n\ttechnology_news= get_news(\"technology\")\n\tentertainment_news= get_news(\"entertainment\")\n\thealth_news=get_news(\"health\")\n\n\treturn render_template('index.html',health=health_news,business=business_news,sports= sports_news,technology= technology_news,entertainment= entertainment_news)", "def index():\n # create table for original dataset\n table_1 = data_table_low(filepath = \"sparkify_data.csv\", title='Raw Sparkify Data')\n\n table_2 = data_table_low(filepath = \"cleaned_data.csv\", title='Cleaned Sparkify Data')\n\n # create and append plotly visuals into an array to be passed later for graphJSON file\n graphs = [table_1, table_2]\n\n # encode plotly graphs in JSON\n ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n\n # render web page with plotly graphs\n return render_template(\"master.html\", ids=ids, 
graphJSON=graphJSON)", "def index(self):\n\t\treturn render_template('index.html')", "def index(self):\n return render_template('main/index.html')", "async def view():\n\n components = await get_data()\n content = load.RELOADICON.format('/reloadstore')\n content += load.SEARCHBAR\n\n for component in components:\n cardtitle = ''\n\n if not components[component]['trackable']:\n continue\n\n if components[component]['has_update']:\n cardtitle += load.UPDATEICON\n\n cardtitle += component\n\n needs_migration = await migration_needed(component)\n\n if needs_migration:\n cardtitle += load.TOOLTIP.format('Migration needed')\n\n elif not components[component]['embedded']:\n cardtitle += load.TOOLTIP.format('Not managable')\n\n cardcontent = load.META.format(\n type='author', text=components[component]['author']['login'])\n\n cardcontent += load.TEXT.format(components[component]['description'])\n cardbutton = load.LINK.format(\n url='/component/'+component, target='_self',\n style='', id='', htmlclass='', extra='', text='More info')\n\n content += load.BUTTON_CARD.format(\n title=cardtitle, content=cardcontent, buttons=cardbutton)\n\n html = load.TOP\n html += load.BASE.format(content)\n html += load.END\n\n return html", "def show(self):", "def index():\r\n\r\n title = \"Global Statistics\"\r\n\r\n n_auth = n_auth_users()\r\n\r\n n_anon = n_anon_users()\r\n\r\n n_total_users = n_anon + n_auth\r\n\r\n n_published_apps = cached_apps.n_published()\r\n n_draft_apps = cached_apps.n_draft()\r\n n_total_apps = n_published_apps + n_draft_apps\r\n\r\n n_tasks = n_tasks_site()\r\n\r\n n_task_runs = n_task_runs_site()\r\n\r\n top5_apps_24_hours = get_top5_apps_24_hours()\r\n\r\n top5_users_24_hours = get_top5_users_24_hours()\r\n\r\n locs = get_locs()\r\n\r\n show_locs = False\r\n if len(locs) > 0:\r\n show_locs = True\r\n\r\n stats = dict(n_total_users=n_total_users, n_auth=n_auth, n_anon=n_anon,\r\n n_published_apps=n_published_apps,\r\n n_draft_apps=n_draft_apps,\r\n n_total_apps=n_total_apps,\r\n n_tasks=n_tasks,\r\n n_task_runs=n_task_runs)\r\n\r\n users = dict(label=\"User Statistics\",\r\n values=[\r\n dict(label='Anonymous', value=[0, n_anon]),\r\n dict(label='Authenticated', value=[0, n_auth])])\r\n\r\n apps = dict(label=\"Apps Statistics\",\r\n values=[\r\n dict(label='Published', value=[0, n_published_apps]),\r\n dict(label='Draft', value=[0, n_draft_apps])])\r\n\r\n tasks = dict(label=\"Task and Task Run Statistics\",\r\n values=[\r\n dict(label='Tasks', value=[0, n_tasks]),\r\n dict(label='Answers', value=[1, n_task_runs])])\r\n\r\n return render_template('/stats/global.html', title=title,\r\n users=json.dumps(users),\r\n apps=json.dumps(apps),\r\n tasks=json.dumps(tasks),\r\n locs=json.dumps(locs),\r\n show_locs=show_locs,\r\n top5_users_24_hours=top5_users_24_hours,\r\n top5_apps_24_hours=top5_apps_24_hours,\r\n stats=stats)", "def show_logs():\n nodes=hl.getAllNodes();\n\n return render_template('logs.html',nodes = nodes)", "def index() -> str:\n return render_template('index.html', username=getpass.getuser(), hostname=socket.gethostname(),\n manager_host=DASHBOARD_MANAGER_HOST.value.decode() or 'localhost',\n manager_port_nr=DASHBOARD_MANAGER_PORT.value)", "def dashboard():\n \n if 'username' in session:\n user = mongo.db.user.find_one({'username': session['username']})\n \n image_file = 'https://codeflow-app.s3-eu-west-1.amazonaws.com/'+ user['profile_image']\n \n \n # created content\n articles = list(mongo.db.articles.find({'user_id': user['_id']}).sort('date',pymongo.DESCENDING))\n 
profile = mongo.db.profiles.find_one({'user_id': user['_id']})\n projects = list(mongo.db.projects.find({\n 'user_id': user['_id']}).sort('date',pymongo.DESCENDING))\n snt_profile_msgs = list(mongo.db.profile_msgs.find({\n 'from_user': user['username']}).sort('date',pymongo.DESCENDING))\n snt_project_msgs = list(mongo.db.project_msgs.find({\n 'from_user': user['username']}).sort('date',pymongo.DESCENDING))\n snt_pieces = list(mongo.db.project_pieces.find({\n 'owner': user['username']}).sort('date',pymongo.DESCENDING))\n \n # received content\n rcvd_profile_msgs = list(mongo.db.profile_msgs.find({\n 'to_user': user['username']}).sort('date',pymongo.DESCENDING))\n rcvd_project_msgs = list(mongo.db.project_msgs.find({\n 'to_user': user['username']}).sort('date',pymongo.DESCENDING)) \n rcvd_pieces = list(mongo.db.project_pieces.find({\n 'assignee': user['username']}).sort('date',pymongo.DESCENDING))\n\n return render_template('pages/dashboard.html', \n title='Dashboard',\n articles=articles,\n profile=profile,\n projects=projects,\n snt_profile_msgs=snt_profile_msgs,\n snt_project_msgs=snt_project_msgs,\n snt_pieces=snt_pieces,\n rcvd_profile_msgs=rcvd_profile_msgs,\n rcvd_project_msgs=rcvd_project_msgs,\n rcvd_pieces=rcvd_pieces,\n user=user,\n image_file=image_file\n )\n\n flash('You need to be logged in to access your dashboard.', 'warning')\n return redirect(url_for('login'))", "def index(request):\n if request.user.is_authenticated:\n return redirect('/dashboard')\n else:\n context = {'client_id': settings.OPENHUMANS_CLIENT_ID,\n 'oh_proj_page': settings.OH_ACTIVITY_PAGE}\n\n return render(request, 'main/index.html', context=context)", "def index():\n view_dict = get_opentree_services_method_urls(request)\n view_dict['maintenance_info'] = get_maintenance_info(request)\n if auth.is_logged_in():\n # user is logged in, filter to their own collections by default?\n pass\n else:\n # anonymous visitor, show unfiltered list?\n pass\n\n return view_dict", "def dashboard_info(path):\n logging.info(\n \"Searching path `{}` for JSON dashboards...\".format(path)\n )\n print(info_dashboards(path))", "def admin_index():\n return 'Super-seekrit admin page.'", "def dashboards(self) -> dict:\n return Config.get_dashboards()", "def show_main():\n db = get_db()\n now = datetime.datetime.now()\n rooms = app.config[\"HC_CONFIG\"].values()\n rooms.sort(key=config_lexer.Room.sortkey)\n # Get the three newest counts from the database, sorted by the\n # user-provided time\n newest_counts = db.get_newest_counts(3, hc_db.NewestSort.ENTERED_TIME)\n recent_counts = []\n for count in newest_counts:\n room_rows = db.get_roomdata_for_count_id(count[\"id\"])\n # I couldn't think of a short, descriptive name for this variable.\n some_dict = {\"date\": count[\"entered_time\"], \"counts\": {}}\n for row in room_rows:\n some_dict[\"counts\"][row[\"room\"]] = row[\"people_count\"]\n some_dict[\"counts\"] = OrderedDict(\n sorted(some_dict[\"counts\"].items(), key=sort_count_data)\n )\n recent_counts.append(some_dict)\n if is_admin(session[\"username\"]):\n buttons = [\n NavButton(url_for(\"show_admin\"), \"Administration\"),\n NavButton(url_for(\"show_help\"), \"Help\"),\n NavButton(url_for(\"logout\"), \"Log Out\"),\n ]\n else:\n buttons = [\n NavButton(url_for(\"show_help\"), \"Help\"),\n NavButton(url_for(\"logout\"), \"Log Out\"),\n ]\n return render_template(\n \"main.html\",\n buttons=buttons,\n rooms=rooms,\n recent_counts=recent_counts,\n datewhen=now.strftime(\"%Y-%m-%d\"),\n timewhen=now.strftime(\"%H:%M\"),\n )", 
"def dashboards(request):\n user = getattr(request, 'user')\n context = {}\n\n if not isinstance(user, AnonymousUser):\n context.update({\n 'user_dashboards': get_user_dashboards(user)\n })\n return context", "def overview():\n subjects = get_latest(10)\n return render_template('subject.html', subjects=subjects)", "def index(self):\n log.debug('index()')\n return redirect_to('/admin/dashboard')", "def index(self):\n return self.html", "def index(self):\n return self.load_view('index.html')", "def show(self):\n prev_queries = 0\n prev_cpu_sys = 0\n prev_cpu_user = 0\n \n lines = {\n \"Uptime (seconds)\": \"--\",\n \"Number of queries\": \"--\",\n \"Query per second\": \"--\",\n \"ACL drops\": \"--\",\n \"Dynamic drops\": \"--\",\n \"Rule drops\": \"--\",\n \"CPU Usage (%s)\": \"--\",\n \"Cache hitrate\": \"--\"\n }\n\n while True:\n try:\n # get stats from dnsdist\n stats = Statistics(console=self.console)\n global_stats = stats[\"global\"]\n \n qps = int(global_stats[\"queries\"]) - prev_queries\n prev_queries = int(global_stats[\"queries\"])\n cpu = (int(global_stats[\"cpu-sys-msec\"])+int(global_stats[\"cpu-user-msec\"]) - prev_cpu_sys - prev_cpu_user) / 10\n prev_cpu_sys = int(global_stats[\"cpu-sys-msec\"])\n prev_cpu_user = int(global_stats[\"cpu-user-msec\"])\n \n lines[\"Uptime (seconds)\"] = global_stats[\"uptime\"]\n lines[\"Number of queries\"] = global_stats[\"queries\"]\n lines[\"Query per second\"] = qps\n lines[\"CPU Usage (%s)\"] = cpu\n lines[\"ACL drops\"] = global_stats[\"acl-drops\"]\n lines[\"Rule drops\"] = global_stats[\"rule-drop\"]\n lines[\"Cache hitrate\"] = global_stats[\"cache-hits\"]\n lines[\"Dynamic drops\"] = global_stats[\"dyn-blocked\"]\n\n # reprint the lines \n sys.stdout.write(\"\\033[1mDashboard for dnsdist\\033[0m\\n\")\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Global:\\n\")\n for k,v in lines.items():\n sys.stdout.write(\"\\t%s: %s\\n\" % (k,v))\n sys.stdout.write(\"Backends:\\n\")\n for s in stats[\"backends\"]:\n if not len(s[\"name\"]):\n s[\"name\"] = \"--\"\n if not len(s[\"pools\"]):\n s[\"pools\"] = \"--\"\n sys.stdout.write(\"\\t#%s / %s / %s / %s\\n\" % (s[\"#\"],s[\"address\"],s[\"name\"],s[\"pools\"]) )\n sys.stdout.write(\"\\t\\tNumber of queries: %s\\n\" % s[\"queries\"])\n sys.stdout.write(\"\\t\\tQuery per second: %s\\n\" % s[\"qps\"])\n sys.stdout.write(\"\\t\\tNumber of drops: %s\\n\" % s[\"drops\"])\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Ctrl+C to exit\\n\")\n \n time.sleep(1)\n \n \n # move up cursor and delete whole line\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for k,v in lines.items():\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for s in stats[\"backends\"]:\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n \n del stats\n except KeyboardInterrupt:\n break", "def dashboard(request):\n template = \"pages/dashboard.html\"\n if not request.user.is_authenticated:\n return HttpResponseForbidden(render(request, \"403.html\"))\n if request.method == \"GET\":\n has_linked_github = methods.has_social_token(request.user)\n if not has_linked_github:\n messages.warning(\n request,\n \"\"\"\n Your account does not have a linked GitHub account.\n It is recommended to link your account,\n otherwise your 
leaderboards may only be able to refresh infrequently.\n \"\"\",\n )\n ctx = dashboard_context(request)\n return render(request, template, context=ctx)\n elif request.method == \"POST\":\n return dashboard_post(request)\n else:\n raise Http404(\"HTTP method not defined on this page\")", "def atlas_dashboard(request):\n if request.user.is_authenticated():\n try: \n views_helperobj = ViewsHelper()\n objhelper_obj = ObjectHelper()\n\n if request.is_ajax() or request.method == 'POST':\n return views_helperobj.handle_dashboard_post_requests(request)\n \n module_list = views_helperobj.create_module_list()\n dashboard_context = views_helperobj.generate_dashboard_data(request)\n return render_to_response('appv1/dashboard.html', \\\n dashboard_context, context_instance=RequestContext \\\n (request, processors = [custom_processor]))\n except PermissionDenied:\n return render_to_response('appv1/403.html')\n except TemplateDoesNotExist:\n return render_to_response('appv1/404.html')\n except Exception:\n return render_to_response('appv1/500.html')", "def index():\n if (session_get_int(\"user_id\") is not None):\n return render_template(\"dashboard.html\")\n else:\n return render_template(\"index.html\")", "def view_system():\n\n pass", "def _get_dashboard_object(self):\n pass", "def overview(request):\n LOGGER.info('Rendering WMT16 HIT overview for user \"{0}\".'.format(\n request.user.username or \"Anonymous\"))\n \n # Re-initialise random number generator.\n seed(None)\n \n # Collect available language pairs for the current user.\n language_codes = set([x[0] for x in LANGUAGE_PAIR_CHOICES])\n language_pairs = request.user.groups.filter(name__in=language_codes)\n \n # Collect available annotation projects for the current user.\n annotation_projects = request.user.project_set.all()\n \n hit_data = []\n total = [0, 0, 0]\n\n for language_pair in language_pairs:\n for annotation_project in annotation_projects:\n hit = _compute_next_task_for_user(request.user, annotation_project, language_pair)\n user_status = HIT.compute_status_for_user(request.user, annotation_project, language_pair)\n for i in range(3):\n total[i] = total[i] + user_status[i]\n \n if hit:\n # Convert status seconds back into datetime.time instances.\n for i in range(2):\n user_status[i+1] = seconds_to_timedelta(int(user_status[i+1]))\n \n hit_data.append(\n (hit.get_language_pair_display(), hit.get_absolute_url(),\n hit.hit_id, user_status, annotation_project)\n )\n \n # Convert total seconds back into datetime.timedelta instances.\n total[1] = seconds_to_timedelta(int(total[2]) / float(int(total[0]) or 1))\n \n # Remove microseconds to get a nicer timedelta rendering in templates.\n total[1] = total[1] - timedelta(microseconds=total[1].microseconds)\n \n total[2] = seconds_to_timedelta(int(total[2]))\n \n groups = _identify_groups_for_user(request.user)\n group = None\n if len(groups) > 1:\n LOGGER.debug(u'User \"{0}\" assigned to multiple annotation groups: {1}'.format(\n request.user.username or u'Anonymous',\n u', '.join([x.name for x in groups]))\n )\n group = groups[0]\n \n if group is not None:\n group_name = group.name\n group_status = HIT.compute_status_for_group(group)\n for i in range(2):\n group_status[i+1] = seconds_to_timedelta(int(group_status[i+1]))\n \n else:\n group_status = None\n group_name = None\n \n LOGGER.debug(u'\\n\\nHIT data for user \"{0}\":\\n\\n{1}\\n'.format(\n request.user.username or \"Anonymous\",\n u'\\n'.join([u'{0}\\t{1}\\t{2}\\t{3}'.format(*x) for x in hit_data])))\n\n # Compute admin URL for super 
users.\n admin_url = None\n if request.user.is_superuser:\n admin_url = reverse('admin:index')\n \n dictionary = {\n 'active_page': \"OVERVIEW\",\n 'hit_data': hit_data,\n 'total': total,\n 'group_name': group_name,\n 'group_status': group_status,\n 'admin_url': admin_url,\n 'title': 'WMT16 Dashboard',\n 'annotation_groups': [x.name for x in groups],\n }\n dictionary.update(BASE_CONTEXT)\n \n LOGGER.info(dictionary.values())\n \n return render(request, 'wmt16/overview.html', dictionary)", "def usersview_admin():\n\n # User objects list which includes list of all users which can be broken down into editors and sponsors\n # get all users\n user_objects=db.session.query(User.id,User.email,User.user_type,User.user_status,User.name,User.organization).\\\n order_by(User.id)\n\n # get a count of the user objects\n user_count = user_objects.count()\n\n # blank list to append to\n user_list=[]\n\n # loop through user objects\n for counter in range(0,user_count):\n user_list.append(user_objects[counter])\n\n # show list of document names\n users = user_list\n\n \"\"\"Logged-in User Dashboard.\"\"\"\n return render_template(\n 'usersview_admin.jinja2',\n users=users\n )", "def home():\n\n # Provide the date range (from the most distant to the recent date) for\n # filtering in the last two API routes\n session = Session(engine)\n start_limit = session.query(Measurement.date).filter(Measurement.date).\\\n order_by(Measurement.date).first()\n end_limit = session.query(Measurement.date).filter(Measurement.date).\\\n order_by(Measurement.date.desc()).first()\n\n return (\n f'Available Routes:<br/>'\n f'<br/>'\n f'/api/v1.0/precipitation<br/>'\n f'/api/v1.0/stations<br/>'\n f'/api/v1.0/tobs<br/>'\n f'<br/>'\n f'/api/v1.0/start<br/>'\n f'/api/v1.0/start/end<br/>'\n f'<br/>'\n f'*Please use \"yyyy-mm-dd\" as the date format to replace the \"start\" and/or \"end\" parameter(s) in the last two API routes in order to filter summarized temperature results based on desired date range:<br/>'\n f'The earliest date available in this dataset is {start_limit[0]}<br/>'\n f'The most recent date available in this dataset is {end_limit[0]}<br/>'\n )", "def main_screen():\n if current_user.is_authenticated:\n\n activities = Run.query.filter_by(user_id=current_user.get_id())\n\n total_duration = sum([a.duration for a in activities.all()])\n hours = str(int(total_duration//3600))\n minutes = str(int((total_duration % 3600)//60)).rjust(2, '0')\n seconds = str(int(total_duration % 60)).rjust(2, '0')\n\n stats = {'count': len(activities.all()),\n 'total_distance': sum([a.distance for a in activities.all()]),\n 'total_hours': hours,\n 'total_minutes': minutes,\n 'total_seconds': seconds,\n 'total_calories': sum([a.calories for a in activities.all()])}\n\n return render_template('main.html', stats=stats)\n else:\n return render_template('main.html')", "def views(request):\n tag = Tag.objects.filter(name=\"Global\")\n gauges = Gauge.objects.filter(tags=tag)\n return render_to_response('dashboard/views.js',{'gauges': gauges} )", "def summary_page() :\r\n logger.debug(\"\")\r\n model = session_info.get_user_model(session)\r\n return render_template( \"summary_page.html\" , model=model ,\r\n stat_types=param_stats.StatTypes )", "def send_to_dashboard():\n\t# purchased and conventional\n\tpurchase_activity_conv = db.session.query(PurchaseActivity).filter_by(purchased=True, conventional=True).all()\n\t# purchased and organic\n\tpurchase_activity_organic = db.session.query(PurchaseActivity).filter_by(purchased=True, 
organic=True).all()\n\t# search activity \n\tsearch_activity = db.session.query(SearchActivity).all()\n\n\t# plot search_activity over time and purchase_activity over time\n\n\tlist_of_dict = []\n\n\n\tdata = {}\n\tdatasets_dict = {}\n\tdatasets_dict['label'] = \"Search Activity, Items Purchased over Time\"\n\tdatasets_dict['fillColor'] = \"rgba(220,220,220,0.5)\"\n\tdatasets_dict['strokeColor'] = \"rgba(220,220,220,0.8)\"\n\tdatasets_dict['highlightFill'] = \"rgba(220,220,220,0.75)\"\n\tdatasets_dict['highlightStroke'] = \"rgba(220,220,220,1)\"\n\tdatasets_dict['data'] =search_activity, purchase_activity_organic, purchase_activity_conv\n\tdata['labels'] = time\n\tdata['datasets'] = [datasets_dict]\n \n\n\tlist_of_dict.append(data)\n\tprint list_of_dict \t\n\n\treturn render_template(\"/dashboard.html\")", "def index(request):\n data = Information.objects.all()\n args = {'data': data}\n return render_to_response('tasks/index.html', args, context_instance=RequestContext(request))", "def index(request):\n return render(request, 'vaxcharts/home.html')", "def main_index():\n\n return render_template(\n \"index.html\",\n groups=[{\"name\": f, \"logs\": log_metadata(f)} for f in FOCUS_GROUPS],\n css=get_style(),\n )", "def studio_view(self, context):\n\t\thtml = self.resource_string(\"public/admin/html/mainMenu.html\")\n\t\tfrag2 = Fragment(html.format(self=self))\n\t\thtml2 = self.resource_string(\"public/html/New_Tournament.html\")\n\t\tfrag2.add_css(self.resource_string(\"public/admin/css/tournamentcreator.css\"))\n\t\timg=self.runtime.local_resource_url(self, \"public/admin/icons/listasd.png\")\n\t\t\n\t\tfrag2.add_content(html2)\n\t\tfrag2.add_javascript(self.resource_string(\"public/admin/js/interface.js\"))\n\t\tfrag2.initialize_js('studio')\n\t\treturn frag2\n\t\t\n\t\t# TO-DO: change this view to display your data your own way.", "def dashboard(request):\n # Ci pensa datatables a popolare la tabella\n title =_(\"Pannello di controllo\")\n sub_title = _(\"Gestisci i tuoi ticket o aprine di nuovi\")\n template = \"user/dashboard.html\"\n tickets = Ticket.objects.filter(created_by=request.user)\n not_closed = tickets.filter(is_closed=False)\n # unassigned = []\n # opened = []\n unassigned = 0\n opened = 0\n for nc in not_closed:\n if nc.has_been_taken():\n # opened.append(nc)\n opened += 1\n else:\n # unassigned.append(nc)\n unassigned += 1\n # chiusi = tickets.filter(is_closed=True)\n chiusi = tickets.filter(is_closed=True).count()\n\n messages = 0\n for ticket in tickets:\n messages += ticket.get_messages_count(by_operator=True)[1]\n\n d = {'ticket_messages': messages,\n 'priority_levels': settings.PRIORITY_LEVELS,\n 'sub_title': sub_title,\n 'ticket_aperti': opened,\n 'ticket_chiusi': chiusi,\n 'ticket_non_gestiti': unassigned,\n 'title': title,}\n\n return render(request, template, d)", "def display_reports(self, layout): # pylint: disable=arguments-differ" ]
[ "0.8157949", "0.7479624", "0.738469", "0.7263216", "0.72455734", "0.7196145", "0.7135433", "0.7082106", "0.7069867", "0.7064431", "0.7064215", "0.70062995", "0.6955068", "0.68793905", "0.68734276", "0.6863764", "0.6852004", "0.68505263", "0.6806025", "0.67434174", "0.67335117", "0.6636597", "0.65967745", "0.6555114", "0.65489215", "0.6516357", "0.64837617", "0.64401484", "0.6426749", "0.6422544", "0.64205134", "0.6418279", "0.6390089", "0.6384641", "0.63793397", "0.6374029", "0.63435817", "0.63362795", "0.63233", "0.6305631", "0.630225", "0.6298205", "0.6297983", "0.6290389", "0.6285934", "0.62275153", "0.6224246", "0.62150097", "0.6204622", "0.6200102", "0.61964613", "0.6194528", "0.6177245", "0.61728936", "0.61613345", "0.61586416", "0.615409", "0.6152533", "0.6149803", "0.6141203", "0.613813", "0.61369824", "0.6133272", "0.6132418", "0.61310667", "0.61301684", "0.612979", "0.61242694", "0.60938", "0.60897636", "0.6084811", "0.6081172", "0.60799557", "0.60700387", "0.6064429", "0.6063576", "0.60603964", "0.60594505", "0.6051077", "0.6037694", "0.60323346", "0.60194683", "0.60094815", "0.6002037", "0.6000471", "0.59994495", "0.5989643", "0.5973927", "0.59715873", "0.59638464", "0.5961108", "0.5959778", "0.59581953", "0.5929879", "0.5923054", "0.59204435", "0.59174526", "0.5916551", "0.5915986", "0.59020907" ]
document_score: 0.6533786
document_rank: 25
View for list of actions of (current) employee
def action_list(request, employee_id=None): if employee_id: employee = Employee.objects.get(pk=employee_id) current_employee = Employee.objects.get(user__pk=request.user.pk) if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk: raise PermissionDenied() else: employee = request.user.employee_user.first() actions = employee.action_set.all() return TemplateResponse( request, 'mus/action_list.html', dict( actions=actions, employee=employee ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_overview_of_all_employees(self):\n\n print(\"OVERVIEW OF EMPLOYEES\\n\")\n\n employees_ob_list = self.llapi.get_employee_overview()\n \n for employee_ob in employees_ob_list:\n print(employee_ob.print_info_in_line(\"*\"))\n \n print(f\"\\nNAN AIR has {len(employees_ob_list)} employees\")\n\n print(\"\\nB Back\\n\")\n\n action_str = self.choose_action([\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"b\"])\n\n if action_str == \"b\":\n return", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def list(self, request):\n employee = self.controller.retrieve_all_employees()\n serializer = data_serializers.PresentEmployeeDataSerializer(employee, many=True)\n return Response(serializer.data)", "def show_employee_menu(self):\n \n action_str = \"\"\n\n while True:\n print(self.LENGTH_STAR * \"*\")\n print(\"EMPLOYEES MENU\\n\")\n print(\"1 Print overview of all employees\")\n print(\"2 Pilots\")\n print(\"3 Cabin Crew\")\n print(\"B Back\\n\")\n\n action_str = self.choose_action([\"1\", \"2\" ,\"3\" ,\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"1\", \"2\", \"3\", \"b\"])\n\n if action_str == \"1\":\n self.show_overview_of_all_employees()\n\n elif action_str == \"2\":\n self.show_pilot_or_crew_menu(self.PILOT)\n\n elif action_str == \"3\":\n self.show_pilot_or_crew_menu(self.CREW)\n\n elif action_str == \"b\":\n return", "def show_all_employees():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n \n \n ## right now this is ALL users... \n \n return render_template(\"employee_display.html\", employees = employees)", "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def employee_detail(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n employee = Employee.objects.get(pk=int(employee_id))\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not current_employee.pk == int(employee_id):\n if not current_employee.is_manager or not current_employee.company.pk == employee.company.pk:\n if not current_employee.isCompanySuperUserOrHigher():\n return HttpResponse('unauthorized', status=401)\n\n user_files = get_files_for_employee(employee_id)\n\n if request.method == 'POST':\n\n upload_form = UploadFileToEmployeyForm(request.POST, request.FILES)\n form = EmployeeNoteForm(request.POST, instance=employee)\n\n if 'upload' in request.POST:\n if upload_form.is_valid():\n upload_form.handle_upload(employee_id, request.FILES['file'])\n\n return HttpResponseRedirect('/employee/show/{}?upload_status=ok#file-list'.format(employee_id))\n\n else:\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/employee/show/%d' % form.instance.pk)\n\n else:\n form = EmployeeNoteForm(instance=employee)\n upload_form = UploadFileToEmployeyForm()\n data = {}\n data[\"first_name\"] = employee.user.first_name\n data[\"last_name\"] = employee.user.last_name\n data[\"email\"] = employee.user.email\n data[\"is_manager\"] = employee.is_manager\n data[\"language_code\"] = employee.language_code\n employee_role = 
EmployeeRole.objects.filter(employee=employee).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n data[\"roles\"] = name_role_list\n return JsonResponse(status=201, data=data)\n # return TemplateResponse(\n # request,\n # 'mus/detail.html',\n # {\n # 'actions': actions,\n # 'employee': employee,\n # # 'development_plans': development_plans,\n # 'form': form,\n # 'upload_form': upload_form,\n # 'user_files': user_files\n # }\n # )", "def employee():\n return Response(render_template('employee/employee.html'))", "def get_actions(self, request):\n return super(OrganizationAdmin, self).get_actions(request)", "def list(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def __actions__(self, obj):\n primary_fields = self.__provider__.get_primary_fields(self.__entity__)\n pklist = '/'.join(map(lambda x: str(getattr(obj, x)), primary_fields))\n #if has_permission('manage'):############\n \n historial = DBSession.query(Item.nrohistorial).filter_by(id=pklist).first()\n idlineabase = DBSession.query(Item.idLineaBase).filter_by(nrohistorial=historial, ultimaversion=1).first()\n lineabase = DBSession.query(LineaBase).filter_by(id=idlineabase).first()\n \n value = '<div></div>'\n \n if lineabase != None:\n if str(lineabase.estado).__eq__('abierta'):\n value = '<div><a class=\"loginlogout\" href=\"'+pklist+'/edit\" style=\"text-decoration:none\">Revertir</a></div>'\n else:\n value = '<div><a class=\"loginlogout\" href=\"'+pklist+'/edit\" style=\"text-decoration:none\">Revertir</a></div>'\n \n return value", "def current_employee(self, request: Request) -> Response:\n serializer = self.get_serializer_class()\n serializer = serializer(request.user, context={'request': request})\n return Response(serializer.data)", "def action_detail(request, action_id):\n employee = request.user.employee_user.first()\n action = Action.objects.get(pk=int(action_id))\n # if not employee.isEnsoUser() and employee.company.pk != action.employee.company.pk:\n if not employee.hasAccessTo(action.employee):\n raise PermissionDenied()\n\n if request.method == 'POST':\n form = ActionCommentForm(request.POST)\n if form.is_valid():\n form.save(request.user, action)\n return HttpResponseRedirect('/action/%s' % action_id)\n else:\n form = ActionCommentForm()\n return TemplateResponse(\n request,\n 'mus/action_detail.html',\n dict(\n action=action,\n form=form\n )\n )", "def view_attendance(request):\n\n\tcontext_dict = {\n\t\t'title': 'All Attendance',\n\t}\n\treturn render(request, \"viewAttendance.html\", context_dict)", "def show_activities(self): \n database = Database('data/database.db')\n activities = database.read_activities()\n view = Template(filename=\"view/template.html\", lookup=lookup)\n \n \n return view.render(\n rows = [[item.number, item.name] for item in activities],\n pageTitle = \"Activités\",\n tableTitle = \"Liste de toutes les activités\",\n ths = [\"Numéro\", \"Nom\"]\n )", "def action_edit(request, action_id):\n employee = request.user.employee_user.first()\n action = Action.objects.get(pk=action_id)\n if not employee.isEnsoUser() and employee.company.pk != action.employee.company.pk:\n raise PermissionDenied()\n # if request.method == 'POST':\n form = ActionForm(request.POST, instance=action)\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/action/%d' % form.instance.pk)\n # else:\n # form = ActionForm(instance=action)\n # 
return TemplateResponse(\n # request,\n # 'mus/action_edit.html',\n # dict(\n # form=form,\n # edit=True\n # )\n # )\n\n # return JsonResponse(status=200, data={\"data\": form.instance.title, \"edit\": True})", "def actions(self):\n raise NotImplementedError", "def get(self):\n resultado = EmployeeModel.query.all()\n return resultado", "def list(self, request):\n teams = self.controller.retrieve_all_teams_employees()\n serializer = data_serializers.PresentTeamEmployeeDataSerializer(teams, many=True)\n return Response(serializer.data)", "def show_all_information():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n all_certs = employee_certification.query.all()\n \n return render_template(\"admin.html\", employees = employees, all_certs = all_certs)", "def enterprise_edit(request):\r\n action = tool.get_param_by_request(request.GET, 'action', \"add\", str)\r\n career_id = tool.get_param_by_request(request.GET, 'careerId', 0, int)\r\n\r\n enterprise = APIResult()\r\n c = None\r\n if action == \"add\":\r\n c = {\"career_id\": career_id, \"action\": action}\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_edit.html\", c,\r\n context_instance=RequestContext(request))\r\n\r\n if action == \"edit\" and (not career_id):\r\n _id = tool.get_param_by_request(request.GET, 'enterpriseId', 0, int)\r\n enterprise = api_enterprise.get_career_page_enterprise_by_id(_id)\r\n c = {\"enterprises\": enterprise.result()[0], \"action\": action}\r\n if enterprise.is_error():\r\n return render_to_response(\"404.html\", {}, context_instance=RequestContext(request))\r\n else:\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_edit.html\", c,\r\n context_instance=RequestContext(request))\r\n\r\n if action == \"edit\" and career_id:\r\n enterprise = api_enterprise.list_career_page_enterprise_by_career_id(career_id)\r\n c = {\"enterprises\": enterprise.result(), \"action\": action}\r\n\r\n if enterprise.is_error():\r\n return render_to_response(\"404.html\", {}, context_instance=RequestContext(request))\r\n else:\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_list.html\", c,\r\n context_instance=RequestContext(request))", "def committee_show(request, pk):\n committee = Committee.objects.get(pk=pk)\n\n delegates = Delegate.objects.filter(committee_id=pk)\n\n context = {\"committee\": committee, \"delegates\": delegates}\n template = \"jurycore/committee_show.html\"\n return render(request, template, context)", "def show_timeline(\n request: HttpRequest,\n pk: Optional[int] = None,\n workflow: Optional[Workflow] = None,\n) -> HttpResponse:\n action = None\n if pk:\n action = workflow.actions.filter(pk=pk).first()\n\n if not action:\n # The action is not part of the selected workflow\n return redirect('home')\n logs = workflow.logs.filter(payload__action_id=action.id)\n else:\n logs = workflow.logs\n\n event_names = [\n Log.SCHEDULE_EMAIL_EXECUTE,\n Log.DOWNLOAD_ZIP_ACTION,\n Log.SCHEDULE_JSON_EXECUTE,\n Log.SCHEDULE_CANVAS_EMAIL_EXECUTE,\n Log.SCHEDULE_EMAIL_EDIT,\n Log.SCHEDULE_JSON_EDIT,\n Log.SCHEDULE_CANVAS_EMAIL_EXECUTE,\n Log.SURVEY_INPUT,\n ]\n\n # Filter the logs to display and transform into values (process the json\n # and the long value for the log name\n logs = [\n {'id': log.id,\n 'name': log.get_name_display(),\n 'modified': 
log.modified,\n 'payload': json.dumps(log.payload, indent=2),\n 'action_name': log.payload['action'],\n 'action_id': log.payload['action_id']}\n for log in logs.filter(name__in=event_names)\n ]\n\n return render(\n request,\n 'action/timeline.html',\n {'event_list': logs, 'action': action})", "def list(self, request):\n\n viewset_list = [\n 'User\\'s action (list,create,retrieve ,update , partial_update)',\n 'Automatically maps to the urls using Routers.',\n 'Provides more functionality with less code.',\n ]\n\n return Response({'message':'Hello From ViewSet' , 'viewset':viewset_list})", "def actors_listing(request,option=None):\n\n if option == \"csv\":\n return generate_actor_csv(request)\n return generate_actor_jtable(request, option)", "def get_action(self, context):\n pass", "def committee_list(request):\n committees = Committee.objects.all().order_by(\"name\")\n\n context = {\"committees\": committees}\n template = \"jurycore/committee_list.html\"\n return render(request, template, context)", "def actions():\n pass", "async def audit_actions(self, ctx: Context) -> None:\n\n if ctx.invoked_subcommand is None:\n await ctx.send_help('auditaction')", "def actions(self):\n return self._action_list", "def stars_employee_list(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee)\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(employee_stars, request)\n serializer = StarSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)", "def list(request):\n return EntryView.__index(request)", "def action(self):\n pass", "def action(self):\n pass", "def show_all_employees(self):\n try:\n employees = self.admin_repository.show_all_employees()\n if employees:\n for employee in employees:\n print(\"Employee Id : {}\".format(employee[0]))\n print(\"Name : {}\".format(employee[1]))\n print(\"Email : {}\".format(employee[2]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def get_actions(self):\n return []", "def expense_history(request):\n qs: QuerySet = Expense.objects.by_user(request.user.id)\n file_title: str = \"Latest_150_Expenses\"\n form = ExpenseHistory(request.GET)\n if form.is_valid():\n cd: dict = form.cleaned_data\n target: str = cd[\"target\"]\n user_id = request.user.id\n if target == \"date\":\n qs = Expense.objects.filter(date=cd[\"date1\"], user_id=user_id)\n file_title = f'For_{cd[\"date1\"]}'\n elif target == \"each_month\":\n qs = Expense.objects.filter(date__month=cd[\"month\"], user_id=user_id)\n file_title = f\"Every_{calendar.month_name[cd['month']]}_Month\"\n elif target == \"months\":\n qs = Expense.objects.last_n_months_expense(cd[\"p_months\"], user_id)\n file_title = f\"Last_{cd['p_months']}_months\"\n elif target == \"month\":\n qs = Expense.objects.month_expense(cd[\"month\"], cd[\"year\"], user_id)\n file_title = f'For_{calendar.month_name[cd[\"month\"]]}-{cd[\"year\"]}'\n elif target == \"year\":\n qs = Expense.objects.year_expense(cd[\"year\"], user_id)\n file_title = f\"{cd['year']}\"\n elif target == \"between\":\n qs = Expense.objects.filter(date__gte=cd[\"date1\"], date__lte=cd[\"date2\"],\n user__id=user_id)\n file_title = f'Between_{cd[\"date1\"]}_{cd[\"date2\"]}'\n qs = qs.order_by(\"-date\", \"-id\").values_list(\n \"date\", 
\"description\", \"category__name\", \"method\", \"app\", \"amount\",\n )\n if not form.is_valid():\n qs = qs[:150]\n qs_list = []\n if qs:\n for q in qs:\n qs_list.append([\n q[0], q[1], q[2], METHOD_DICT[q[3]], APP_DICT.get(q[4], \"Other\"), q[5]\n ])\n file_title = f\"{date.today()}_\" + file_title\n return render(request, \"tracker/history.html\",\n {\"qs\": qs_list, \"file_title\": file_title, \"form\": form})", "def actions(self, request, action_list, group):\n return action_list", "def list(self, request):\n a_viewset = [\n 'uses actions (list,create,retreive,update,partial_update)',\n 'Automatically maps to URLs using routers',\n 'provides more functionality with less code',\n ]\n return Response({'message': 'Hello!', 'a_viewset': a_viewset})", "def list(self, request):\n a_viewset = [\n 'Uses actions (list, create, retrieve, update, partial_update)',\n 'Automatically maps to URLs using Routers',\n 'Provides more functionality with less code',\n ]\n\n return Response({'message': 'Hello!', 'a_viewset': a_viewset})", "def action_add(request, employee_id=None):\n if employee_id:\n employee = Employee.objects.get(pk=employee_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n else:\n employee = request.user.employee_user.first()\n if request.method == 'POST':\n form = ActionForm(request.POST)\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/action/%d' % form.instance.pk)\n else:\n form = ActionForm()\n return TemplateResponse(\n request,\n 'mus/action_edit.html',\n dict(\n form=form\n )\n )", "def profile_detail(request, employee_id):\n current_employee = Employee.objects.filter(user__pk=request.user.pk).first()\n employee = Employee.objects.get(pk=int(employee_id))\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not current_employee.pk == int(employee_id):\n if not current_employee.is_manager or not current_employee.company.pk == employee.company.pk:\n if not current_employee.isCompanySuperUserOrHigher():\n return HttpResponse('unauthorized', status=401)\n\n user_files = get_files_for_employee(employee_id)\n\n if request.method == 'POST':\n\n upload_form = UploadFileToEmployeyForm(request.POST, request.FILES)\n form = EmployeeNoteForm(request.POST, instance=employee)\n\n if 'upload' in request.POST:\n if upload_form.is_valid():\n upload_form.handle_upload(employee_id, request.FILES['file'])\n\n return HttpResponseRedirect('/employee/show/{}?upload_status=ok#file-list'.format(employee_id))\n\n else:\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/employee/show/%d' % form.instance.pk)\n\n else:\n form = EmployeeNoteForm(instance=employee)\n upload_form = UploadFileToEmployeyForm()\n data = {}\n data[\"user\"] = employee.user.first_name + \" \" + employee.user.last_name\n data[\"id\"] = str(employee.user.pk)\n data[\"title\"] = employee.title\n data[\"email\"] = employee.user.email\n data[\"phone\"] = employee.phone\n company_dict = {}\n company_dict[\"name\"] = employee.company.name\n company_dict[\"id\"] = str(employee.company.pk)\n\n data[\"company\"] = company_dict\n employee_username = \"\"\n emp = 
Employee.objects.filter(manager=employee.manager).all()\n for obj in emp:\n employee_username = obj.manager.user.username if obj.manager else \"\"\n employee_first = obj.manager.user.first_name if obj.manager else \"\"\n employee_last = obj.manager.user.last_name if obj.manager else \"\"\n manager_dict = {}\n manager_dict[\"name\"] = employee_username\n manager_dict[\"id\"] = employee_id\n manager_dict[\"first_last_name\"] = employee_first + \" \" + employee_last\n data[\"manager\"] = manager_dict\n data[\"date_of_birth\"] = employee.date_of_birth\n data[\"status_questions\"] = employee.status_questions\n data[\"notes\"] = employee.notes\n employee_role = EmployeeRole.objects.filter(employee=employee).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n data[\"roles\"] = name_role_list\n data[\"potenciale\"] = employee.potenciale\n data[\"date_start\"] = employee.created_at\n data[\"is_manager\"] = employee.is_manager\n data[\"date_finish\"] = \"\"\n data['photo'] = employee.photo.url if employee.photo else ''\n\n return JsonResponse(status=200, data=data)\n # return TemplateResponse(\n # request,\n # 'mus/detail.html',\n # {\n # 'actions': actions,\n # 'employee': employee,\n # # 'development_plans': development_plans,\n # 'form': form,\n # 'upload_form': upload_form,\n # 'user_files': user_files\n # }\n # )", "def actions(self):\n from moztrap.view.lists.actions import actions\n return actions", "def all_employees(request, company_id=None):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n company_super_user = current_employee.isCompanySuperUserOrHigher()\n if company_id:\n company = Company.objects.get(pk=company_id)\n else:\n company = current_employee.company\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n raise PermissionDenied()\n change_company_form = ChangeCompanyForm(initial=dict(company=company))\n return TemplateResponse(\n request,\n 'all_employees.html',\n {\n 'user': request.user,\n 'company_super_user': company_super_user,\n 'company': company,\n 'change_company_form': change_company_form,\n }\n )", "def list(self, request):\n\n a_viewset = [\n 'Uses action (list, create, reteieve, update, partial_update)',\n 'Automatically maps the urls using routers',\n 'provide more functionality with less code',\n ]\n\n return Response({'message': 'Hello', 'a_viewset': a_viewset})", "def display_employee(self):\n print \"[Name: %s] [Salary: %d]\" % (self.name, self.salary)", "def get_queryset(self):\n #print(\"request\", self.request)\n user = self.request.user\n return Experience.objects.filter(person=user)", "def index(request):\n\n context = {'employees': User.objects.select_related('profile').filter(is_staff=True).order_by('first_name')}\n return render(request, 'Employees/index.html', context)", "def __actions__(self, obj):\n\t\t\tprimary_fields \t= self.__provider__.get_primary_fields(self.__entity__)\n\t\t\tpklist \t\t= '/'.join(map(lambda x: str(getattr(obj, x)), primary_fields))\n\n\t\t\tvalue \t\t= '<div>'\n\t\t\tif has_permission('editar_LB'):\n\t\t\t\tvalue = value + '<div><a class=\"edit_link\" href=\"'+pklist+'/edit\" style=\"text-decoration:none\">edit</a></div>'\n\t\t\tif has_permission('eliminar_LB'):\n\t\t\t\tvalue = value + '<div><form method=\"POST\" action=\"'+pklist+'\" class=\"button-to\"><input type=\"hidden\" name=\"_method\" value=\"DELETE\" /><input class=\"delete-button\" onclick=\"return confirm(\\'Est&aacute; seguro que desea eliminar?\\');\" 
value=\"delete\" type=\"submit\" style=\"background-color: transparent; float:left; border:0; color: #286571; display: inline; margin: 0; padding: 0;\"/></form></div>'\n\t\t\tvalue = value + '</div>'\n\t\t\treturn value", "def actions(self):\r\n return actions.Actions(self)", "def get_queryset(self, request):\n return models.Employee.objects.exclude(username='root')", "def test_custom_query_response_descriptor_octopus_server_web_api_actions_list_events_responder(self):\n pass", "def test_ReportingPeriodDetailView_current_employee_toggle(self):\n self.former_employee.user_data.current_employee = True\n self.former_employee.user_data.save()\n response = self.app.get(\n reverse(\n 'reports:ReportingPeriodDetailView',\n kwargs={'reporting_period': '2015-01-01'},\n )\n )\n self.assertEqual(\n len(response.html.find_all('tr', {'class': 'user'})), 3\n )\n self.former_employee", "def explore_view(request):\r\n # explore items\r\n user = request.user.userprofile\r\n items = Item.objects.explore(user)\r\n context = {'items':items}\r\n return render(request, 'explore/explore.html', context)", "def print_user_actions():\n print\n print \"=\" * 80\n print \" User Actions\"\n print \"=\" * 80", "def get(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def get_list_of_actions(self):\n return self.actions", "def actions() -> None:\n pass", "def show_employee(emp_id, fields=None):\n ret = {}\n if fields is None:\n fields = \",\".join(\n (\n \"canUploadPhoto\",\n \"department\",\n \"displayName\",\n \"firstName\",\n \"id\",\n \"jobTitle\",\n \"lastName\",\n \"location\",\n \"mobilePhone\",\n \"nickname\",\n \"photoUploaded\",\n \"photoUrl\",\n \"workEmail\",\n \"workPhone\",\n \"workPhoneExtension\",\n )\n )\n\n status, result = _query(action=\"employees\", command=emp_id, args={\"fields\": fields})\n\n root = ET.fromstring(result)\n\n ret = {\"id\": emp_id}\n for item in root:\n ret[next(iter(item.values()))] = item.text\n return ret", "def action(self) -> str:\n return pulumi.get(self, \"action\")", "def list_actions() -> None:\n colorama_init()\n max_action_name_len = max(len(name) for name in KNOWN_ACTIONS.keys())\n wrapper = textwrap.TextWrapper(\n width=80 - max_action_name_len - 3,\n subsequent_indent=' ' * (max_action_name_len + 3),\n )\n print(\n '{bright}{name:<{max_action_name_len}} -{normal} {doc}'.format(\n bright=Style.BRIGHT,\n name='name',\n max_action_name_len=max_action_name_len,\n normal=Style.NORMAL,\n doc='description [(argument: type, ...)]',\n )\n )\n print('-' * 80)\n for name, action in KNOWN_ACTIONS.items():\n wrapped_doc = wrapper.fill(' '.join(str(action.__doc__).split()))\n print(\n '{bright}{name:<{max_action_name_len}} -{normal} {doc}'.format(\n bright=Style.BRIGHT,\n name=name,\n max_action_name_len=max_action_name_len,\n normal=Style.NORMAL,\n doc=wrapped_doc,\n )\n )\n return None", "def search_entries(self, search_action):\n if search_action == 'e':\n search_query = input('Enter the employee name you want to search for: ')\n elif search_action == 'd':\n print('Enter the date range you want to search for (format MM/DD/YYYY).')\n beg_date = input('Beginning date (i.e. MM/DD/YYYY): ')\n while True:\n try:\n beg_date = datetime.strptime(beg_date, '%m/%d/%Y')\n break\n except ValueError:\n beg_date = input('Whoops, make sure it is the correct format (i.e. MM/DD/YYYY): ')\n end_date = input('Beginning date (i.e. 
MM/DD/YYYY): ')\n while True:\n try:\n end_date = datetime.strptime(end_date, '%m/%d/%Y')\n break\n except ValueError:\n end_date = input('Whoops, make sure it is the correct format (i.e. MM/DD/YYYY): ')\n\n if beg_date <= end_date:\n search_query = [beg_date, end_date]\n else:\n search_query = [end_date, beg_date]\n\n search_query[1] = search_query[1] + timedelta(hours=23, minutes=59, seconds=59)\n elif search_action == 's':\n search_query = input('Enter the text you want to search for: ')\n else:\n search_query = None\n\n self.view_entries(search_query, search_action)", "def _action(self):\n pass", "def __str__(self):\n return str(self.get_action_display())", "def list_history(request):\n history = History.objects\n\n if not is_admin(request.user):\n history = history.filter(submitter=request.user)\n history = history.order_by('-submission_date')\n\n return render('editor/list_history.mako', request, {\n 'history': history,\n })", "def audit_action(self):\n return self._audit_action", "def list(self, request):\n exp = Experiment.objects.all()\n serializer = ExperimentSerializer(exp, many=True)\n return send_response(request.method, serializer)", "def print_event_report(self):\n data = {\n 'ids': self.ids,\n 'model': self._name,\n 'form': {\n 'event_start_date': self.event_start_date,\n 'event_end_date': self.event_end_date,\n 'agenda': self.env.context.get('default_agenda_id'),\n },\n }\n return self.env.ref('agenda_esi.recap_report').report_action(self, data=data)", "def index(request):\r\n return render(request, 'team_tasks_managers/index.html')", "def display_hours(employee_id):\n\n if not g.user:\n flash(\"Please Login to continue.\", \"danger\")\n return redirect(\"/\")\n \n employee = Employee.query.get_or_404(employee_id)\n\n labels = json.dumps( [\"Completed\", \"Required\"])\n data = json.dumps([employee.completed, employee.required])\n \n return render_template(\"users/display_hours.html\", employee = employee, labels = labels, data = data)", "def test_custom_query_response_descriptor_octopus_server_web_api_actions_list_defects_responder(self):\n pass", "def __actions__(self, obj):\n value = '<div>'\n clase = 'actions'\n contexto = \"\"\n perm_mod = PoseePermiso('modificar rol')\n perm_del = PoseePermiso('eliminar rol')\n url_cont = \"/rolesplantilla/\"\n tipo = obj.tipo.lower()\n if (tipo.find(u\"proyecto\") >= 0):\n contexto = \"proyecto\"\n elif (tipo.find(u\"fase\") >= 0):\n contexto = \"fase\"\n else:\n contexto = \"ti\"\n\n if perm_mod.is_met(request.environ):\n value += '<div>' + \\\n '<a href=\"' + url_cont + str(obj.id_rol) + \"/edit?contexto=\"+ \\\n contexto + '\" class=\"' + clase + '\">Modificar</a>' + \\\n '</div><br />'\n\n if perm_del.is_met(request.environ):\n value += '<div><form method=\"POST\" action=\"./' + str(obj.id_rol) + '\" class=\"button-to\">'+\\\n '<input type=\"hidden\" name=\"_method\" value=\"DELETE\" />' +\\\n '<input onclick=\"return confirm(\\'Está seguro?\\');\" value=\"Eliminar\" type=\"submit\" '+\\\n 'style=\"background-color: transparent; float:left; border:0; color: #286571;'+\\\n 'display: inline; margin: 0; padding: 0; margin-left:-3px;\" class=\"' + clase + '\"/>'+\\\n '</form></div><br />'\n value += '</div>'\n return value", "def obtain_action(self):\r\n\t\treturn", "def getActions(self, state): \n util.raiseNotDefined()", "def actions(self):\r\n return Actions(self)", "def event_collaborator_detail(request, event_id, collaborator_id):\n if request.method == 'GET':\n event = get_object_or_404(Event, pk=event_id)\n collaborator = 
Employee.objects.all().filter(event=event, pk=collaborator_id)\n if collaborator:\n is_registered = True\n else:\n is_registered = False\n serializer = CollaboratorAttendanceSerializer(event, context={'is_registered': is_registered})\n return Response(serializer.data, status=status.HTTP_200_OK)", "def get_actions(self):\r\n return -4,4", "def view_index(\n request: HttpRequest,\n workflow: Optional[Workflow] = None,\n) -> HttpResponse:\n # Get the views\n views = workflow.views.values(\n 'id',\n 'name',\n 'description_text',\n 'modified')\n\n # Build the table only if there is anything to show (prevent empty table)\n return render(\n request,\n 'table/view_index.html',\n {\n 'query_builder_ops': workflow.get_query_builder_ops_as_str(),\n 'table': ViewTable(views, orderable=False),\n },\n )", "def __str__(self):\n return _action_args_dict[self.action].name", "def get_manager_employees(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n manager_employees = Employee.objects.filter(manager=current_employee, development_plan_type=None).all()\n if manager_employees:\n emp_list=[]\n for emp in manager_employees:\n emp_data={}\n emp_data[\"id\"] = emp.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"manager_id\"] = emp.manager.id\n # emp_data[\"status_questions\"] = emp.status_questions\n # employee_role = EmployeeRole.objects.filter(employee=emp).all()\n # name_role_list = []\n # for obj in employee_role:\n # name_role_list.append(obj.role.name)\n # emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n data = {\"employees:\": emp_list}\n return JsonResponse(status=201, data=data)\n else:\n return JsonResponse(\"The user with id={} isn't a manager for any user\".format(current_employee.user.id),\n status=404)", "def task_excellent_works_edit(request):\r\n action = tool.get_param_by_request(request.GET, 'action', \"add\", str)\r\n task_id = tool.get_param_by_request(request.GET, 'taskId', 0, int)\r\n\r\n c = None\r\n if \"edit\" in action and task_id:\r\n task_excellent_works = api_taskExcellentWorks.select_task_excellent_works_by_task_id(task_id)\r\n c = {\"taskExcellentWorks\": task_excellent_works.result(), \"action\": action}\r\n\r\n if task_excellent_works.is_error():\r\n return render_to_response(\"404.html\", {}, RequestContext(request))\r\n else:\r\n return render_to_response(\"mz_course/task/taskExcellentWorks_list.html\", c, RequestContext(request))\r\n\r\n if \"edit\" in action and (not task_id):\r\n _id = tool.get_param_by_request(request.GET, 'id', 0, int)\r\n task_excellent_works = api_taskExcellentWorks.select_task_excellent_works_by_id(_id)\r\n c = {\"taskExcellentWorks\": task_excellent_works.result()[0], \"action\": action}\r\n\r\n if task_excellent_works.is_error():\r\n return render_to_response(\"404.html\", {}, RequestContext(request))\r\n\r\n if 'add' in action:\r\n c = {\"task_id\": task_id, \"action\": action}\r\n\r\n return render_to_response(\"mz_course/task/taskExcellentWorks_edit.html\", c, RequestContext(request))", "def __actions__(self, obj):\n value = '<div>'\n clase = 'actions'\n url_cont = \"/roles/\"\n perm_mod = PoseePermiso('modificar rol')\n perm_del = PoseePermiso('eliminar rol')\n \n if perm_mod.is_met(request.environ):\n value += '<div>' + \\\n '<a href=\"' + url_cont + str(obj.id_rol) + \"/edit\"+ \\\n '\" class=\"' + clase + '\">Modificar</a>' + \\\n '</div><br />'\n\n if perm_del.is_met(request.environ):\n 
value += '<div><form method=\"POST\" action=\"./' + str(obj.id_rol) + '\" class=\"button-to\">'+\\\n '<input type=\"hidden\" name=\"_method\" value=\"DELETE\" />' +\\\n '<input onclick=\"return confirm(\\'Está seguro?\\');\" value=\"Eliminar\" type=\"submit\" '+\\\n 'style=\"background-color: transparent; float:left; border:0; color: #286571;'+\\\n 'display: inline; margin: 0; padding: 0; margin-left:-3px;\" class=\"' + clase + '\"/>'+\\\n '</form></div><br />'\n value += '</div>'\n return value", "def select_action(self, state,allowExploration=True):\n actions = self.environment.all_actions(forExploration=allowExploration)\n #Returns any action\n return actions[0]", "def action(self):\n return self.rowTime.activity", "def actions(self):\n return self._actions", "async def actions(\n self,\n *,\n query_params: Optional[Dict[str, any]] = None,\n headers: Optional[Dict[str, str]] = None,\n ) -> AuditLogsResponse:\n return await self.api_call(\n path=\"actions\",\n query_params=query_params,\n headers=headers,\n )", "def actions(self):\r\n return self.puzzle.actions", "def action(action_id):\n action = Action.query.filter_by(\n id=action_id, username=current_user.username\n ).first_or_404()\n return jsonify(\n dict(\n action_id=action.id,\n action_name=action.name,\n action_type=action.type.value,\n details=action.details,\n )\n )", "def edit_employee(request, employee_id):\n employee = Employee.objects.get(pk=int(employee_id))\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n\n assert isinstance(employee, Employee)\n assert isinstance(current_employee, Employee)\n\n # if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n # raise PermissionDenied()\n\n if not current_employee.hasAccessTo(employee):\n raise PermissionDenied()\n\n form = EditEmployeeForm(request.user, employee, {\n 'first_name': employee.user.first_name,\n 'last_name': employee.user.last_name,\n 'email': employee.user.email,\n 'manager': employee.manager.id if employee.manager else 0,\n 'language_code': employee.language_code,\n # 'development_plan_type': employee.development_plan_type.id,\n 'is_manager': employee.is_manager\n })\n if 'manager' in form.fields:\n managerQS = Employee.objects.filter(is_manager=True, company__pk=employee.company.pk)\n form.fields['manager'].queryset = managerQS\n # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(\n # Q(company__pk=employee.company.pk) | Q(company__isnull=True)\n # )\n is_me = employee.user.pk == request.user.pk\n return TemplateResponse(\n request,\n 'mus/edit_employee_form.html',\n {\n 'edit_employee_form': form,\n 'employee_id': employee_id,\n 'me': is_me,\n 'name': employee.user.get_full_name()\n }\n )", "def get_action(self):\n raise NotImplementedError", "def list():\n manager = Actions()\n tasks_list = manager.get_tasks_list()\n console_utils.print_tree(manager, tasks_list)", "def get_emp_list(self):\n\t\tcondition= ''\n\t\temp_list=[]\n\t\tif self.is_for_all==0:\n\t\t\tif not self.selected_employees:\n\t\t\t\tfrappe.throw(_(\"No employees for the mentioned criteria\"))\n\t\t\t#emp_list = [cstr(d.employee) for d in self.selected_employees]\n\t\t\temp_list = frappe.db.sql_list(\"\"\"\n\t\t\t\tselect\n\t\t\t\t\temployee from `tabAttendance Salary Tool Employee`\n\t\t\t\twhere\n\t\t\t\t\tparent = '%(parent)s' \n\t\t\t\"\"\"%{\"parent\": self.name})\n\t\t\tcondition+= \"\"\" and t1.employee IN %(employees)s \"\"\"\n\t\tif self.is_open_period==0:\n\t\t\tif not self.start_date or not 
self.end_date:\n\t\t\t\tfrappe.throw(_(\"Satart Date and End Date are Mandatories\"))\n\t\t\tcondition= \"\"\" and attendance_date >= %(start_date)s and attendance_date <= %(end_date)s\"\"\"\n\t\temp_list = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.employee as employee, count(*) as attendance_days\n\t\t\tfrom\n\t\t\t\t`tabAttendance` t1\n\t\t\twhere\n\t\t\t\tt1.attendance_salary_tool is null\n\t\t\t\tand t1.docstatus = 1 and t1.status='Present'\n\t\t\t\t{condition} group by t1.employee order by t1.employee asc\n\t\t\"\"\".format(condition=condition),{\"employees\": tuple(emp_list),\"start_date\": self.start_date,\"end_date\": self.end_date}, as_dict=True)\n\t\treturn emp_list", "def demonstrate(self,**kwargs):\n\n members = self.bl.getAllSavedActions()\n entries={}\n\n for param in members:\n entries[str(param)] = self.executeAction # save param names in entries\n\n# entries['search colour for position'] = self.search_menu\n entries['move block to position'] = self.move_block_menu\n entries['move arm to position'] = self.move_menu\n self.mm.addGenericMenu(\"actionMenu\",self.mm.cur_page,\"Select the action to demonstrate\", entries)\n self.mm.loadMenu(\"actionMenu\")", "def get_actions(self):\n return self.agent.get_actions()", "def start_view(request):\n\n if request.user and Employee.objects.filter(user__pk=request.user.pk).exists():\n if Employee.objects.get(user__pk=request.user.pk).is_manager:\n return HttpResponseRedirect('/dashboard')\n else:\n return HttpResponseRedirect('/employee/show/%d/' % request.user.employee_user.first().pk)\n else:\n return HttpResponseRedirect('/login/')", "def view(self):", "def action(ctx, getlist, getid, token, proxy, tablefmt):\n\tif not getlist and not getid:\n\t\treturn click.echo(ctx.get_help())\n\n\toption_list = ['getlist', 'getid']\n\n\tif validate(ctx.params, option_list):\n\t\tif getlist:\n\t\t\tpage = 1\n\t\t\thas_page = True\n\t\t\twhile has_page:\n\t\t\t\tresult = invoke_list(token, proxy, page)\n\t\t\t\tif result['has_error']:\n\t\t\t\t\tclick.echo()\n\t\t\t\t\tclick.echo('Error: %s' %(result['error_message']))\n\t\t\t\t\thas_page = False\n\t\t\t\telse:\n\t\t\t\t\trecord = 'action'\n\t\t\t\t\theaders = ['Fields', 'Values']\n\t\t\t\t\tfor dic in result['actions']:\n\t\t\t\t\t\ttable = [['Id', dic['id']], ['Status', dic['status']], \n\t\t\t\t\t\t['Type', click.style(dic['type'], fg='blue')], \n\t\t\t\t\t\t['Started at', dic['started_at']], \n\t\t\t\t\t\t['Completed at', dic['completed_at']], \n\t\t\t\t\t\t['Resource id', dic['resource_id']], \n\t\t\t\t\t\t['Resource type', dic['resource_type']], \n\t\t\t\t\t\t['Region', dic['region']['name']], \n\t\t\t\t\t\t['Size', dic['region']['sizes'][0]]]\n\t\t\t\t\t\tdata = {'headers': headers, 'table_data': table}\n\t\t\t\t\t\tprint_table(tablefmt, data, record)\n\t\t\t\t\ttotal = 'Total results: %d' % (result['meta']['total'])\n\t\t\t\t\tclick.echo()\n\t\t\t\t\tclick.echo(total)\n\t\t\t\t\tif result['links']['pages'].has_key('next'):\n\t\t\t\t\t\tpage += 1\n\t\t\t\t\t\tvalue = click.prompt('Do you want to continue ?', type=str, default='n')\n\t\t\t\t\t\tif value.lower() != 'y':\n\t\t\t\t\t\t\thas_page = False\n\t\t\t\t\telse:\n\t\t\t\t\t\thas_page = False\n\n\t\tif getid:\n\t\t\tmethod = 'GET'\n\t\t\turl = ACTION_LIST + str(getid)\n\t\t\tresult = DigitalOcean.do_request(method, url, token=token, proxy=proxy)\n\t\t\tif result['has_error']:\n\t\t\t\tclick.echo()\n\t\t\t\tclick.echo('Error: %s' %(result['error_message']))\n\t\t\t\thas_page = False\n\t\t\telse:\n\t\t\t\trecord = 
'action'\n\t\t\t\theaders = ['Fields', 'Values']\n\t\t\t\tdic = result['action']\n\t\t\t\ttable = [['Id', dic['id']], ['Status', dic['status']], \n\t\t\t\t['Type', click.style(dic['type'], fg='blue')], \n\t\t\t\t['Started at', dic['started_at']], \n\t\t\t\t['Completed at', dic['completed_at']], \n\t\t\t\t['Resource id', dic['resource_id']], \n\t\t\t\t['Resource type', dic['resource_type']], \n\t\t\t\t['Region', dic['region']['name']], \n\t\t\t\t['Size', dic['region']['sizes'][0]]]\n\t\t\t\tdata = {'headers': headers, 'table_data': table}\n\t\t\t\tprint_table(tablefmt, data, record)", "def actions(self) -> list:\n if self.debug: print(f\"AState.actions()\")\n if not self._examined:\n if self.debug: print(f\"\\tExamining...\")\n self._actions = self._generate_actions()\n self._examined = True\n return self._actions", "def list(self):\n\n return list(\n filter(\n lambda x: x.get('type') != 'tagit', # pragma: no cover\n self._post(\n request=ApiActions.LIST.value,\n uri=ApiUri.ACTIONS.value,\n ).get('actions')\n )\n )", "def get_queryset(self):\n qs = super(JobActiveMixin, self).get_queryset()\n return qs.actives()" ]
[ "0.6613559", "0.63822025", "0.6160403", "0.61327785", "0.61140096", "0.5988209", "0.5908296", "0.5907843", "0.5899233", "0.5892363", "0.5886298", "0.58746743", "0.58155996", "0.57719344", "0.5768285", "0.5741835", "0.5715916", "0.57119167", "0.5709055", "0.5664173", "0.5616768", "0.5592838", "0.55697787", "0.55644953", "0.5532066", "0.55105495", "0.5503542", "0.5502318", "0.5494629", "0.5478764", "0.54710585", "0.54701936", "0.5457258", "0.5457258", "0.5449723", "0.5424349", "0.5424237", "0.541541", "0.541162", "0.54042816", "0.5402102", "0.53899425", "0.53825784", "0.5378722", "0.53704995", "0.53664243", "0.536549", "0.5356949", "0.5322931", "0.53100276", "0.5277713", "0.5269472", "0.52646655", "0.5256126", "0.52499527", "0.5245638", "0.52418345", "0.52350223", "0.52309924", "0.52294314", "0.52281296", "0.5225147", "0.5224514", "0.5221559", "0.52190554", "0.521725", "0.5201567", "0.51972264", "0.51825994", "0.518033", "0.5177773", "0.5169979", "0.5166419", "0.5163351", "0.5163332", "0.5161911", "0.5157091", "0.5142279", "0.5133766", "0.5132669", "0.5126568", "0.5123796", "0.5119543", "0.5118205", "0.5116379", "0.51105064", "0.51068425", "0.51032406", "0.5100708", "0.5092418", "0.5091666", "0.5088608", "0.50882095", "0.5079284", "0.5078552", "0.5075136", "0.5075019", "0.5074341", "0.5069348", "0.50684714" ]
0.8160547
0
View for editing action
def action_edit(request, action_id): employee = request.user.employee_user.first() action = Action.objects.get(pk=action_id) if not employee.isEnsoUser() and employee.company.pk != action.employee.company.pk: raise PermissionDenied() # if request.method == 'POST': form = ActionForm(request.POST, instance=action) if form.is_valid(): form.save(request.user, employee) return HttpResponseRedirect('/action/%d' % form.instance.pk) # else: # form = ActionForm(instance=action) # return TemplateResponse( # request, # 'mus/action_edit.html', # dict( # form=form, # edit=True # ) # ) # return JsonResponse(status=200, data={"data": form.instance.title, "edit": True})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def edit(self):\n\n pass", "def edit(self, **kwargs):\n ...", "def edit_form():\n return template (\"edit\")", "def edit():", "def edit_person(self, pk):", "def edit(self, *args, **kw):\n\t\t\ttmpl_context.widget = self.edit_form\n\t\t\tpks \t\t= self.provider.get_primary_fields(self.model)\n\t\t\tkw \t\t\t= {}\n\n\t\t\tfor i, pk in enumerate(pks):\n\t\t\t\tkw[pk] \t\t= args[i]\n\n\t\t\tvalue \t\t= self.edit_filler.get_value(kw)\n\t\t\tvalue['_method'] \t= 'PUT'\n\n\t\t\treturn dict(value = value, model = self.model.__name__, pk_count = len(pks))", "def edit_view(request, title, modelform, instance=None, **kwargs):\n instance_form = modelform(request.POST or None, instance=instance)\n if instance_form.is_valid():\n instance = instance_form.save()\n messages.success(request, _(\"%s was edited.\") % instance)\n return redirect(instance.get_absolute_url())\n return form(\n {**kwargs, \"form\": instance_form, \"action_name\": _(\"Edit\"), \"title\": title},\n \"deployments/form.html\",\n request,\n )", "def view_edit(\n request: HttpRequest,\n pk: Optional[int] = None,\n workflow: Optional[Workflow] = None,\n view: Optional[View] = None,\n) -> JsonResponse:\n # Form to read/process data\n form = ViewAddForm(request.POST or None, instance=view, workflow=workflow)\n\n return save_view_form(\n request,\n form,\n 'table/includes/partial_view_edit.html')", "def edit(request):\n if 'image_id' not in request.GET:\n return HttpResponseRedirect('/imgmanip')\n image_id = request.GET['image_id']\n image = get_object_or_404(Image, pk=image_id)\n return render(request, 'imgmanip/edit.html', {'image': image, 'image_id': image_id})", "def edit(self, *args, **kw):\n tmpl_context.widget = self.edit_form\n #pks = self.provider.get_primary_fields(self.model)\n \n log.debug(\"soyRomperLB= %s\" %kw)\n\n ###########################################\n pks = self.provider.get_primary_fields(self.model)\n \n ###########################################\n kw = {}\n for i, pk in enumerate(pks):\n kw[pk] = args[i]\n value = self.edit_filler.get_value(kw)\n value['_method'] = 'PUT'\n return dict(value=value, model=self.model.__name__, pk_count=len(pks))", "def document_edit_view(document_id):\n\n doc = Document.query.filter(Document.id == document_id).first_or_404()\n return render_template('admin/documents/edit.html', document=doc, path='/admin/documents')", "def task_edit(request, pk):\n task_manager = TaskManager.objects.get(id=pk)\n task = task_manager.task\n if request.method == 'POST':\n \ttask_form = TaskForm(request.POST)\n \ttask_owner = request.user\n\n \tif task_form.is_valid():\n \t\ttask_name = task_form.cleaned_data.get('task_name')\n \t\ttask_description = task_form.cleaned_data.get('task_description')\n\n \t\tif task_manager.task_owner == task_owner:\n \t\t\ttask.task_name = task_name\n \t\t\ttask.task_description = task_description\n \t\t\ttask.save()\n \t\t\treturn redirect('task_list')\n else:\n \tform = TaskForm(instance=task)\n\n context = {'form': form, 'task_manager':task_manager}\n return render(request, 'tasker/task_edit.html', context)", "def getEditForm( self ):\n return \"listc_edit\"", "def edit(self, *args, **kw):\n tmpl_context.widget = self.edit_form\n pks = self.provider.get_primary_fields(self.model)\n kw = {}\n for i, pk in enumerate(pks):\n kw[pk] = args[i]\n\n value = self.edit_filler.get_value(kw)\n value['_method'] = 'PUT'\n \n return dict(value=value, model=self.model.__name__, pk_count=len(pks))", "def edit(self, *args, **kw):\n id_tipo_item = UrlParser.parse_id(request.url, 
\"tipositems\")\n url_action = \"../\"\n \n pp = PoseePermiso('redefinir tipo item',\n id_tipo_item=id_tipo_item)\n if not pp.is_met(request.environ):\n flash(pp.message % pp.nombre_permiso, 'warning')\n redirect(url_action)\n tmpl_context.widget = self.edit_form\n value = self.edit_filler.get_value( \\\n values={'id_atributos_por_tipo_item': int(args[0])})\n value['_method'] = 'PUT'\n page = \"Atributo {nombre}\".format(nombre=value[\"nombre\"])\n return dict(value=value, \n page=page, \n atras=url_action)", "def edit(request,entry_id):\n assert isinstance(request, HttpRequest)\n try:\n entry = Entry.objects.get(pk=entry_id)\n except Entry.DoesNotExist:\n raise Http404(\"指定されたブログが存在しません。\")\n if not request.user or request.user.pk != entry.member.pk: # ブログ作成者以外は編集できない\n return HttpResponseForbidden() #アドレスをコピペしなければ通常は起こらないため例外処理で済ませておく。\n\n if request.method == 'POST': # フォームが提出された\n form = EntryForm(request.POST, instance = entry) # POST データの束縛フォーム\n if form.is_valid(): # バリデーションを通った\n form.save()\n return HttpResponseRedirect(reverse('entry_list')) # POST 後のリダイレクト\n else:\n form = EntryForm(instance = entry) # 非束縛フォーム\n article_list = Article.objects.order_by('-released_at')[:5]\n return render(request, 'app/entry_edit.html', { \n 'form': form,\n 'title':'ブログ記事の編集',\n 'year':datetime.now().year,\n 'articles':article_list,\n 'blogs':EntryView.get_entry_list('-posted_at',-1, request.user.pk )[:5],\n 'submit_title':'更新',\n 'entry_pk':entry.pk,\n 'current_user':request.user,\n })", "def edit(id):\n r = requests.get(API_ROUTE + '/' + str(id), headers={'Auth': _auth()})\n if r.status_code != requests.codes.ok:\n return r.text, r.status_code\n\n return render_template('editor.html', article=r.json())", "def showEditContact(self):", "def edit(request, observation_id, summary_id):\n\n if request.method == 'POST':\n if observation_id and summary_id:\n o = get_object_or_404(models.Observations, pk=observation_id)\n o.summary_id = summary_id\n form = Observation(request.POST,instance=o)\n else:\n form = Observation(request.POST)\n if form.is_valid():\n form.save()\n return render_to_response(\"obsform_form.html\",\n {'form': form,\n 'success' : 'Your observation was saved'},\n context_instance=RequestContext(request))\n else:\n o = get_object_or_404(models.Observations, pk=observation_id)\n o.summary_id = summary_id\n\n form = Observation(instance=o)\n\n return render_to_response('obsform_form.html', {'form' : form},\n context_instance=RequestContext(request))", "def team_edit(team_id):\n if request.method == 'GET':\n team = Team.query.filter_by(team_id=team_id).one()\n return render_template('edit_team.html', team=team)", "def adminedit(object, id):\n\n db = get_db()\n\n if request.method == \"POST\":\n execute_string = 'UPDATE ' + object.title() + \" SET \"\n\n if object == 'post':\n execute_string += 'title = \"' + request.form['title'] + '\", content = \"' + request.form['content'] + '\", authorId = ' + request.form[\"authorid\"] + ', categoryId = ' + request.form[\"categoryid\"] + ''\n elif object == 'author':\n execute_string += 'name = \"' + request.form['name'] + '\"'\n elif object == 'category':\n execute_string += 'name = \"' + request.form['name'] + '\", description = \"' + request.form['description'] + '\"'\n\n execute_string += \" WHERE id = \" + str(id)\n db.execute(execute_string)\n db.commit()\n return redirect(url_for(\"adminview\", object=object))\n\n execute_string = \"SELECT * FROM \" + object.title() + \" WHERE id = \" + str(id)\n item = 
db.execute(execute_string).fetchone()\n\n return render_template(\"new.html\", object=object, item=item)", "def get_edit_form(self, data):\n self.add_success(data)\n rv = self.get((data[self.id_field], self.edit_url))\n assert not is_404(rv)\n assert in_response(rv, 'Edit {}'.format(data[self.name_field]))\n for field, name in self.fields:\n assert in_response(rv, name)\n return rv", "def pet_detail_edit(pet_id):\n\n pet = Pet.query.get_or_404(pet_id)\n form = PetEditForm(obj=pet)\n\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = form.notes.data\n pet.available = form.available.data\n\n db.session.commit()\n flash(f\"Pet{pet_id} updated!\")\n return redirect(f\"/{pet_id}\")\n\n else:\n return render_template(\"pet_detail.html\", form=form, pet=pet)", "def edit(slug):\n entry = get_object_or_404(Entry, Entry.slug == slug)\n if request.method == 'POST':\n if request.form.get('title'):\n entry.title = request.form.get('title')\n if request.form.get('content'):\n entry.content = request.form.get('content')\n entry.published = request.form.get('published') or False\n entry.save()\n\n flash('Entry saved successfully!', 'success')\n if entry.published:\n return redirect(url_for('detail', slug=entry.slug))\n else:\n return redirect(url_for('edit', slug=entry.slug))\n return render_template('edit.html', entry=entry)", "def edit(request):\n if 'form.submitted' in request.params:\n # delete old post\n title = request.params['title']\n name = title_to_name(title)\n\n if not name or DBSession.query(Post).filter(Post.name==name).count():\n # this should be a popup ajaxy box\n return Response(\"Name %s is in use, choose a different title\" % name, content_type='text/plain', status_int=500)\n\n body = request.params['body']\n post = Post(title, body, name)\n DBSession.add(post)\n return HTTPFound(location = request.route_url('view_post', postname=name))\n\n save_url = request.route_url('edit_post')\n post = DBSession.query(Post).filter(Post.name==name).first()\n return environment_factory(post=post, save_url=save_url)", "def show_edit_form(user_id):\n user = User.query.get_or_404(user_id)\n return render_template('edit.html', user=user)", "def edit(request, pk):\n obj = get_object_or_404(Book, id=pk)\n form = BookForm(request.POST or None, request.FILES or None, instance=obj)\n\n if form.is_valid():\n form.save()\n messages.success(request, 'Your password was updated successfully!')\n return redirect('book_list')\n return render(request, 'upload_book.html', {'form': form})", "def edit(self, id=None):\n rock_q = model.meta.Session.query(model.Rock)\n c.rock = rock_q.filter_by(id=id).first()\n if c.rock:\n return render('/derived/rock/edit.mako')\n else:\n abort(404)", "def object_edit(request, simulation, object_name):\n # Object is either 'centroid', 'crossing', 'link' or 'function'.\n # Create a formset to edit the objects.\n formset = gen_formset(object_name, simulation)\n context = {\n 'simulation': simulation,\n 'object': object_name,\n 'formset': formset,\n }\n return render(request, 'metro_app/object_edit.html', context)", "def edit(self, id, *args, **kw):\n atras = \"/rolesplantilla/\"\n if (not kw['contexto']):\n redirect('../')\n elif (kw['contexto'] == \"proyecto\"):\n selector = SelectorPermisosPlantillaProy\n elif (kw['contexto'] == \"fase\"):\n selector = SelectorPermisosPlantillaFase\n elif (kw['contexto'] == \"ti\"):\n kw[\"contexto\"] = u\"Tipo de Ítem\"\n selector = SelectorPermisosPlantillaTi\n \n self.edit_form = RolPlantillaEditForm(DBS=DBSession, 
selector=selector) \n tmpl_context.widget = self.edit_form\n rol_plantilla_edit_form = self.edit_form\n \n \n page=u\"Editar Rol Plantilla de {contexto}\".format(contexto=kw['contexto'])\n \n value = self.edit_filler.get_value(values={'id_rol': int(id)})\n \n #agregado\n if value[\"tipo\"].find(\"Plantilla\") < 0:\n page=u\"Editar Rol de {contexto}\".format(contexto=kw['contexto'])\n atras = \"/roles/\"\n \n return dict(value=value, page=page, atras=atras)", "def edit_entry():\n if not check_admin_logged() :\n abort(403)\n\n title = request.form['title']\n category = request.form['category']\n buydate = request.form['buydate']\n ssid = decrypt_book_record(request.form['ssid'])\n\n if not check_items_in_form(title, category, buydate):\n return redirect(url_for('show_entries_admin'))\n\n edited_entry = Entries.query.filter_by(\n id=ssid, title=title, category=category, \\\n buydate=buydate).first()\n\n if edited_entry is not None :\n edited_entry.introduction = request.form['introduction']\n if db.session.is_modified(edited_entry) :\n # commit only if something is modified\n try :\n db.session.commit()\n except IntegrityError as e :\n log_error('error when edit:')\n log_error(e.message)\n flash(u'数据库操作失败导致更新失败!请看后台日志')\n flash(u'成功更新条目')\n\n return redirect(url_for('show_entries_admin'))", "def show_edit_pet(id):\r\n pet = Pet.query.get_or_404(id)\r\n form = EditPetForm(obj=pet)\r\n\r\n if form.validate_on_submit():\r\n pet.photo_url = form.photo_url.data\r\n pet.notes = form.notes.data\r\n pet.available = form.available.data\r\n db.session.commit()\r\n\r\n return redirect('/')\r\n\r\n else:\r\n return render_template(\"pet_profile.html\", form=form, pet=pet)", "def edit(request, pk):\n template_var = base_template_vals(request)\n user = template_var[\"u\"]\n event = Event.objects.get(id=pk)\n template_var[\"e\"] = event\n if user.is_superuser or user.is_moderator:\n if request.method == 'POST':\n title = request.POST['title']\n refer = request.POST['refer']\n date = request.POST['date']\n time = request.POST['time']\n loc = request.POST['loc']\n body = request.POST['body']\n \n # Deal with time field\n try:\n event_datetime = date + ' ' + time\n print event_datetime\n event_datetime = datetime.strptime(event_datetime, '%m/%d/%Y %H:%M')\n except ValueError:\n print \"Error when processing time field\"\n \n # Deal with tags checkbox list\n tags = request.POST.getlist(\"tags\") \n if len(tags) == 0:\n event.tags.add(\"untagged\")\n else:\n taglist = list(tags)\n for t in taglist:\n event.tags.add(t)\n \n event.title = title\n event.refer = refer\n event.event_time = event_datetime\n event.location = loc\n event.body = body\n event.save() \n return single(request, pk)\n return render_to_response(\"event/event_edit.html\", template_var,\n context_instance=RequestContext(request))\n else :\n return redirect('index')", "def editar_empresa(id):\n cadastrando_empresa = False\n\n empresa = Empresa.query.get_or_404(id)\n form = EditarEmpresaForm(obj=empresa)\n\n if form.validate_on_submit():\n empresa.nome = form.nome.data\n empresa.simbolo = form.simbolo.data\n empresa.regiao = form.regiao.data\n empresa.tipo = form.tipo.data\n empresa.abertura = form.abertura.data\n empresa.fechamento = form.fechamento.data\n empresa.zona = form.zona.data\n empresa.moeda = form.moeda.data\n db.session.commit()\n flash('Empresa editada com sucesso!')\n\n return redirect(url_for('home.listar_empresas'))\n\n form.nome.data = empresa.nome\n form.simbolo.data = empresa.abertura \n form.regiao.data = empresa.regiao\n 
form.tipo.data = empresa.tipo\n form.abertura = empresa.abertura\n form.fechamento = empresa.fechamento\n form.zona.data = empresa.zona\n form.moeda.data = empresa.moeda\n\n\n return render_template('home/empresa.html', action=\"Edit\",\n cadastrando_empresa=cadastrando_empresa, form=form,\n empresa=empresa, title=\"Editar empresa\")", "def view_edit_pet(id):\n pet = Pet.query.get_or_404(id)\n form = PetEditForm(obj=pet)\n if form.validate_on_submit():\n form.populate_obj(pet)\n db.session.commit()\n\n flash(f\"Updated {pet.species} named {pet.name}\")\n return redirect(f'/{id}')\n else:\n return render_template(\"pet_edit_form.html\", form=form, pet=pet)", "def show_post_edit(post_id):\n\n post = Post.query.get_or_404(post_id)\n tags = Tag.query.all()\n\n return render_template(\"posts/edit_post.html\", post=post, tags=tags)", "def edit_parterre(id):\n parterre = get_parterre(id)\n form = ParterreForm(parterre)\n return render_template(\"create-parterre.html\",\n title= parterre.get_name()+\" - edit\",\n form = form,\n parterre = parterre,\n param = \"modif\")", "def edit(request, id, model, decorator = lambda x:x,\r\n post_save_redirect='', template_name=''):\r\n record = get_or_404(request, model, id)\r\n \r\n FormClass = decorator(\r\n forms.form_for_instance(\r\n record,\r\n fields = get_allowed_fields(request, model),\r\n ), \r\n request,\r\n instance = record\r\n )\r\n \r\n template_name = template_name or _make_template_name(model, 'form')\r\n\r\n #import pdb; pdb.set_trace()\r\n if request.method == 'POST':\r\n form = FormClass(request.POST)\r\n if form.is_valid():\r\n record = form.save()\r\n return HttpResponseRedirect(\r\n post_save_redirect or record.get_absolute_url()\r\n )\r\n else:\r\n form = FormClass()\r\n return render_to_response(\r\n template_name,\r\n context_instance = RequestContext(\r\n request,\r\n {\r\n 'form': form,\r\n }\r\n )\r\n )", "def edit(request, company_id=None):\n if company_id:\n company = get_object_or_404(Company, id=company_id)\n if request.POST and company.owner == request.user:\n form = CompanyForm(request.POST, instance=company)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect('/companies')\n if company.owner != request.user:\n return HttpResponseForbidden()\n form = CompanyForm(instance=company)\n context = dict(form=form)\n return render(request, 'companies/edit.html', context)\n else:\n companies = Company.objects.filter(owner=request.user)\n context = dict(companies=companies)\n return render(request, 'companies/companies_by_user.html', context)", "def edit(request, article_id):\n try:\n article = Article.objects.get(pk=article_id)\n except Article.DoesNotExist:\n raise Http404(\"Article does not exist\")\n if request.method == 'POST': # フォームが提出された\n form = ArticleForm(request.POST, instance = article) # POST データの束縛フォーム\n if form.is_valid(): # バリデーションを通った\n article = form.save(commit=False)\n if form.cleaned_data['no_expired_at'] is True:\n article.expired_at = None\n article.save()\n return HttpResponseRedirect(reverse('article_list')) # POST 後のリダイレクト\n else:\n no_expired_at = False\n if article.expired_at is None:\n no_expired_at = True\n article.expired_at = datetime.now() + timedelta(days=1)\n form = ArticleForm(instance = article, initial = {'no_expired_at': no_expired_at, }) # 非束縛フォーム\n article_list = Article.objects.order_by('-released_at')[:5]\n auth_form = AuthenticationForm(None, request.POST or None)\n return render(request, 'app/article_edit.html', { \n 'form': form,\n 'title':'ニュース記事の編集',\n 
'year':datetime.now().year,\n 'articles':article_list,\n 'blogs':EntryView.get_entry_list('-posted_at',-1, request.user.pk )[:5],\n 'submit_title':'更新する',\n 'article_pk':article.pk,\n 'auth_form':auth_form,\n 'current_user':request.user,\n })", "def register_edit_view(self, blueprint):\n view = apply_decorators(self.edit_view, self.edit_decorators)\n blueprint.add_url_rule(\n self.edit_rule, self.edit_endpoint, view, methods=['GET', 'POST'])", "def edit_template(self):\n return '{}/{}.html'.format(self.object_name, self.edit_endpoint)", "def edit(request, pageName):\n \n if request.method == \"POST\":\n form = EditForm(request.POST)\n \n if form.is_valid(): \n content = form.cleaned_data[\"content\"]\n title = form.cleaned_data[\"title\"]\n \n util.save_entry(title, content)\n return HttpResponseRedirect(reverse(\"encyclopedia:visit_entry\", args=(title, )))\n \n else:\n\n form = EditForm({'title': pageName, 'content': util.get_entry(pageName) })\n return render(request, \"encyclopedia/edit_page.html\", {\n \"form\": EditForm(),\n \"pageName\": pageName\n })\n \n \n return render(request, \"encyclopedia/edit_page.html\", {\n \"form\": EditForm({'title': pageName, 'content': util.get_entry(pageName) }),\n \"pageName\": pageName\n })", "def edit_entry(request, entry_id):\n entry= Entry.objects.get(id= entry_id)\n stock= entry.stock\n\n if request.method != 'POST':\n #initial request; pre-fill form with the current note entry.\n form= EntryForm(instance=entry)\n else:\n # POST data submitted; process data.\n form= EntryForm(instance=entry, data=request.POST)\n if form.is_valid():\n form.save()\n return redirect('stock_trackers:stock', stock_id=stock.id)\n\n context= {'entry': entry, 'stock': stock, 'form': form}\n return render(request, 'stock_trackers/edit_entry.html', context)", "def edit_post(post_id):\n post = Post.query.get_or_404(post_id)\n tags = Tag.query.all()\n\n return render_template(\"posts/edit_post.html\", post=post, tags=tags)", "def edit():\n database.ask(mode='single')\n F = database.check(single=True)\n if F and hasattr(F,'edit'):\n name = database[0]\n F.edit(name)", "def edit(request, pk):\n\n try:\n object = User.objects.get(pk=pk)\n except:\n object = User()\n\n if request.method == 'POST': # If the form has been submitted...\n form = UserForm(request.POST, instance=object)\n\n if form.is_valid(): # If the form is valid\n object = form.save()\n\n messages.success(request, _('The user has been saved.'))\n\n return redirect('users.views.list')\n else:\n form = UserForm(instance=object)\n\n return render(request, 'users/users/edit.html', {'form': form})", "def edit_car_view():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n car_id = request.args.get('car-id', None)\n edit_mode_string = request.args.get('edit', None)\n if edit_mode_string == 'true':\n edit_mode = True\n else:\n edit_mode = False\n car = get_car_identified_by_id(car_id)\n if check_authentication(session_id, user_id) and is_admin_user(user_id):\n return render_template('cars_manager.html', user=user_id, session_id=session_id, car=car, edit_mode=edit_mode,\n current_year=get_current_year())\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)", "def careerCatagory_edit(request):\r\n action = tool.get_param_by_request(request.GET, 'action', \"add\", str)\r\n page_index = tool.get_param_by_request(request.GET, 'page_index', 1, 
int)\r\n\r\n careerCatagory = None\r\n if action == \"edit\" or action == \"show\":\r\n _id = tool.get_param_by_request(request.GET, 'id', 0, int)\r\n careerCatagory = api_careerCatagory.get_career_catagory_by_id(_id)\r\n\r\n if careerCatagory.is_error():\r\n return render_to_response(\"404.html\", {}, context_instance=RequestContext(request))\r\n\r\n careerCatagory = careerCatagory.result()[0]\r\n\r\n c = {\"careerCatagory\": careerCatagory, \"action\": action, \"page_index\": page_index}\r\n\r\n return render_to_response(\"mz_course/careerCatagory_save.html\", c, context_instance=RequestContext(request))", "def edit_user(user_id):\n\n user = User.query.get_or_404(user_id)\n return render_template('users/edit.html', user=user)", "def edit_task(self,tid, **kwargs):\n self.task_controller.edit(tid, **kwargs)", "def edittask_view(request, task_id):\n\n # Use to tell to the template tha the user want to edit an already existing task\n is_new = False\n\n # Retrieve the task, raise an error if the task does not exist\n task = get_object_or_404(Task, id=task_id)\n project = task.projet\n # Check if logged in user is allowed to modify the task\n if request.user.has_perm('taskmanager.{}_project_permission'.format(project.id)):\n # Check if the form has been submitted\n if request.method == \"POST\":\n form = TaskForm(project, request.POST)\n if form.is_valid():\n task = form.save(commit=False)\n # Manually set the project id. Otherwise a new task would be created\n task.id = task_id\n task.last_modification = datetime.datetime.now()\n task.save()\n\n return redirect(\"task\", task_id=task.id)\n else:\n # Initialize the form with the task\n form = TaskForm(project, instance=task)\n else:\n return redirect(\"projects\")\n return render(request, \"newtask.html\", locals())", "def edit_user(user_id):\n user = User.query.get_or_404(user_id)\n return render_template('/users/edit_page.html', user=user)", "def edit_entry(id):\n if not session.get('logged_in'):\n abort(401)\n\n if request.method == 'POST':\n db = get_db()\n db.execute('update entries set title = ?, ingredients = ?, \\\n steps = ?, tags = ?, url = ? where id = ?',\n [request.form['title'], request.form['ingredients'],\n request.form['steps'], request.form['tags'],\n request.form['url'], request.form['id']])\n db.commit()\n flash('Entry ' + id + ' has been modified.', 'success')\n return view_entry(str(id))\n else:\n db = get_db()\n cur = db.execute('select id, title, ingredients, steps, tags, \\\n url from entries where id = ? 
order by id desc',\n [id.strip()])\n entries = cur.fetchall()\n return render_template('edit_entry.html', entries=entries)", "def show_edit_form(user_id):\n\n user = User.query.get_or_404(user_id)\n\n return render_template(\"users/edit_user.html\", user=user)", "def show_edit_post(post_id):\n post = Post.query.get_or_404(post_id)\n\n return render_template('edit-post.html', post=post)", "def edit_accomment(request, pk):\n\n comment = get_object_or_404(ActorComment, pk=pk)\n form = ActorCommentForm()\n if request.method == \"POST\":\n form = ActorCommentForm(request.POST, instance=comment)\n\n if form.is_valid():\n form.save()\n url = '../../' + str(comment.actor.pk)\n return redirect(url)\n\n context = {\n 'form': form,\n 'comment': comment,\n }\n return render(request, context)", "def edit_plante(id):\n plante = get_plante(id)\n form = PlanteForm(plante)\n return render_template(\n \"create-plante.html\",\n title = plante.get_name()+\" - edit\",\n form = form,\n plante = plante,\n param = \"modif\")", "def simulation_view_edit(request, simulation):\n edit_form = BaseSimulationForm(data=request.POST, instance=simulation)\n if edit_form.is_valid():\n edit_form.save()\n return HttpResponseRedirect(\n reverse('metro:simulation_view',\n args=(simulation.id,))\n )\n else:\n # Redirect to a page with the errors (should not happen).\n context = {\n 'simulation': simulation,\n 'form': simulation_form,\n }\n return render(request, 'metro_app/errors.html', context)", "def edit_recipe(request, recipe, **_kwargs):\n return edit_view(request, _(\"Recipe\"), RecipeForm, recipe)", "def is_edit(self):\n return self._tag == 'edit'", "def edit_task_page(request):\n data = {}\n try:\n tasklist = request.GET.get(\"tasklist\")\n task = request.GET.get(\"task\")\n data[\"tasklist\"] = tasklist\n\n task_obj = Todo.objects.get(title=task)\n data[\"data\"] = task_obj\n\n return render(request, \"pages/update-task.html\", data)\n except Exception as ex:\n return HttpResponse(ex)", "def edit(self, *args, **kw):\n pp = PoseePermiso('modificar rol')\n if not pp.is_met(request.environ):\n flash(pp.message % pp.nombre_permiso, 'warning')\n redirect(self.action)\n tmpl_context.widget = self.edit_form\n value = self.edit_filler.get_value(values={'id_rol': int(args[0])})\n page = \"Rol {nombre}\".format(nombre=value[\"nombre_rol\"])\n atras = self.action\n return dict(value=value, page=page, atras=atras)", "def DoEdit(self,event):\r\n raise UncodedError", "def enterprise_edit(request):\r\n action = tool.get_param_by_request(request.GET, 'action', \"add\", str)\r\n career_id = tool.get_param_by_request(request.GET, 'careerId', 0, int)\r\n\r\n enterprise = APIResult()\r\n c = None\r\n if action == \"add\":\r\n c = {\"career_id\": career_id, \"action\": action}\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_edit.html\", c,\r\n context_instance=RequestContext(request))\r\n\r\n if action == \"edit\" and (not career_id):\r\n _id = tool.get_param_by_request(request.GET, 'enterpriseId', 0, int)\r\n enterprise = api_enterprise.get_career_page_enterprise_by_id(_id)\r\n c = {\"enterprises\": enterprise.result()[0], \"action\": action}\r\n if enterprise.is_error():\r\n return render_to_response(\"404.html\", {}, context_instance=RequestContext(request))\r\n else:\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_edit.html\", c,\r\n context_instance=RequestContext(request))\r\n\r\n if action == \"edit\" and career_id:\r\n enterprise = 
api_enterprise.list_career_page_enterprise_by_career_id(career_id)\r\n c = {\"enterprises\": enterprise.result(), \"action\": action}\r\n\r\n if enterprise.is_error():\r\n return render_to_response(\"404.html\", {}, context_instance=RequestContext(request))\r\n else:\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_list.html\", c,\r\n context_instance=RequestContext(request))", "def editPage(request, title):\n entry = util.get_entry(title)\n if request.method == \"POST\":\n # check if the data is valid then save/replace old data\n form = editPageForm(request.POST)\n if form.is_valid():\n title = form.cleaned_data[\"editTitle\"]\n content = form.cleaned_data[\"editBody\"]\n\n util.save_entry(title, content)\n\n # take user to their editted page\n return HttpResponseRedirect(reverse(\"entry\", kwargs={\n \"title\": title\n }))\n # give user a editting form with existing data filled in by defult. \n else:\n editForm = editPageForm(initial={\n \"editTitle\": title,\n \"editBody\": entry\n })\n editFormTitle = editForm[\"editTitle\"]\n editFormBody = editForm[\"editBody\"]\n return render(request, \"encyclopedia/editPage.html\", {\n \"formTitle\": editFormTitle,\n \"formBody\": editFormBody\n })", "def edit(self, name=UNSPECIFIED, extraParams={}):\n import labstep.entities.resource.repository as resourceRepository\n\n return resourceRepository.editResource(self, name, extraParams=extraParams)", "def show_pet_with_edit_form(pet_id):\n pet = Pet.query.get_or_404(pet_id)\n form = PetFormEdit(obj=pet)\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = form.notes.data\n pet.available = form.available.data\n \n db.session.commit()\n return redirect('/')\n else:\n return render_template('pet.html', pet=pet, form=form)", "def edit_question(request, question_id):\n question = get_object_or_404(Question, id=question_id)\n if auth.can_edit_post(request.user, question):\n return _edit_question(request, question)\n elif auth.can_retag_questions(request.user):\n return _retag_question(request, question)\n else:\n raise Http404", "def show_edit_post(post_id):\r\n post = Post.query.get_or_404(post_id)\r\n tags = Tag.query.all()\r\n return render_template('edit-post.html', post=post, tags=tags)", "def edit_item(Task_id):\n\n if request.method == 'POST':\n #get the parameters from html form\n Description = request.form['Description']\n status = request.form['status']\n Due_date = request.form['Due_date']\n\n #if status of task is open then it will set the value=1\n if status == 'open':\n status = 1\n else:\n status = 0\n\n #conncetion to the database\n conn = sqlite3.connect('todo.db')\n c = conn.cursor()\n\n #update query to update fields of task\n c.execute(\"UPDATE task SET Description = ?, Due_date = ?, Modified_date = Date('now'), status = ? 
WHERE Task_id LIKE ?\", (Description, Due_date, status, Task_id))\n conn.commit()\n return redirect(\"/todo\")\n else:\n Task_id = str(Task_id)\n #conncetion to the database\n conn = sqlite3.connect('todo.db')\n c = conn.cursor()\n\n #select query to get the Description of particular task\n c.execute(\"SELECT Description,Due_date FROM task WHERE Task_id = ?\",[Task_id])\n cur_data = c.fetchone()\n conn.commit()\n return render_template('update_task.html',Task_id= Task_id,old= cur_data)", "def edit_record(request, slug, pk):\n # Try except to make sure the user is a member of this project\n try:\n ProjectMember.objects.get(user=request.user, project=Project.objects.get(slug=slug))\n except ObjectDoesNotExist:\n # User is not a member.\n return HttpResponse(\"You're trying to access a project you're not a member of or a project that does not exist.\")\n else:\n # User is a member,\n project = get_object_or_404(models.Project, slug=slug)\n record = get_object_or_404(models.Record, pk=pk)\n pm = ProjectMember.objects.get(user=request.user, project=project)\n\n # Access control.. if not owner or editor - access denied.\n if pm.is_owner or pm.is_editor:\n # User has access\n if request.method == 'POST':\n # User submits data\n form1 = forms.GeneralRecordForm(request.POST)\n form2 = forms.SpecificRecordForm(request.POST, entry=request.POST['entry_type'])\n context = {\n 'form1':form1,\n 'project':project,\n 'form':form2,\n }\n if form2.is_valid() and form1.is_valid():\n fields = [f.name for f in models.Record._meta.get_fields()]\n data1 = form1.clean()\n data2 = form2.clean()\n # Additional form validation.\n if data1['entry_type'] == 'book':\n if data2['author']== '' and data2['editor'] == '':\n context['err'] = True\n context['errmessage'] = \"Fill in either Author or Editor\"\n return render(request, 'records/record_edit.html', context)\n elif data1['entry_type'] == 'inbook':\n if data2['author'] == '' and data2['editor'] == '':\n context['err'] = True\n context['errmessage'] = \"Fill in either Author or Editor\"\n return render(request, 'records/record_edit.html', context)\n elif data2['chapter'] == '' and data2['pages'] == '':\n context['err'] = True\n context['errmessage'] = \"Fill in either Chapter or Pages\"\n return render(request, 'records/record_edit.html', context)\n # Form is valid .. 
save into new record\n # making sure no one has edited the record while session is running\n if record.last_edited.__str__() == request.COOKIES.get('last_edited'):\n # No conflict, go on save changes.\n record.entry_type = data1['entry_type']\n record.cite_key = data1['cite_key']\n record.project = project\n for fieldname in fields:\n if fieldname in data2:\n setattr(record, fieldname, data2[fieldname])\n record.last_edited = timezone.now()\n record.save()\n # Send user back to project detail, the overview of all records in the project.\n return redirect('projects:single', slug=slug)\n else:\n # someone changed the record before the user managed to save\n data = forms.ShowRecordForm(data=model_to_dict(record), entry=record.entry_type)\n context = {\n 'old_record':record,\n 'form1':form1,\n 'project':project,\n 'form':form2,\n 'data':data\n }\n # send user to the conflict page.\n return render(request, 'records/record_conflict.html', context)\n\n else:\n # Form is not valid\n context = {\n 'form1':form1,\n 'project':project,\n 'form':form2,\n 'err':True\n }\n return render(request, 'records/record_edit.html', context)\n else:\n # User hasn't submitted any data yet\n # Form filled in with data for selected record.\n form1 = forms.GeneralRecordForm(data=model_to_dict(record))\n form2 = forms.SpecificRecordForm(data=model_to_dict(record),entry=record.entry_type)\n context = {\n 'form1':form1,\n 'form2':form2,\n 'project':project,\n 'record':record\n }\n # Create response in order to set cookie\n response = render(request, 'records/record_edit.html', context)\n # set cookie to enable later check for conlfict\n response.set_cookie('last_edited', record.last_edited.__str__())\n return response\n else:\n # Access denied.\n return HttpResponse(\"You don't have the permission to do this\")", "def office_view(request, slug, id):\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n office_reference = get_object_or_404(Office, id=id,company=company)\n\n return render_to_response('office_form.html', \n {'details': office_reference,'info':office_reference},\n context_instance=RequestContext(request))", "def edit_item(action, user):\n def get_item(items, id):\n for item in items:\n if item.id == id:\n return item \n raise Item.DoesNotExist()\n \n l = List.objects.get(id=action.get('listId', None))\n verify_permission(l, user)\n \n editable_attributes = ('position', 'description', 'crossed')\n \n try:\n item = get_item(l.items, action['what']['id'])\n except:\n raise Item.DoesNotExist\n \n for key, value in action['what'].iteritems():\n if key == 'id': continue\n elif key in editable_attributes:\n item.__setattr__(key, value)\n l.save()\n \n return l", "def edit(request):\n issue = request.issue\n base = issue.base\n\n if request.method != 'POST':\n reviewers = [models.Account.get_nickname_for_email(reviewer,\n default=reviewer)\n for reviewer in issue.reviewers]\n ccs = [models.Account.get_nickname_for_email(cc, default=cc)\n for cc in issue.cc]\n form = EditLocalBaseForm(initial={'subject': issue.subject,\n 'description': issue.description,\n 'base': base,\n 'reviewers': ', '.join(reviewers),\n 'cc': ', '.join(ccs),\n 'closed': issue.closed,\n 'private': issue.private,\n })\n return respond(request, 'edit.html', {\n 'issue': issue,\n 'form': form,\n 'offer_delete': (issue.owner == request.user\n or auth_utils.is_current_user_admin())\n })\n\n form = EditLocalBaseForm(request.POST)\n\n if form.is_valid():\n reviewers = _get_emails(form, 
'reviewers')\n\n if form.is_valid():\n cc = _get_emails(form, 'cc')\n\n if not form.is_valid():\n return respond(request, 'edit.html', {'issue': issue, 'form': form})\n cleaned_data = form.cleaned_data\n\n was_closed = issue.closed\n issue.subject = cleaned_data['subject']\n issue.description = cleaned_data['description']\n issue.closed = cleaned_data['closed']\n issue.private = cleaned_data.get('private', False)\n base_changed = (issue.base != base)\n issue.base = base\n issue.reviewers = reviewers\n issue.cc = cc\n if base_changed:\n for patchset in issue.patchsets:\n ndb.transaction(lambda: _delete_cached_contents(list(patchset.patches)))\n issue.calculate_updates_for()\n issue.put()\n\n return HttpResponseRedirect(reverse(show, args=[issue.key.id()]))", "def word_sets_edit(request, pk=None):\n word_set = get_object_or_404(WordSet, pk=pk, user=request.user) if pk else None\n edit_mode = True if word_set else False\n form = WordSetForm(request.POST or None, request.FILES or None,\n instance=word_set, user=request.user)\n if form.is_valid():\n form.save()\n if edit_mode:\n return redirect(word_sets_detail, pk=pk)\n redirect(word_sets_list)\n return render(request, 'word_sets_edit.html', {'form': form, 'edit_mode': edit_mode})", "def edit_button_clicked(self, obj):\n handle = self.get_selected()\n if handle:\n note = self.dbstate.db.get_note_from_handle(handle)\n try:\n from .. import EditNote\n EditNote(self.dbstate, self.uistate, self.track, note,\n callertitle = self.callertitle,\n extratype = [self.notetype] )\n except WindowActiveError:\n pass", "def _edit_command(self, lib, opts, args):\n # Get the objects to edit.\n query = ui.decargs(args)\n items, albums = _do_query(lib, query, opts.album, False)\n objs = albums if opts.album else items\n if not objs:\n ui.print_('Nothing to edit.')\n return\n\n # Get the fields to edit.\n if opts.all:\n fields = None\n else:\n fields = self._get_fields(opts.album, opts.field)\n self.edit(opts.album, objs, fields)", "def url_to_edit(obj):\n return reverse(\n 'admin:%s_%s_change' % (obj._meta.app_label, obj._meta.model_name),\n args=[obj.id]\n )", "def show_and_edit_pet_page(pet_id):\n \n pet = Pet.query.get(pet_id)\n\n form = EditPetPage(obj=pet)\n\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = form.notes.data\n pet.available = form.available.data\n\n db.session.commit()\n\n return redirect('/')\n\n else:\n return render_template('display_pet.html', pet=pet, form=form)", "def edit_user(user_id):\n user = User.query.get_or_404(user_id)\n return render_template(\"users/edit_user.html\", user=user)", "def editDetail(id):\n form = EditDetailForm(request.form)\n if request.method == \"GET\":\n return render_template(\"/pages/edit.html\", form=form)\n else:\n choose = True\n section = form.category.data\n return redirect(url_for(\"editDetailSection\", id=id ,section=section))", "def edit_record(self, record):\r\n self.record.editObject(record, id=record['id'])", "def get(self,request,*args,**kwargs):\n\t\tuser_form = UserUpdateForm(instance=request.user)\n\t\tpersona_form = PersonaUpdateForm(instance=request.user.persona)\n\t\tuser_password_update_form = UserPasswordUpdateForm(user=request.user)\n\n\t\tcontext = {\n\t\t'user_form':user_form,\n\t\t'persona_form':persona_form,\n\t\t'user_password_update_form':user_password_update_form\n\t\t}\n\t\treturn render(request, 'cuenta/editar.html', context)", "def edit(user_id):\n if user_id != current_user.id:\n return abort(403)\n\n user = get_user(user_id)\n form = 
EditForm(obj=user)\n form.email.data = user.email\n\n if form.validate_on_submit():\n password = form.password.data\n username = form.username.data\n\n save_result = edit_user(user_id, password, username, user.active)\n user = save_result['entry']\n form = EditForm(request.form, obj=save_result['entry'])\n form.email.data = user.email\n return redirect(url_for('.index'))\n \n return render_template('users/edit.html'\n ,form=form\n ,user=user\n ,t=t\n ,m=m)", "def edit_product(request, pk):\n\n products = get_object_or_404(Product, pk=pk)\n if request.method == 'POST':\n form = ProductPostForm(request.POST, instance=products)\n if form.is_valid():\n product = form.save()\n return redirect(product_details, product.pk)\n else:\n form = ProductPostForm(instance=products)\n return render(request, 'editproduct.html', {'form': form})", "def show_edit_pet(pet_id):\n pet = Pet.query.get(pet_id)\n form = EditPet(obj=pet)\n\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = form.notes.data\n pet.available = form.available.data\n\n db.session.add(pet)\n db.session.commit()\n\n return redirect(\"/\")\n\n else:\n return render_template('edit_pet.html', form=form, pet=pet)", "def edit_post(request, year, month, day, slug):\n post = get_model_for_date_and_slug(Post, year, month, day, slug)\n form = PostForm(instance=post)\n if request.method == \"POST\":\n form = PostForm(request.POST, instance=post)\n if form.is_valid():\n post = form.save()\n if \"continue_editing\" in request.POST:\n return http.HttpResponseRedirect(post.get_edit_url())\n return http.HttpResponseRedirect(post.get_absolute_url())\n return render_to_response(\"montgomery/edit_post.html\", {\"form\": form}, context_instance=RequestContext(request))", "def transaction_edit(request, transaction_id, model_class=Transaction, form_class=TransactionForm, template_name='budget/transactions/edit.html'):\n transaction = get_object_or_404(model_class.active.all(), pk=transaction_id)\n if request.POST:\n form = form_class(request.POST, instance=transaction)\n \n if form.is_valid():\n category = form.save()\n return HttpResponseRedirect(reverse('budget_transaction_list'))\n else:\n form = form_class(instance=transaction)\n return render_to_response(template_name, {\n 'transaction': transaction,\n 'form': form,\n }, context_instance=RequestContext(request))", "def player_edit(player_id):\n if request.method == 'GET':\n result = {}\n player = Player.query.filter_by(player_id=player_id).one()\n player.player_image = b64encode(player.player_image)\n teams = get_team()\n result['player'] = player\n result['teams'] = teams\n return render_template('edit_player_info.html', results=result)", "def edit_object(obj):\n return __EditMode(obj)", "def edit(self):\n if not self.context.model.is_editable():\n raise Unauthorized(\"Editing is not allowed\")\n\n title = self.request.get('title')\n if not title:\n return JSONResponse(self.request).error(\n _('agenda_item_update_empty_string',\n default=u\"Agenda Item title must not be empty.\")).proceed().dump()\n\n title = title.decode('utf-8')\n if self.agenda_item.has_proposal:\n if len(title) > ISubmittedProposal['title'].max_length:\n return JSONResponse(self.request).error(\n _('agenda_item_update_too_long_title',\n default=u\"Agenda Item title is too long.\")\n ).proceed().dump()\n\n self.agenda_item.set_title(title)\n return JSONResponse(self.request).info(\n _('agenda_item_updated',\n default=u\"Agenda Item updated.\")).proceed().dump()", "def show_edit_post_form(post_id):\n 
post = Post.query.get_or_404(post_id)\n\n return render_template('posts/edit.html', post=post)", "def editItem(category_item_id):\n editedItem = db.findItem(id=category_item_id)\n if editedItem.user_id != login_session['user_id']:\n return not_authorized()\n if request.method == 'POST':\n db.updateItem(editedItem, request.form)\n return redirect(url_for('showCatalog'))\n return render_template(\n 'edit_item.html', categories=db.getAllCategories(), item=editedItem)", "def edit_user(user_id):\n user = User.query.get_or_404(user_id)\n\n return render_template('edit-user.html', user=user)", "def edit(article_id):\r\n response = table.get_item(\r\n Key={'article_id': article_id}\r\n )\r\n data = response.get('Item')\r\n\r\n if data is None:\r\n flash('Unable to get Article')\r\n return redirect(url_for('article.list'))\r\n\r\n form = ArticleForm(title=data.get('title'), description=data.get('description'))\r\n\r\n # Check request method and validate form\r\n if request.method == 'POST' and form.validate():\r\n data = {}\r\n data['article_id'] = article_id\r\n data['title'] = form.title.data\r\n data['description'] = form.description.data\r\n\r\n data = dict((k, v) for k, v in data.items() if v)\r\n\r\n # Save data in DynamoDb to update table\r\n response = table.put_item(Item=data)\r\n\r\n if response:\r\n flash('Article is successfully updated')\r\n return redirect(url_for('article.list'))\r\n \r\n return render_template('article/form.html', add_article=False,\r\n form=form, title='Edit Article', article_id=article_id)", "def view_user_edit(self):\n\n logged_in = authenticated_userid(self.request)\n message = ''\n form = Form(self.request, schema=UserEditSchema,\n state=State(request=self.request))\n if form.validate():\n password = self.request.params['password']\n if self.context.validate_password(password):\n if self.request.params['new_password']:\n password = self.request.params['new_password']\n message = 'Successfully saved'\n email = self.request.params['email']\n self.context.edit(password, email)\n else:\n message = msg['password_invalid']\n return {\n 'message': message,\n 'project': '',\n 'username': self.context.username,\n 'logged_in': logged_in,\n 'form': FormRenderer(form),\n 'email': self.context.email\n }", "def show_edit_actor(self):\n\t\ttry:\n\t\t\tnombre = self.ui.lista_act.currentItem().text()\n\t\t\tformulario = view_form_actor.Form(self)\n\t\t\tformulario.edit(nombre)\n\t\t\tformulario.exec_()\n\t\t\tself.load_data()\n\t\texcept AttributeError as e:\n\t\t\terrorMessageBox = QtGui.QMessageBox.warning(self,\"Error\",\"Debe seleccionar un actor\")", "def edit_post(request, post_id):\n post = Post.objects.get(id=post_id)\n check_post_owner(request, post)\n\n if request.method != 'POST':\n # Initial request; pre-fill form with the current entry.\n form = PostForm(instance=post)\n else:\n # POST data submitted; process data.\n form = PostForm(instance=post, data=request.POST)\n if form.is_valid():\n form.save()\n return redirect('blogs:post', post_id=post.id)\n\n context = {'post': post, 'form': form}\n return render(request, 'blogs/edit_post.html', context)", "def _editClickedSlot(self):\r\n\r\n index = self.propertiesTableView.selectionModel().currentIndex()\r\n if index.isValid():\r\n self.propertiesTableView.edit(index)" ]
[ "0.80914015", "0.78752905", "0.7775213", "0.7495747", "0.74247116", "0.73104566", "0.7053273", "0.7040553", "0.7038563", "0.69779855", "0.6973604", "0.6886933", "0.68703216", "0.68322045", "0.67837024", "0.6779167", "0.67466205", "0.6741481", "0.6699304", "0.6675047", "0.66226476", "0.66194457", "0.6604886", "0.6591583", "0.6564938", "0.65423745", "0.65373164", "0.6525162", "0.65122426", "0.6512019", "0.6494448", "0.646755", "0.6444865", "0.64413315", "0.64314926", "0.64280015", "0.6398587", "0.63877517", "0.6377764", "0.63716036", "0.63690764", "0.6365352", "0.63519496", "0.6338616", "0.6332951", "0.6332343", "0.63249385", "0.6313264", "0.6308514", "0.62782705", "0.62634754", "0.6262538", "0.6253541", "0.6250768", "0.62436557", "0.6239793", "0.6237905", "0.623761", "0.62325484", "0.62300736", "0.6229617", "0.62294513", "0.62285715", "0.6227414", "0.62193185", "0.62158966", "0.6206563", "0.6203127", "0.6186639", "0.61855656", "0.61837363", "0.6178204", "0.61765915", "0.61755705", "0.61740017", "0.6162231", "0.6161971", "0.61580276", "0.61563", "0.6145863", "0.6139581", "0.6131741", "0.6130705", "0.61133033", "0.61092305", "0.6097757", "0.6094066", "0.6092942", "0.6086574", "0.6084901", "0.60829353", "0.6080215", "0.6078922", "0.6078327", "0.6072631", "0.6069311", "0.6064517", "0.6061696", "0.6060774", "0.6044666" ]
0.74116254
5
View for detail of action
def action_detail(request, action_id):
    employee = request.user.employee_user.first()
    action = Action.objects.get(pk=int(action_id))
    # if not employee.isEnsoUser() and employee.company.pk != action.employee.company.pk:
    if not employee.hasAccessTo(action.employee):
        raise PermissionDenied()

    if request.method == 'POST':
        form = ActionCommentForm(request.POST)
        if form.is_valid():
            form.save(request.user, action)
            return HttpResponseRedirect('/action/%s' % action_id)
    else:
        form = ActionCommentForm()
    return TemplateResponse(
        request,
        'mus/action_detail.html',
        dict(
            action=action,
            form=form
        )
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return _action_args_dict[self.action].name", "def action(self):\n pass", "def action(self):\n pass", "def view(self):", "def get_action(self, context):\n pass", "def show(self, *args, **kwargs) -> None:\n pass", "def show(self, *args, **kwargs) -> None:\n pass", "def show(self, *args, **kwargs) -> None:\n pass", "def __str__(self):\n return str(self.get_action_display())", "def details_view(self):\n return_url = get_redirect_target() or self.get_url('.index_view')\n\n if not self.can_view_details:\n return redirect(return_url)\n\n id = get_mdict_item_or_list(request.args, 'id')\n if id is None:\n return redirect(return_url)\n\n model = self.get_one(id)\n\n if model is None:\n flash(gettext('Record does not exist.'), 'error')\n\n if self.details_modal and request.args.get('modal'):\n template = self.details_modal_template\n else:\n template = self.details_template\n\n relationship_views = []\n for relationship in self.model_relationship_views:\n relationship_view = self.model_relationship_views[relationship]\n bp = relationship_view.blueprint\n endpoint = '{}.ajax_config'.format(relationship_view.blueprint.name)\n data = {\n 'field': relationship,\n 'title': relationship_view.title,\n 'config_url': self.get_url(endpoint, model_id=id)\n }\n relationship_views.append(data)\n\n return self.render(\n template,\n model=model,\n details_columns=self._details_columns,\n get_value=self.get_detail_value,\n relationship_views=relationship_views,\n return_url=return_url\n )", "def print_details(self):\n self.view.print_details()", "def detail(self, req):\n return self.index(req)", "def obtain_action(self):\r\n\t\treturn", "def action(action_id):\n action = Action.query.filter_by(\n id=action_id, username=current_user.username\n ).first_or_404()\n return jsonify(\n dict(\n action_id=action.id,\n action_name=action.name,\n action_type=action.type.value,\n details=action.details,\n )\n )", "def action(self) -> str:\n return pulumi.get(self, \"action\")", "def _action(self):\n pass", "def detail_view(self, request, pk):\n instance = self.get_object()\n if self.revision_wanted is not None:\n instance = get_object_or_404(\n instance.revisions, id=self.revision_wanted).as_page_object()\n elif self.is_preview:\n instance = instance.get_latest_revision_as_page()\n serializer = self.get_serializer(instance)\n return Response(serializer.data)", "def actionURL(self):\n raise NotImplementedError()", "def action_type(self):", "def show(self):\n\n pass", "def show(self) -> None:", "def view_item(request, item_pk):\n return HttpResponse('This is where we view item ' + item_pk)", "def action(self):\n return self._get_field(\"action\")", "def get_action(self):\n raise NotImplementedError", "def briefing_action(self, query):\n raise NotImplementedError()\n pass", "def getAction(self, state):\n util.raiseNotDefined()", "def getAction(self, state):\n util.raiseNotDefined()", "def action_edit(request, action_id):\n employee = request.user.employee_user.first()\n action = Action.objects.get(pk=action_id)\n if not employee.isEnsoUser() and employee.company.pk != action.employee.company.pk:\n raise PermissionDenied()\n # if request.method == 'POST':\n form = ActionForm(request.POST, instance=action)\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/action/%d' % form.instance.pk)\n # else:\n # form = ActionForm(instance=action)\n # return TemplateResponse(\n # request,\n # 'mus/action_edit.html',\n # dict(\n # form=form,\n # edit=True\n # )\n # )\n\n # return 
JsonResponse(status=200, data={\"data\": form.instance.title, \"edit\": True})", "def action(self):\n return self._action", "def action(self):\n return self._action", "def action(self):\n return self._action", "def action(self):\n return self._action", "def action(self):\n return self._action", "def action(self):\n return self._action", "def show(self):\n pass", "def action_summary(self, request, action_info):\n replicate_info = {\n 'volume_id': action_info['id'],\n 'replicate_status': action_info['replicate_status']\n }\n\n if 'snapshot_id' in action_info.keys():\n replicate_info['snapshot_id'] = action_info['snapshot_id']\n info = {\n 'replicate': replicate_info\n }\n return info", "def show(self):", "def __actions__(self, obj):\n primary_fields = self.__provider__.get_primary_fields(self.__entity__)\n pklist = '/'.join(map(lambda x: str(getattr(obj, x)), primary_fields))\n #if has_permission('manage'):############\n \n historial = DBSession.query(Item.nrohistorial).filter_by(id=pklist).first()\n idlineabase = DBSession.query(Item.idLineaBase).filter_by(nrohistorial=historial, ultimaversion=1).first()\n lineabase = DBSession.query(LineaBase).filter_by(id=idlineabase).first()\n \n value = '<div></div>'\n \n if lineabase != None:\n if str(lineabase.estado).__eq__('abierta'):\n value = '<div><a class=\"loginlogout\" href=\"'+pklist+'/edit\" style=\"text-decoration:none\">Revertir</a></div>'\n else:\n value = '<div><a class=\"loginlogout\" href=\"'+pklist+'/edit\" style=\"text-decoration:none\">Revertir</a></div>'\n \n return value", "def on_detail(self, request, board_id):\n detailed_info = {\n 'creator': self.redis.get('creator:board:' + board_id).decode('utf-8'),\n 'text': self.redis.get('board:' + board_id).decode('utf-8'),\n 'time': self.redis.get('time:board:' + board_id).decode('utf-8'),\n 'board_id': board_id\n }\n return self.render_template('details.html', detailed_info=detailed_info, comments=self.get_comments(board_id))", "def actor_detail(request, id_):\n\n template = \"actor_detail.html\"\n analyst = request.user.username\n (new_template, args) = get_actor_details(id_,\n analyst)\n if new_template:\n template = new_template\n return render_to_response(template,\n args,\n RequestContext(request))", "def get_details(self):", "def detail(request, event_id):\n event = get_object_or_404(Event, pk=event_id)\n user = request.user\n return render(request, 'kvent/event-detail.html', {'event': event, 'user': user})", "def detail(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n return render(request, 'problemfinder/details.html', {'question': question})", "def committee_show(request, pk):\n committee = Committee.objects.get(pk=pk)\n\n delegates = Delegate.objects.filter(committee_id=pk)\n\n context = {\"committee\": committee, \"delegates\": delegates}\n template = \"jurycore/committee_show.html\"\n return render(request, template, context)", "def details(request):\n\treturn render(request, 'ExcelApp/main.html')", "def view(self):\n raise NotImplementedError", "def _show(self, **kwargs):\n\n resource_name = self._get_resource_name(**kwargs)\n\n return self._make_request(\n uri='%s/%s' % (self._metadata['uri'], resource_name)\n )", "def action(self,item):\r\n pass", "def _get_action(self):\n return self.__action", "def detail(request, article_id):\n return render(request, 'knowledgebase/detail.html', {'article_id': article_id})", "def get_action(self):\n return self.__action", "def case_detail_view(request, pk):\n issue = _get_issue(request, pk)\n tenancy = 
_get_tenancy(issue)\n notes = _get_issue_notes(request, pk)\n context = {\n \"issue\": IssueDetailSerializer(issue).data,\n \"tenancy\": TenancySerializer(tenancy).data,\n \"notes\": IssueNoteSerializer(notes, many=True).data,\n \"details\": _get_submitted_details(issue),\n \"actionstep_url\": _get_actionstep_url(issue),\n \"urls\": get_detail_urls(issue),\n \"permissions\": {\n \"is_paralegal_or_better\": request.user.is_paralegal_or_better,\n \"is_coordinator_or_better\": request.user.is_coordinator_or_better,\n },\n }\n return render_react_page(request, f\"Case {issue.fileref}\", \"case-detail\", context)", "def show(self, item_id):\n pass", "def post(self, request, pk):\n action_key = request.POST.get(\"action\")\n _, method = self.actions[action_key]\n getattr(self, method)()\n return HttpResponseRedirect(reverse(\"event_admin\", kwargs={\"pk\": pk}))", "def detail(request, pk):\n mineral = get_object_or_404(Mineral, pk=pk)\n return render(request, 'detail.html', {'mineral': mineral})", "def details(self):\n pass", "def detail(): \n\n # get contentid\n content_id = request.args.get('contentid')\n\n # get shortest places\n title, places = get_shortest(content_id)\n print(content_id)\n\n return render_template('detail.html', \n title=title,\n content_id=content_id,\n places=places, \n count=len(places))", "def getAction1(self, state):\n util.raiseNotDefined()", "def get_action(self):\n return self.current_action", "def name(self):\n return \"action_news_cnn\"", "def detail_template(self):\n return '{}/{}.html'.format(self.object_name, self.detail_endpoint)", "def retrieve(self,request , pk=None):\r\n return Response({'HTTP method':'GET'})", "def action(self) -> Optional[str]:\n return pulumi.get(self, \"action\")", "def action(self) -> Optional[str]:\n return pulumi.get(self, \"action\")", "def action(self) -> Optional[str]:\n return pulumi.get(self, \"action\")", "def listing_view(self, request):\n self._object = self.get_page_for_url(request)\n if self._object is not None:\n self.kwargs.update({'pk': self._object.pk})\n # pylint: disable=attribute-defined-outside-init\n self.action = 'detail_view'\n return self.detail_view(request, pk=self._object.pk)\n return super().listing_view(request)", "def name(self):\n return \"action_news_abc\"", "def get(self):\n self.render('view.html')", "def select_action(self):\n pass", "def GetView(self):\r\n return self.model.GetView()", "def show(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"show\"), kwargs)", "def show(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"show\"), kwargs)", "def show(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"show\"), kwargs)", "def show(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"show\"), kwargs)", "def show(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"show\"), kwargs)", "def show(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"show\"), kwargs)", "def show(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"show\"), kwargs)", "def show(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"show\"), kwargs)", "def action_list(request, employee_id=None):\n if employee_id:\n employee = Employee.objects.get(pk=employee_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n else:\n 
employee = request.user.employee_user.first()\n actions = employee.action_set.all()\n return TemplateResponse(\n request,\n 'mus/action_list.html',\n dict(\n actions=actions,\n employee=employee\n )\n )", "def choose_action(self):\r\n pass", "def post(self, request, *args, **kwargs):\n getattr(self, kwargs['action'])()\n return HttpResponse()", "def actions(self):\n raise NotImplementedError", "def overview():\n # TODO: fix ajax https://groups.google.com/d/msg/web2py/YyVilc2ywdg/ZLtN3Gg3Ft0J\n # TODO: fix ?plain link in results\n from plugin_introspect import get_task_code\n lesson = request.args[0] # controller with lesson contents\n # lesson = request.vars.lesson_controller # controller with lesson contents\n fun_names = exposed_functions_names( controller=lesson )\n exposed_functions = generate_exposed_functions_info( controller=lesson )\n examples_codes = [ get_task_code(code=exposed_functions[f]['code'], task_key=lesson+'/'+f, decorate=True) for f in fun_names ]\n results_urls = [ URL(lesson, f, vars=dict(plain=1)) for f in fun_names ]\n return response.render('tutor.html', dict(lesson=lesson, fun_names=fun_names, examples_codes=examples_codes, results_urls=results_urls) )", "def test_view_detail_as_method(self):\n url = reverse(\n \"django-admindocs-views-detail\",\n args=[\"django.contrib.admin.sites.AdminSite.index\"],\n )\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)", "def action_spec(self):\r\n pass", "def __actions__(self, obj):\n\t\t\tprimary_fields \t= self.__provider__.get_primary_fields(self.__entity__)\n\t\t\tpklist \t\t= '/'.join(map(lambda x: str(getattr(obj, x)), primary_fields))\n\n\t\t\tvalue \t\t= '<div>'\n\t\t\tif has_permission('editar_LB'):\n\t\t\t\tvalue = value + '<div><a class=\"edit_link\" href=\"'+pklist+'/edit\" style=\"text-decoration:none\">edit</a></div>'\n\t\t\tif has_permission('eliminar_LB'):\n\t\t\t\tvalue = value + '<div><form method=\"POST\" action=\"'+pklist+'\" class=\"button-to\"><input type=\"hidden\" name=\"_method\" value=\"DELETE\" /><input class=\"delete-button\" onclick=\"return confirm(\\'Est&aacute; seguro que desea eliminar?\\');\" value=\"delete\" type=\"submit\" style=\"background-color: transparent; float:left; border:0; color: #286571; display: inline; margin: 0; padding: 0;\"/></form></div>'\n\t\t\tvalue = value + '</div>'\n\t\t\treturn value", "def action_view_mo_delivery(self):\n self.ensure_one()\n action = self.env[\"ir.actions.actions\"]._for_xml_id(\"stock.action_picking_tree_all\")\n pickings = self.mapped('picking_ids')\n if len(pickings) > 1:\n action['domain'] = [('id', 'in', pickings.ids)]\n elif pickings:\n form_view = [(self.env.ref('stock.view_picking_form').id, 'form')]\n if 'views' in action:\n action['views'] = form_view + [(state,view) for state,view in action['views'] if view != 'form']\n else:\n action['views'] = form_view\n action['res_id'] = pickings.id\n action['context'] = dict(self._context, default_origin=self.name, create=False)\n return action", "def view(self) -> str:\n return pulumi.get(self, \"view\")", "def command_view(arguments):\n global current_mode, current_name\n current_mode = Mode.links\n current_name = arguments[0]\n return 'Now viewing entity \"' + current_name + '\"'", "def retrieve(self,request, pk = None):\n return Response({'http_method': 'GET'})", "def challenge_detail_view(request, pk):\n challenge = OffChallenge.objects.get(id=pk)\n officer_name = challenge.officer.get_full_name()\n requester_name = challenge.requester.get_full_name()\n\n # check 
whether the viewer of page is the officer who gave the challenge\n viewer_is_the_officer = challenge.officer == request.user\n # check whether the viewer of page is an officer\n if viewer_is_the_officer:\n review_link = request.build_absolute_uri(\n reverse(\"candidate:challengeconfirm\", kwargs={ 'pk' : pk }))\n else:\n review_link = None\n context = {\n \"challenge\" : challenge,\n \"officer_name\" : officer_name,\n \"requester_name\" : requester_name,\n \"viewer_is_the_officer\" : viewer_is_the_officer,\n # viewer_is_an_officer is already added as a context variable with a context processor\n \"review_link\" : review_link,\n }\n return render(request, \"candidate/challenge_detail.html\", context=context)", "def info(self, id):", "def getAction(self, state):\n raiseNotDefined()", "def actions():\n pass", "def show(self):\n raise NotImplementedError", "def show(self):\n raise NotImplementedError", "def view_animal(self):\n self._view_animal()", "def detail(request, target_id):\n temp_values = {\n \"subscroll\":True,\n }\n return render(request, 'server/detail.html', temp_values)", "def show(self):\n\t\traise NotImplementedError()", "def action_to_pretty_str(action) :\n raise NotImplementedError" ]
[ "0.67003417", "0.6600345", "0.6600345", "0.6525399", "0.6474398", "0.6430569", "0.6430569", "0.6430569", "0.6398873", "0.6335036", "0.62369657", "0.62285584", "0.6216931", "0.6206771", "0.61830616", "0.61691725", "0.6162804", "0.6153047", "0.61520946", "0.60394746", "0.60298425", "0.6023776", "0.60130507", "0.59975535", "0.59950846", "0.5987625", "0.5987625", "0.5985095", "0.5970366", "0.5970366", "0.5970366", "0.5970366", "0.5970366", "0.5970366", "0.59240466", "0.59234697", "0.59070534", "0.58870214", "0.5876542", "0.5875501", "0.5864835", "0.5852719", "0.5851221", "0.5834667", "0.58333284", "0.58232987", "0.5822411", "0.58117497", "0.57957125", "0.57832485", "0.5761282", "0.57603836", "0.57494766", "0.5731769", "0.5716535", "0.57028383", "0.5697114", "0.5682707", "0.56771326", "0.56670874", "0.5643907", "0.564221", "0.5638159", "0.5638159", "0.5638159", "0.5636952", "0.5636774", "0.5634626", "0.56339437", "0.56307906", "0.5614566", "0.5614566", "0.5614566", "0.5614566", "0.5614566", "0.5614566", "0.5614566", "0.5614566", "0.5614104", "0.5610975", "0.55933046", "0.5591185", "0.5591146", "0.5576805", "0.5574777", "0.557162", "0.55642974", "0.5558596", "0.55456656", "0.5543282", "0.55362463", "0.55330867", "0.5532942", "0.55326647", "0.55236673", "0.55236673", "0.5516836", "0.55146855", "0.55113655", "0.550924" ]
0.73323816
0
View for creating action
def action_add(request, employee_id=None):
    if employee_id:
        employee = Employee.objects.get(pk=employee_id)
        current_employee = Employee.objects.get(user__pk=request.user.pk)
        if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:
            raise PermissionDenied()
    else:
        employee = request.user.employee_user.first()
    if request.method == 'POST':
        form = ActionForm(request.POST)
        if form.is_valid():
            form.save(request.user, employee)
            return HttpResponseRedirect('/action/%d' % form.instance.pk)
    else:
        form = ActionForm()
    return TemplateResponse(
        request,
        'mus/action_edit.html',
        dict(
            form=form
        )
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def goto_create(self):\n\n self.create.click()", "def create(self):", "def create(self, *args, **kwargs):\n pass", "def create(self):\n ...", "def new_result():\n return ActionResult()", "def create_view(request, title, modelform, **kwargs):\n instance_form = modelform(request.POST or None)\n if instance_form.is_valid():\n instance = instance_form.save(commit=False)\n for default in kwargs.keys():\n setattr(instance, default, kwargs[default])\n instance.save()\n messages.success(request, _(\"%s was created.\") % instance)\n return redirect(instance.get_absolute_url())\n return form(\n {**kwargs, \"form\": instance_form, \"action_name\": _(\"Create\"), \"title\": title},\n \"deployments/form.html\",\n request,\n )", "def post(self, request, *args, **kwargs):\n return super().create(request, *args, **kwargs)", "def create(self, request, action, *args, **kwargs):\n\t\tallowed_actions = ['get_token']\n\t\tif action in allowed_actions:\n\t\t\treturn getattr(self, '_create_' + action)(request, *args, **kwargs)\n\t\treturn rc(rcs.BAD_REQUEST)", "def action(self):\n pass", "def action(self):\n pass", "def create():\n pass", "def help_create(self):\n print(CREATE)", "def post(self, request, *args, **kwargs):\n return super().create(*args, **kwargs)", "def create(action_data, page):\n return Action.objects.create(**{\n \"action\": action_data,\n \"page\": page\n })", "def create_actions_template(name):\n template = Template(ACTIONS_TEMPLATE)\n msg = template.render(name=name)\n return msg", "def create_action(instance, verb, user):\n return instance.activities.create(action=verb, owner=user)", "def create(self):\n pass", "def create(self):\n pass", "def create(self):\n pass", "def action_type(self):", "def _formulate_action(Action, **kwargs):\n\n return Action(**kwargs)", "def _post(self, request_obj):\n return self._execute_action(request_obj, [CreateAction, EditAction], 'POST')", "def create():", "def create():", "def create(self):\n\n pass", "def KLP_Institution_Management_Create(request):\n\tbuttonType = request.POST.get('form-buttonType')\n #before Institution_Mangement.objects.all()\n\tKLP_Institution_Management_Create = KLP_Institution_Management(queryset = Institution_Management.objects.filter(pk=0), permitted_methods = ('GET', 'POST'), responder = TemplateResponder(template_dir = 'viewtemplates', template_object_name = 'InstitutionManagement',extra_context={'buttonType':buttonType}), receiver = XMLReceiver(),)\n\tresponse = KLP_Institution_Management_Create.responder.create_form(request,form_class=Institution_Management_Form)\n\t\n\treturn HttpResponse(response)", "def new(self):\n flash_message(_(\"success message\"), \"success\")\n flash_message(_(\"warning message\"), \"warning\")\n flash_message(_(\"error message\"), \"error\")\n flash_message(_(\"notice message\"), \"notice\")\n return render('/derived/rock/new.mako')", "def create_action(self, name):\n action = self.get_action(name)\n if action is None:\n try:\n action = self.action_model()\n action.name = name\n self.get_session.add(action)\n self.get_session.commit()\n return action\n except Exception as e:\n log.error(const.LOGMSG_ERR_SEC_ADD_PERMISSION.format(e))\n self.get_session.rollback()\n return action", "def _Create(self):\n pass", "def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "def 
createAction(self):\n self.createProjectAction = QtGui.QAction(self.tr(\"&New Project\"), self)\n self.createProjectAction.setShortcut(QtGui.QKeySequence.New)\n self.createProjectAction.setStatusTip(self.tr(\"Create a new project\"))\n self.connect(self.createProjectAction, QtCore.SIGNAL(\"triggered()\"), self, QtCore.SLOT(\"newProject()\"))\n\n self.openProjectAction = QtGui.QAction(self.tr(\"&Open...\"), self)\n self.openProjectAction.setShortcut(QtGui.QKeySequence.Open)\n self.openProjectAction.setStatusTip(self.tr(\"Open an existing project\"))\n self.connect(self.openProjectAction, QtCore.SIGNAL(\"triggered()\"), self, QtCore.SLOT(\"openProject()\"))\n\n self.saveProjectAction = QtGui.QAction(self.tr(\"&Save\"), self)\n self.saveProjectAction.setShortcut(QtGui.QKeySequence.Save)\n self.saveProjectAction.setStatusTip(self.tr(\"Save the current project\"))\n self.connect(self.saveProjectAction, QtCore.SIGNAL(\"triggered()\"), self, QtCore.SLOT(\"save()\"))\n\n self.importVideoAction = QtGui.QAction(self.tr(\"&Import video...\"), self)\n self.importVideoAction.setStatusTip(self.tr(\"Import a video into your project\"))\n self.connect(self.importVideoAction, QtCore.SIGNAL(\"triggered()\"), self, QtCore.SLOT(\"importVideo()\"))\n\n self.aboutAction = QtGui.QAction(self.tr(\"&About\"), self)\n self.aboutAction.setStatusTip(self.tr(\"Show the credits and authors\"))\n self.connect(self.aboutAction, QtCore.SIGNAL(\"triggered()\"), self, QtCore.SLOT(\"showAbout()\"))", "def render_creation_form(request: Request):\n return templates.TemplateResponse(\"creation_form.html\",{'request': request})", "def issueCreate(request):\n args = { 'statusForm' : forms.itemStatusForm(), }\n return render_to_string('issueCreate.html', args,\n context_instance=RequestContext(request))", "def admincreate(object):\n if request.method == \"POST\":\n\n db = get_db()\n execute_string = 'INSERT INTO ' + object.title()\n\n if object == 'post':\n execute_string += '(title, content, authorId, categoryId) VALUES (\"' + request.form['title'] + '\", \"' + request.form[\"content\"] + '\", \"' + request.form[\"authorid\"] + '\", \"' + request.form[\"categoryid\"] + '\")'\n elif object == 'author':\n execute_string += '(name) VALUES (\"' + request.form['name'] + '\")'\n elif object == 'category':\n execute_string += '(name, description) VALUES (\"' + request.form['name'] + '\", \"' + request.form[\"description\"] + '\")'\n\n db.execute(execute_string)\n db.commit()\n return redirect(url_for(\"adminview\", object=object))\n\n return render_template(\"new.html\", object=object, item={})", "def action(self, action_id):\r\n return Action(self, action_id)", "def action(self, action_id):\r\n return Action(self, action_id)", "def create(owner):\n data = request_content(request)\n resource = logic.resource.create(owner, data)\n return redirect(url_for('.get', owner=owner, \n resource=resource.name))", "def make_form(self):", "def new(self, *args, **kw):\n\n\t\t\tif len(args) > 0:\n\t\t\t\tkw['id_fase_fk']= args[0] \n\n\t\t\ttmpl_context.widget = self.new_form\n\t\t\tretorno \t\t= dict(value = kw, model = self.model.__name__)\n\t\t\tretorno['fid']\t= args[0]\n\n\t\t\treturn retorno", "def register_create_view(self, blueprint):\n view = apply_decorators(self.create_view, self.create_decorators)\n blueprint.add_url_rule(\n self.create_rule, self.create_endpoint, view,\n methods=['GET', 'POST'])", "def create_test_action(context, **kw):\n action = get_test_action(context, **kw)\n action.create()\n return action", "def 
get_context_data(self, **kwargs):\n context = super(CRUDCreateView, self).get_context_data(**kwargs)\n\n context.update({\n 'model_verbose_name': self.form_class._meta.model._meta.verbose_name,\n 'model_verbose_name_plural': self.form_class._meta.model._meta.verbose_name_plural,\n })\n\n #try:\n context['fields'] = utils.get_fields(self.form_class._meta.model)\n # except AttributeError:\n # context['fields'] = utils.get_fields(self.form_class._meta.model)\n\n if hasattr(self, 'object') and self.object:\n for action in utils.INSTANCE_ACTIONS:\n try:\n url = reverse(\n utils.crud_url_name(self.form_class._meta.model, action),\n kwargs={'pk': self.form_class._meta.object.pk})\n except NoReverseMatch:\n url = None\n context['url_%s' % action] = url\n\n for action in utils.LIST_ACTIONS:\n try:\n url = reverse(\n utils.crud_url_name(self.form_class._meta.model, action)\n )\n except NoReverseMatch:\n url = None\n context['url_%s' % action] = url\n\n return context", "def custom_actions(self, form_wizard_entry, request=None):", "def create(ctx):\n pass", "def custom_actions(self, form_entry, request=None):", "def create_action(self, action: Action, query_params: Dict[str, object] = None) -> Action:\n if query_params is None:\n query_params = {}\n\n path_params = {\n }\n\n path = Template(\"/action/v1beta2/actions\").substitute(path_params)\n url = self.base_client.build_url(path)\n data = action.to_dict()\n response = self.base_client.post(url, json=data, params=query_params)\n return handle_response(response, Action)", "def todos_create_page():\n todo = Todo()\n if todo.form_submit():\n todo.update(mongo.db)\n print('Created new TODO: {text}'.format(**todo.doc))\n return redirect('/')\n else:\n return render_template(\n template_name_or_list='todo.html',\n todo=todo,\n handle='Create')", "def add_view( *args, **kwargs ):", "def actionURL(self):\n raise NotImplementedError()", "def post(self, request, cluster_id, action_id):\n cluster = check_obj(Cluster, cluster_id, 'CLUSTER_NOT_FOUND')\n check_obj(\n Action,\n {'prototype': cluster.prototype, 'id': action_id},\n 'ACTION_NOT_FOUND'\n )\n serializer = self.serializer_class(data=request.data, context={'request': request})\n return create(serializer, action_id=int(action_id), selector={'cluster': cluster.id})", "def goto_create_course(self):\n\n self.create.click()", "def post(self, request, *args, **kwargs):\n getattr(self, kwargs['action'])()\n return HttpResponse()", "def new(self, *args, **kw):\n id_tipo_item = UrlParser.parse_id(request.url, \"tipositems\")\n url_action = \"./\"\n\n pp = PoseePermiso('redefinir tipo item', id_tipo_item=id_tipo_item)\n if not pp.is_met(request.environ):\n flash(pp.message % pp.nombre_permiso, 'warning')\n redirect(atras)\n tmpl_context.widget = self.new_form\n return dict(value=kw, \n page=u\"Nuevo Atributo\", \n action=url_action, \n atras=url_action)", "def btn_create_reco(self):\n\t\tprint()\n\t\tprint('OH - btn_create_reco')\n\n\t\t# Init\n\t\tres_id = self.id\n\t\tres_model = _model_treatment\n\t\tview_id = self.env.ref('openhealth.treatment_2_form_view').id\n\n\t\t# Open\n\t\treturn {\n\t\t\t# Mandatory\n\t\t\t'type': _model_action,\n\t\t\t'name': 'Open Treatment Current',\n\t\t\t# Window action\n\t\t\t'priority': 1,\n\t\t\t'res_id': res_id,\n\t\t\t'res_model': res_model,\n\t\t\t#'view_id': view_id,\n\t\t\t# Views\n\t\t\t#\"views\": [[False, \"form\"]],\n\t\t\t\"views\": [[view_id, \"form\"]],\n\t\t\t'view_mode': 'form',\n\t\t\t'target': 'current',\n\t\t\t#\"domain\": [[\"patient\", \"=\", 
self.patient.name]],\n\t\t\t#'auto_search': False,\n\t\t\t'flags': {\n\t\t\t\t\t\t#'form': {'action_buttons': True, 'options': {'mode': 'edit'}}\n\t\t\t\t\t\t'form': {'action_buttons': False, }\n\t\t\t\t\t},\n\t\t\t'context': {\n\t\t\t\t\t\t#'default_treatment': treatment_id,\n\t\t\t\t\t}\n\t\t}", "def create():\n if request.method == 'POST':\n if request.form.get('title') and request.form.get('content'):\n entry = Entry.create(\n title = request.form.get('title'),\n content = request.form.get('content'),\n published = request.form.get('published') or False)\n flash('Entry created successfully!', 'success')\n if entry.published:\n return redirect(url_for('detail', slug=entry.slug))\n else:\n return redirect(url_for('edit', slug=entry.slug))\n else:\n flash('Title and Content are required!', 'danger')\n return render_template('create.html')", "def post(self):\r\n data = request.form\r\n return create(data=data)", "def _action(self):\n pass", "def create(*args):", "def create(self,request):\n return CustomAuthToken().post(request)", "def test_perform_create(self):\n\n response = self.client.post(reverse('action-list'), data=self.data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(response.data['name'], self.data['name'])\n self.assertTrue(len(response.data['institution']), self.data['institution'])", "def create(self, request, *args, **kwargs):\n return super(SubtaskViewSet, self).create(request, *args, **kwargs)", "def community_post_create_view(request):\n task = \"Create New\"\n form = AddEditPostForm() # An unbound form\n\n if request.method == 'POST': # If the form has been submitted...\n form = AddEditPostForm(request.POST, request.FILES) # A form bound to the POST data\n if form.is_valid(): # All validation rules pass\n post = form.save(commit=False) # Create a new object from the form, but don't save it to the database\n post.author = request.user # Set the author to the current user\n post.save() # Save the object to the database\n slug_str = \"%s %s\" % (post.title, post.date_posted) # Create a slug from the title and date\n post.slug = slugify(slug_str) # Create the slug\n post.save() # Save the object to the database\n return redirect('community-home') # Redirect to the home page\n\n context = { # Pass the variables to the template\n 'task': task,\n 'form': form,\n }\n return render(request,\n 'pages/patient-community/community-create-update-post.html',\n context) # render the patient community create post page", "def go_to_create_tag():\n\n posts = Post.query.all()\n return render_template('tags/new.html', posts=posts)", "def CreateModel(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def create(self, request, *args, **kwargs):\n return super(FacilityViewSet, self).create(request, *args, **kwargs)", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_create_action(self):\n pass", "def action_detail(request, action_id):\n employee = request.user.employee_user.first()\n action = Action.objects.get(pk=int(action_id))\n # if not employee.isEnsoUser() and employee.company.pk != action.employee.company.pk:\n if not employee.hasAccessTo(action.employee):\n raise PermissionDenied()\n\n if request.method == 'POST':\n form = ActionCommentForm(request.POST)\n if form.is_valid():\n form.save(request.user, action)\n return HttpResponseRedirect('/action/%s' % action_id)\n else:\n form = ActionCommentForm()\n return TemplateResponse(\n request,\n 'mus/action_detail.html',\n dict(\n action=action,\n 
form=form\n )\n )", "def view(self):", "def crear_turno(request):\n if request.method == 'POST':\n attention = AttentionType.objects.get(name=request.POST['tipo_atencion'])\n try:\n last_attention = InitialAttention.objects.filter(\n attention_type=attention,\n created__contains=timezone.now().date()\n ).order_by('-id_initial_attention')[0]\n except Exception as e:\n last_attention = None\n\n if last_attention and (timezone.now().date() == last_attention.created.date()):\n initial_atention = InitialAttention.objects.create(\n attention_number=last_attention.attention_number+1,\n attention_type=attention,\n created=timezone.now()\n )\n else:\n initial_atention = InitialAttention.objects.create(\n attention_number=1,\n attention_type=attention,\n created=timezone.now()\n )\n serializer = InitialAttentionSerializers(initial_atention)\n check_alert()\n return JSONResponse(serializer.data, status=201)", "def _generate_actions(self) -> list:\n pass", "def action_spec(self):\r\n pass", "def newItem():\n if request.method == 'POST':\n db.createItem(\n title=request.form['title'],\n description=request.form['description'],\n category_id=request.form['category'],\n user_id=login_session['user_id'])\n flash(\"New catalog item created!\", 'success')\n return redirect(url_for('showCatalog'))\n return render_template('new_item.html', categories=db.getAllCategories())", "def create(self, request, *args, **kwargs):\n response = super(ProviderViewSet, self).create(request, *args, **kwargs)\n response.data['message'] = \"Provedor ha sido creado\"\n return response", "def name(self):\n return \"action_news_cnn\"", "def Create(self):\n raise NotImplementedError()", "def get(self):\n\n self.render(\"newpost.html\", user=self.user)", "def create(cls, *args: Any, **kwargs: Any) -> \"Tab\":", "def help_create(self):\n print(\"create instances\")", "def create_action(self, parent):\n return QtGui.QAction(parent)", "def new_parameter(request, **_kwargs):\n return create_view(request, _(\"Parameter\"), ParameterForm)", "def post(self, request, *args, **kwargs):\n response = super().post(request, *args, **kwargs)\n response.status_code = status.HTTP_201_CREATED\n return response", "def add_view(self, request):\r\n instance_form = self.get_minimal_add_form()\r\n form = instance_form(request.POST, request.FILES, prefix=self.base_url())\r\n\r\n new_instance = None\r\n if form.is_valid():\r\n new_instance = form.save()\r\n template = select_template(self.item_add_template)\r\n context = RequestContext(request)\r\n context.update({\r\n \"insert\": self,\r\n \"form\": form,\r\n \"object\": new_instance\r\n })\r\n response = HttpResponse(template.render(context))\r\n response.status_code = 201\r\n return response\r\n response = HttpResponse(form.errors)\r\n response.status_code = 400\r\n return response", "def get_action(self, context):\n pass", "def post(self, request, *args, **kwargs):\n return self.render_to_response(self.get_context_data())", "def mk_easy_field(self):\n easy_options = self.controller.get_minefield_options()[\"easy\"]\n self.controller.set_difficulty(easy_options)\n return Action(\"goto generating view\", [])", "def __actions__(self, obj):\n\t\t\tprimary_fields \t= self.__provider__.get_primary_fields(self.__entity__)\n\t\t\tpklist \t\t= '/'.join(map(lambda x: str(getattr(obj, x)), primary_fields))\n\n\t\t\tvalue \t\t= '<div>'\n\t\t\tif has_permission('editar_LB'):\n\t\t\t\tvalue = value + '<div><a class=\"edit_link\" href=\"'+pklist+'/edit\" style=\"text-decoration:none\">edit</a></div>'\n\t\t\tif 
has_permission('eliminar_LB'):\n\t\t\t\tvalue = value + '<div><form method=\"POST\" action=\"'+pklist+'\" class=\"button-to\"><input type=\"hidden\" name=\"_method\" value=\"DELETE\" /><input class=\"delete-button\" onclick=\"return confirm(\\'Est&aacute; seguro que desea eliminar?\\');\" value=\"delete\" type=\"submit\" style=\"background-color: transparent; float:left; border:0; color: #286571; display: inline; margin: 0; padding: 0;\"/></form></div>'\n\t\t\tvalue = value + '</div>'\n\t\t\treturn value", "def create_page(self):", "def new_recipe(request, **_kwargs):\n return create_view(request, _(\"Recipe\"), RecipeForm)", "def actions():\n pass", "def goto_make_new_user():\n\n return render_template('users/new.html')", "def post(self, request, pk):\n action_key = request.POST.get(\"action\")\n _, method = self.actions[action_key]\n getattr(self, method)()\n return HttpResponseRedirect(reverse(\"event_admin\", kwargs={\"pk\": pk}))", "def obtain_action(self):\r\n\t\treturn", "def name(self):\n return \"action_news_abc\"", "def create(request, model, decorator = lambda x:x,\r\n post_save_redirect='', template_name=''):\r\n \r\n FormClass = decorator(\r\n forms.form_for_model(\r\n model,\r\n fields = get_allowed_fields(request, model),\r\n ),\r\n request,\r\n )\r\n \r\n template_name = template_name or _make_template_name(model, 'form')\r\n\r\n if request.method == 'POST':\r\n form = FormClass(request.POST)\r\n if form.is_valid():\r\n record = form.save(commit = False)\r\n record.account = request.account\r\n record.created_by = request.person\r\n record.save()\r\n return HttpResponseRedirect(\r\n post_save_redirect or record.get_absolute_url()\r\n )\r\n else:\r\n form = FormClass()\r\n return render_to_response(\r\n template_name,\r\n context_instance = RequestContext(\r\n request,\r\n {'form': form}\r\n )\r\n )", "def get(self):\n if self.user:\n self.redirect_to('secure', id=self.user_id)\n params = {\n \"action\": self.request.url,\n }\n return self.render_template('create_user.html', **params)", "def actioncluster_add(request):\n if request.method == 'POST':\n form = ActionClusterForm(request.POST, request.FILES)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.owner = request.user\n instance.save()\n form.save_m2m()\n mailer.notify_request(instance)\n messages.success(\n request, 'The action cluster \"%s\" has been added.' % instance.name)\n return redirect(instance.get_absolute_url())\n else:\n form = ActionClusterForm()\n context = {\n 'form': form,\n }\n return TemplateResponse(request, 'actionclusters/object_add.html', context)", "def KLP_Boundary_Create(request):\n\n # Checking user Permissions\n\n KLP_user_Perm(request.user, 'Boundary', 'Add')\n buttonType = request.POST.get('form-buttonType')\n KLP_Create_Boundary = \\\n KLP_Boundary(queryset=Boundary.objects.filter(pk=0),\n permitted_methods=('GET', 'POST'),\n responder=TemplateResponder(template_dir='viewtemplates'\n , template_object_name='boundary',\n extra_context={'buttonType': buttonType}),\n receiver=XMLReceiver())\n\n response = KLP_Create_Boundary.responder.create_form(request,\n form_class=Boundary_Form)\n\n return HttpResponse(response)", "def log_create(action, *args, **kw):\n from olympia.activity.models import ActivityLog\n\n return ActivityLog.create(action, *args, **kw)", "def create(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"create\"), kwargs)" ]
[ "0.6654641", "0.6543866", "0.6499162", "0.6441966", "0.64285094", "0.63419896", "0.6320924", "0.630966", "0.6293948", "0.6293948", "0.62471634", "0.6235429", "0.620226", "0.61985743", "0.6198156", "0.6169896", "0.6168643", "0.6168643", "0.6168643", "0.61534095", "0.6133898", "0.6124634", "0.6108718", "0.6108718", "0.61048675", "0.60637003", "0.60380876", "0.6036932", "0.60226154", "0.60009474", "0.60009474", "0.60009474", "0.59737486", "0.596551", "0.5950618", "0.5947587", "0.59403396", "0.59403396", "0.5915804", "0.5908338", "0.5896955", "0.58956975", "0.5881074", "0.5873889", "0.5838209", "0.583171", "0.58204514", "0.5818266", "0.58175546", "0.58138055", "0.5807059", "0.58031315", "0.57899904", "0.578651", "0.5778474", "0.57783896", "0.57755005", "0.5774004", "0.57724005", "0.5767475", "0.5759739", "0.57580364", "0.5738005", "0.5735984", "0.5731051", "0.5722342", "0.57214755", "0.57165", "0.5714398", "0.5709333", "0.5702588", "0.56937444", "0.5693106", "0.56916124", "0.56846917", "0.5684036", "0.56762147", "0.5676079", "0.5666749", "0.566446", "0.56600124", "0.56464046", "0.563957", "0.56386125", "0.5637167", "0.5636418", "0.5635413", "0.56338614", "0.5632596", "0.56181175", "0.56127065", "0.5611979", "0.5609785", "0.5606464", "0.55944735", "0.5580785", "0.55759823", "0.5556157", "0.5555269", "0.5548945", "0.55438745" ]
0.0
-1
Create a leader model for employees
def create_leader_model(request, company_id):
    errors = {'noactions': []}
    company = Company.objects.get(pk=company_id)
    currentEmpl = Employee.objects.get(user__pk=request.user.pk)
    """:type : Employee """
    if not currentEmpl.isEnsoUser() and currentEmpl.company.pk != company.pk:
        raise PermissionDenied()
    if currentEmpl.isCompanySuperUserOrHigher():
        employeeQS = Employee.objects.filter(
            company__pk=company_id
        )
    else:
        employeeQS = Employee.objects.filter(
            Q(manager=currentEmpl),
            company__pk=company_id
        )
    form = MultiLeaderModelForm(request.POST or None)
    form.fields['employees'].queryset = employeeQS
    if request.method == 'POST' and form.is_valid():
        employees = form.cleaned_data['employees']
        """:type : list[Employee] """
        pdf_response = get_leader_model_pdf(currentEmpl, employees)
        if isinstance(pdf_response, HttpResponse):
            return pdf_response
        else:
            errors = pdf_response
            print(errors)
    return TemplateResponse(
        request,
        'mus/create_leader_model.html',
        {
            'form': form,
            'company': company,
            'errors': errors
        }
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle(self, *args, **kwargs):\n seeder = Seed.seeder()\n seeder.add_entity(User, 20)\n\n seeder.add_entity(EmployeeMptt, 20, {\n 'user': lambda x: User.objects.filter(employeemptt=None).first(),\n 'parent': lambda x: EmployeeMptt.objects.order_by(\"?\").first(),\n 'level': lambda x: random.randint(0, 4),\n })\n seeder.execute()", "def create(self, request):\n serializer = data_serializers.TeamLeaderOrEmployeeRequestDataSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n try:\n new_team_entity = self.controller.assign_team_leader(request_data=request_data)\n serializer = data_serializers.TeamLeaderPresenterSerializer(new_team_entity)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (\n domain_exceptions.TeamDoesNotExist,\n domain_exceptions.TeamHasALeader,\n domain_exceptions.EmployeeDoesNotExist\n )as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def leader(self):\n pass", "def leader(self):\n pass", "def generateEmployees(self):\r\n\r\n # Name\r\n maleNames = ['Perry Lovan', 'Horacio Arvidson', 'Gale Skipworth', 'Joshua Lodge', 'Noble Shutter', 'Kristopher Talor', 'Jarod Harrop', 'Joan Henrichs', 'Wilber Vitiello', 'Clayton Brannum', 'Joel Sennett', 'Wiley Maffei', 'Clemente Flore', 'Cliff Saari', 'Miquel Plamondon', 'Erwin Broadus', 'Elvin Defibaugh', 'Ramon Vaquera', 'Roberto Koval', 'Micah Sumter', 'Wyatt Cambareri', 'Jamal Delarosa', 'Franklyn Hayles', 'Riley Haslett', 'Robt Fincher', 'Abraham Denzer', 'Darius Jude', 'Phillip Sunderman', 'August Kindel', 'Jospeh Mawson', 'Damion Postma', 'Gregorio Pasco', 'Rosendo Downing', 'Chance Plascencia', 'Jewell Pankratz', 'Jerrell Tarrance', 'Michal Bliss', 'Josue Larocque', 'Aaron Harpster', 'Zack Hildebrant', 'Frank Souders', 'Lindsay Bechard', 'Agustin Marks', 'Mathew Fredericksen', 'Ivan Hanline', 'Michael Otto', 'Max Oberlander', 'Ricky Mckellar', 'Bernard Friedt', 'King Lorentzen']\r\n femaleNames = ['Lorretta Vansickle', 'Loura Steimle', 'Neomi Fritz', 'Vernie Vanderveen', 'Dede Poehler', 'Margarete Espinoza', 'Leda Leonardo', 'Fae Strand', 'Nichol Winford', 'Danika Ridgeway', 'Elvira Balentine', 'Sharell Xie', 'Sheree Booker', 'Emely Conine', 'Justina Kleve', 'Pia Maxton', 'Sophia Lark', 'Nilsa Albee', 'Felipa Seman', 'Jeraldine Watkins', 'Susann Sowards', 'Asha Irion', 'Shay Koran', 'Rosio Jahn', 'Rachal Slaven', 'Beryl Byron', 'Jona Lira', 'Margert Strite', 'Talia Beauregard', 'Jacqueline Vella', 'Rolande Mccready', 'Margret Hickerson', 'Precious Confer', 'Evita Nicolai', 'Fredda Groner', 'Laquanda Bracken', 'Alana Saddler', 'Melania Harring', 'Shae Everette', 'Marlyn Mcfalls', 'Madeline Nicols', 'Fonda Webster', 'Fumiko Steffy', 'Virginia Sprinkle', 'Lula Frisch', 'Mari Mulherin', 'Alecia Remillard', 'Jeanna Halderman', 'Ocie Waldrep', 'Theresa Knouse']\r\n\r\n for i in range(self.num_of_employees):\r\n\r\n # Clock in an hour before opening, 6 hours after, or 12 hours after\r\n clockIn = random.choice([7, 13, 19])\r\n\r\n # Clock out after 5 hours, 10 hours, or 15 hours\r\n clockOut = random.choice([13, 19, 23])\r\n while clockOut <= clockIn:\r\n clockOut = random.choice([13, 19, 23])\r\n\r\n # Hourly wage\r\n wage = random.choice([8, 9, 10, 12, 20])\r\n\r\n gender = random.choice(['M', 'F'])\r\n if gender == 'M':\r\n name = random.choice(maleNames)\r\n else:\r\n name = random.choice(femaleNames)\r\n\r\n self.c.execute(\"INSERT INTO 
Employee (Name, ClockIn, ClockOut, Wage) VALUES (?, ?, ?, ?)\", (name, clockIn, clockOut, wage))\r\n self.conn.commit()\r\n\r\n if self.print_employees:\r\n print(\"\\nName:\", name)\r\n print(\"Clock in:\", clockIn)\r\n print(\"Clock out:\", clockOut)\r\n print(\"Wage:\", wage)", "def create(self, request):\n serializer = data_serializers.TeamLeaderOrEmployeeRequestDataSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n try:\n respond_data = self.controller.add_team_employee(request_data=request_data)\n serializer = data_serializers.PresentTeamEmployeeDataSerializer(respond_data)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (\n domain_exceptions.TeamDoesNotExist,\n domain_exceptions.EmployeeDoesNotExist,\n domain_exceptions.EmployeeIsATeamMember\n )as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def leaderboard(self):\n pass", "def get_leader_model_pdf(currentEmpl, employees):\n\n lm = LeaderModel()\n employee_actions = {}\n legend = []\n colors = {}\n errors = {'noactions': []}\n # numbered_actions = {}\n\n for empl in employees:\n\n if not currentEmpl.hasAccessTo(empl):\n raise PermissionDenied()\n\n actions = empl.action_set.all()\n\n if not len(actions):\n errors['noactions'].append(empl)\n continue\n\n lkey = empl.user.first_name + \" \" + empl.user.last_name\n legend.append(lkey)\n\n if not lkey in employee_actions:\n employee_actions[lkey] = {}\n\n for action in actions:\n\n if not action.difficulty or not action.type:\n errors['noactions'].append(empl)\n continue\n\n circle_number = lm.addCircle(action)\n latest_comment = action.getLatestComment()\n\n employee_actions[lkey][circle_number] = {\n 'name': action.title,\n 'type': action.type,\n 'difficulty': action.getDifficultyText(),\n 'comment': latest_comment\n }\n\n if lkey not in colors:\n color = lm.getEmployeeColors(empl.id)\n colors[lkey] = \"rgb({}, {}, {})\".format(color[0], color[1], color[2])\n\n if len(errors['noactions']):\n return errors\n\n lm_filename = path.join(settings.STATIC_ROOT, \"leadermodel_{}.png\".format(currentEmpl.id))\n lm.writeImage(lm_filename)\n\n #\n # Write PDF\n\n pdfFilename = path.join(settings.FILES_ROOT, \"leadermodel_{}.pdf\".format(currentEmpl.id))\n template = get_template('mus/leader_model_pdf.html')\n context = Context({\n 'site_url': settings.SITE_URL,\n 'lm_filename': lm_filename,\n 'employee_actions': employee_actions,\n 'colors': colors,\n 'legend': legend\n })\n\n html = template.render(context)\n # html = html.replace('<li>','<li><img class=\"square\" src=\"http://test.nxtlvl.dk/static/img/square.png\" />')\n result = open(pdfFilename, 'wb')\n pisa.pisaDocument(StringIO.StringIO(\n html.encode(\"UTF-8\")), dest=result)\n result.close()\n\n wrapper = FileWrapper(file(pdfFilename))\n response = HttpResponse(wrapper, content_type='application/pdf')\n response['Content-Disposition'] = 'attachment;filename=ledermodel.pdf'\n response['Content-Length'] = os.path.getsize(pdfFilename)\n\n return response\n # return HttpResponseRedirect('/employee/all/%d' % int(company_id))", "def create_emp(self, name, pos, dept):\n if pos.upper() == 'MANAGER':\n self.create_manager(name, pos, dept)\n elif pos.upper() == 'SENIOR':\n self.create_senior(name, pos, dept)\n elif pos.upper() == 'JUNIOR':\n self.create_junior(name, pos, dept)\n else:\n self.create_trainee(name, pos, dept)", "def __init__(self, 
employee_id, name, supervisor_id, lft, rgt):\n self.employee_id = employee_id\n self.name = name\n self.supervisor_id = supervisor_id\n self.lft = lft\n self.rgt = rgt", "async def elect_leader( request ):\n resource = request.match_info['resource']\n node = request.match_info['node']\n ttl = int( request.match_info['ttl'] )\n leader_election = await create_leader_election( redises, resource, node, ttl )\n try:\n leader = await leader_election.elect_leader()\n return web.json_response( {\"leader\": leader} , status = 200 )\n except Exception as ex:\n print(ex)\n return web.json_response( {\"error\": \"fail to elect leader\" }, status = 501 )", "def create(self, validated_data):\n user_data = validated_data.pop('user')\n user = UserSerializer.create(UserSerializer(), validated_data=user_data)\n employee, created = Employee.objects.update_or_create(user=user,\n employee_id=validated_data.pop('employee_id'),\n location=validated_data.pop('location'),\n avail_start_time= str(validated_data.pop('avail_start_time')),\n avail_end_time= str(validated_data.pop('avail_end_time')))\n return employee", "def create_employee(self):\n try:\n name = input(\"Enter name: \")\n if not name.isalpha():\n print(\"Invalid data format. Name should contain only alphabets. \")\n return False\n email = input(\"Enter email: \")\n if not InputValidations.validate_email(email):\n return False\n employee = EmployeeModel(name=name, email=email)\n self.admin_repository.create_employee(employee)\n print(\"Employee created successfully!\")\n return True\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def setUp(self):\n self.employee = Employee('Lucas', 'Guerra', 45000)", "def setUp(self):\n self.my_employee = Employee('knight', 'lee', 10000)", "def setUp(self):\n\n self.user = self.make_user()\n self.employee = Employee.objects.create(\n cpf=\"974.220.200-16\",\n user=self.user,\n departament=Employee.ADMINISTRATION\n )", "def setUp(self):\n self.salary = 40000\n self.custom_rise = 7500\n self.employee = Employee(\"Carlos\", \"Zapata\", self.salary)", "def AddLeader(self,*pntArray,annotation=None,type=None):\n\t\treturn self.Space.AddLeader(VtVertex(*pntArray),annotation,type)", "def find_leader(self):\r\n # Initialize the leader fitness as an arbitrarly bad value\r\n leaderFitness = -(2**63)\r\n \r\n for number in range(POPULATION_SIZE):\r\n if self.population[number].current_fitness > leaderFitness:\r\n leaderFitness = self.population[number].current_fitness\r\n self.leader = number", "def add_user(user):\n new_user = models.Leaderboard(username=user, score=100)\n db.session.add(new_user)\n db.session.commit()\n all_people = models.Leaderboard.query.all()\n users = []\n for person in all_people:\n users.append(person.username)\n return users", "def take_leader(self):", "def setUp(self):\n\t\tfirst_name = 'Gerson'\n\t\tlast_name = 'Santos'\n\t\tannual_salary = 5000\n\t\tself.gerson = Employee(first_name, last_name, annual_salary)", "def becomeLeader(self):\n logging.info('become leader for term {}'.format(self.current_term))\n\n # no need to wait for heartbeat anymore\n self.election_timer.cancel()\n\n self.role = 'leader'\n self.leader_id = self.datacenter_id\n # keep track of the entries known to be logged in each data center\n # note that when we are in the transition phase\n # we as the leader need to keep track of nodes in\n # the old and the new config\n self.loggedIndices = dict([(center_id, 0)\n for center_id in self.getAllCenterID()\n if center_id != 
self.datacenter_id])\n # initialize a record of nextIdx\n self.nextIndices = dict([(center_id, self.getLatest()[1]+1)\n for center_id in self.getAllCenterID()\n if center_id != self.datacenter_id])\n\n self.sendHeartbeat()\n self.heartbeat_timer = Timer(self.heartbeat_timeout, self.sendHeartbeat)\n self.heartbeat_timer.daemon = True\n self.heartbeat_timer.start()", "def setUp(self):\n\tself.emp = Employee('Lin',10000)\n\tself.emp2 = Employee('Jun',20000)", "def add_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee(id, name, email, office, extra_info, picture_location, research_group, '\n 'title, is_external, is_admin, is_active) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);',\n (obj.e_id, obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active))\n\n self.dbconnect.commit()\n return obj\n except:\n self.dbconnect.rollback()\n raise", "def create_employee(self,personal_identity):\r\n new_emp = Employee(*personal_identity)\r\n registration_str = new_emp.get_registration_str()\r\n\r\n return_value = self.save_object_to_DB(\"employee\",registration_str)\r\n return return_value", "def create(self, request):\n serializer = data_serializers.CreateEmployeeSerializer(data=request.data)\n\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n print(F\"Request employee Data: {serializer.data}\")\n\n try:\n new_employee = self.controller.create_employee(request_data=request_data)\n serializer = data_serializers.PresentEmployeeDataSerializer(new_employee)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (domain_exceptions.EmployeeIDIsNotUnique,\n domain_exceptions.WorkArrangementPercentageOutOfRange,\n domain_exceptions.TeamDoesNotExist,\n domain_exceptions.TeamHasALeader,\n domain_exceptions.WorkArrangementPercentageNull\n ) as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def makeLeader(self, node_id):\n self.graph.addTriple(\n node_id, self.annotation_properties['clique_leader'], True,\n object_is_literal=True, literal_type='xsd:boolean')\n return", "def creat_team(self):\n te = Teams()\n per = Persons()\n teamlist = []\n for one in per.find({'role':'leader'},{'team_name'}):\n if one['team_name'] not in teamlist:\n teamlist.append(one['team_name'])\n # print len(teamlist)\n for team in teamlist:\n tmp = {'name': '', 'leader_email': '', 'person_emails': []}\n tmp['name'] = team\n tmp['leader_email'] = per.get_one({'team_name':team,'role':'leader'})['email']\n for one in per.find({'team_name':team},{'email'}):\n tmp['person_emails'].append(one['email'])\n print tmp\n search_t = te.get_one({'name':team})\n if search_t is None:\n te.insert_one(tmp)\n else:\n te.update_one({'name':team,'leader_email':'','person_emails':''},tmp,cover=True)", "def _update_leader(self):", "async def create_leader_election( redises, resource, node, ttl ):\n return asyncleaderelection.LeaderElection( redises, resource, id = node, ttl = ttl )", "def employees_earning(table):\n\n product_index = 1\n employee_id_index = 2\n amount_sold_index = 4\n\n person_id_index = 0\n person_name_index = 1\n\n game_index = 0\n price_index = 3\n\n store_table = store.get_table()\n store.check_table(store_table)\n hr_table = hr.get_table('model/hr/persons.csv')\n money_earned = {}\n for person in hr_table:\n person_id = 
person[person_id_index]\n person_name = person[person_name_index]\n money_earned[person_name] = 0\n for record in table:\n product_id = record[product_index]\n employee_id = record[employee_id_index]\n amount_sold = int(record[amount_sold_index])\n if person_id == employee_id:\n for game in store_table:\n game_id = game[game_index]\n if game_id == product_id:\n game_price = int(game[price_index])\n money_earned[person_name] += int(amount_sold * game_price)\n return money_earned", "def main():\n name = input(\"Please enter in your name: \")\n\n \"\"\"Ask the user to enter a number if they are a Director, Manager or Staff.\"\"\"\n \"\"\"This will check and make sure the user only enters in 1,2, \n or 3 and a number greater than zero\"\"\"\n while True:\n try:\n designation_number = int(input(\"Please enter in \\n1 for Director \"\n \"\\n2 for Manager \\n3 for Staff\\n\"))\n if 0 < designation_number <= 3:\n break\n print(\"Invalid number entered.\")\n except Exception as e:\n print(e)\n \"\"\"Gets the user salary and makes sure is a number and greater than 0\"\"\"\n while True:\n try:\n salary = float(input(\"Please enter in your salary: \"))\n if salary <= 0:\n print(\"Your salary must be at least 1 dollar. Please enter a number greater than zero.\")\n else:\n break\n except ValueError:\n print(\"Oops! That was not a valid number. Try again...\")\n\n \"\"\"Create Employee\"\"\"\n employee1 = employee.Employee()\n employee1.set_name(name)\n employee1.set_designation(designation_number)\n employee1.set_salary(salary)\n print(employee1)", "def update_role(self):\n all_leader = []\n user_records = self.info\n per = Persons()\n for record in user_records:\n if record['leader'] not in all_leader:\n all_leader.append(record['leader'])\n # print len(all_leader)\n # print all_leader\n for leader in all_leader:\n # print leader\n fil = per.get_one({'dn':leader})\n # print fil\n if fil is None:\n print 'this leader %s is not in our db,please check' % leader\n else:\n per.update_one({'dn':leader},{'role':'leader'})", "def add_employee(schema, employee_json):\n employee = schema.load(employee_json, session=db.session)\n db.session.add(employee)\n db.session.commit()\n return employee", "def setUp(self):\n\t\tself.mason = Employee(\"mason\",\"karsevar\",10000)", "def leaderboard():\n # Get leaderboard and user information\n leaderboard, current_user_info = gdb.getleaderboard(current_user.userID)\n # Get top gainer leaderboards\n weektopgainers, monthtopgainers = gdb.gettopgainers()\n # Render template\n return render_template('leaderboard.html',\n leaderboard=leaderboard,\n current_user_info=current_user_info,\n weektopgainers=weektopgainers,\n monthtopgainers=monthtopgainers,\n userbalance=current_user.balance)", "def create_educator(data):\n\n educator = Educator(\n name=data['name'],\n work_email=data['work_email'],\n organization_name=data['organization_name'],\n org_or_school=data['org_or_school'],\n address_line_1=data['address_line_1'],\n address_line_2=data['address_line_2'],\n city=data['city'],\n state=data['state'],\n zipcode=data['zipcode'],\n num_students=data['num_students']\n )\n educator.save()\n return educator", "def leader_list():\n\n add_trainee_form = AddTraineeForm()\n return render_template(\n \"leaders_list.html\",\n add_trainee_form=add_trainee_form,\n title=\"Encadrants\",\n )", "def test_employee_creation(self):\n helper = EmployeeHelper(name='Andrew', hired_on='2019-10-01T00:00:00', salary=50000, department_id=1)\n\n # Returned result is an OrderedDict\n result = 
self.client.execute(helper.get_create_employee_query())['data']['createEmployee']['employee']\n\n self.assertEqual(result['name'], helper.name)\n self.assertEqual(result['hiredOn'], helper.hired_on)\n self.assertEqual(result['salary'], helper.salary)\n self.assertEqual(result['departmentId'], helper.department_id)", "def get_employee_training(employee_id):\n with sqlite3.connect(Connection.db_path) as conn:\n conn.row_factory = model_factory(TrainingProgramEmployee)\n\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n SELECT\n *\n FROM\n hrapp_trainingprogramemployee te\n WHERE\n te.employee_id = ?\n \"\"\", (employee_id, ))\n\n return db_cursor.fetchall()", "def add_employee(self, first_name, last_name):\n self.switch_main_menu(\"PIM\")\n self.click_menu(\"Add Employee\")\n self.pim = AddEmployee(self.driver)\n self.pim.add_user_employee(first_name, last_name)", "async def leaderboard(self, ctx) -> None:\n await ctx.send(\n \"\",\n embed=NumEmbed(\n title=\"Points Leaderboard\",\n fields=self.bot.points_leaderboard.field_representation,\n user=ctx.author,\n ),\n )", "def setUp(self):\n self.employee = Employee('John', 'Doe', 50000)\n self.raise_amount = 20000", "def create_manager(self, name, pos, dept):\n self.manager[dept.upper()].append(\n {\n 'name': name,\n 'pos': pos,\n 'dept': dept,\n 'senior': [],\n 'junior': [],\n 'trainee': []\n }\n )", "def view_sales_leaderboard():\n # Later will add the ability to sort by date and Category\n try:\n with session_scope() as db_session:\n # Added filters by date\n users = db_session.query(User).all()\n leaderboard = []\n\n for user in users:\n username = user.username\n sales = 0\n products = db_session.query(Product).filter(Product.user_id == user.id).all()\n for product in products:\n order_lines = db_session.query(OrderLine).filter(OrderLine.product_id == product.id)\n for order_line in order_lines:\n sales = sales + order_line.quantity\n seller = SellerRecord(username, sales)\n leaderboard.append(seller)\n # Sort the entries\n leaderboard.sort(reverse=True)\n first_ten = []\n for i in range(min(10, len(leaderboard))):\n first_ten.append(leaderboard[i].to_json())\n\n except DBAPIError as db_error:\n # Returns an error in case of a i/sales/ntegrity constraint not being followed.\n return {\n 'code': 400,\n 'message': re.search('DETAIL: (.*)', db_error.args[0]).group(1)\n }, 400\n\n except NoResultFound:\n return {\n 'code': 400,\n 'message': \"No sales have been registered\"\n }, 400\n return {\n \"top_sellers\": first_ten\n }, 200", "def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, employee.last_name, employee.grade, payscale.salary))", "def test_new_model_creation(self):\n initial_count = Employer.objects.count()\n self.new_employer.save()\n new_count = Employer.objects.count()\n self.assertNotEqual(initial_count, new_count)\n\n self.name2 = 'employe223'\n self.new_employee = Employee(\n name=self.name2, employer=self.new_employer)\n self.new_employee.save()\n self.assertEqual(len(Employee.objects.all()), 1)", "def create_employee(attributes):\n neccessary_keys = [\"empid\", \"gender\", \"sales\", \"bmi\", \"salary\", \"birthday\",\n \"age\"]\n for key in neccessary_keys:\n if not key in attributes.keys():\n raise 
ValueError(\"employee could not be created: {} is missing\".format(key))\n return Employee(attributes[\"empid\"], attributes[\"gender\"],\n attributes[\"sales\"], attributes[\"bmi\"],\n attributes[\"salary\"], attributes[\"birthday\"],\n attributes[\"age\"])", "def create(self, vals):\n if not vals.get('nik_number'):\n vals['nik_number'] = self.generate_nik(vals)\n return super(Employee, self).create(vals)", "def add_employee(self, empl):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee values(default,%s,%s,%s,%s,%s,%s,%s,%s)',\n (empl.name, empl.email, empl.office, empl.research_group, empl.title, empl.internOrExtern,\n empl.active, empl.promotor))\n cursor.execute('SELECT LASTVAL()')\n eid = cursor.fetchone()[0]\n empl.id = eid\n # get id and return updated object\n self.dbconnect.commit()\n except(Exception, self.dbconnect.get_error()) as error:\n self.dbconnect.rollback()\n raise Exception('\\nUnable to save Employee!\\n(%s)' % (error))", "def test_030_premier_league_normalised_points(self):\n\n def create_premier_league_normalised_points_model_fn(fn_team: str):\n team_stat = Stats.n_sample_stats_for_team(cursor=db_in_cursor,\n team=fn_team,\n last_sample_date=self.model_date,\n n_samples=self.num_samples,\n normalize_by_matches=True)\n\n #  Approximate premier league rank, by using points and a tiny nudge (compared to points) from goal\n # difference to separate those on the same points.\n return FeatureModel(input_data=team_stat,\n id=team_stat.team_name,\n feature_model_making_fn=lambda stat: stat.points\n )\n\n for match_date in played_home_OR_away_before_dates:\n ####\n #  Build model up to the day before the match\n ####\n self.model_date = match_date - timedelta(days=1)\n self.num_samples = num_matches_in_season\n\n models: {str: FeatureModel} = FeatureModel.create_models_for_all_teams(\n model_making_fn=create_premier_league_normalised_points_model_fn, entities=teams)\n\n self.persist_models(model_gen_date=self.model_date, model_description=self.shortDescription(),\n models=models)\n\n self.make_and_store_predictions_for_date(match_date=match_date, models=models)", "def setUpTestData(cls):\n countries = [\"MX\", \"CHL\", \"USA\", \"PER\", \"COL\"]\n slack_user_ids = [\"UP0918MAV\", \"UP0918MAV\", \"UP0918MAV\", None, None]\n cls.menu = Menu.objects.create(available_on=date.today())\n for count in range(5):\n user = User.objects.create(username=f\"johny.doe {count}\")\n Employee.objects.create(\n user=user, country=countries[count], slack_user_id=slack_user_ids[count]\n )", "def createEmployee(firstName, lastName, ssn, salary):\n employee = Employee(firstName, lastName, ssn, salary)\n # verify\n if firstName != employee.firstName or \\\n lastName != employee.lastName or \\\n ssn != employee.ssn or \\\n salary != employee.salary:\n raise ValueError(\"Failed to initialize Employee\")\n return employee", "def create(self, vals):\n record = super(KaHrPayrollRapelEmployeePromote, self).create(vals)\n if not 'name' in vals or not vals.get('name'):\n record.name = \"Rapel {0}\".format(record.new_employee_promote_id.name)\n return record", "def create_employee_structure(employees):\n employees_dict = {}\n for employee in position_sort(employees):\n if not employee.is_secretary:\n adder(employees_dict, employee.prosecutors_office, {'employees': [], 'departments': {}, 'divisions': {}})\n if employee.prosecutors_office and employee.department and employee.division:\n adder(employees_dict[employee.prosecutors_office]['departments'], employee.department, 
{})\n adder(employees_dict[employee.prosecutors_office]['departments'][employee.department], 'divisions', {})\n adder(employees_dict[employee.prosecutors_office]['departments'][employee.department]['divisions'], employee.division, [])\n employees_dict[employee.prosecutors_office]['departments'][employee.department]['divisions'][employee.division].append(employee)\n elif employee.prosecutors_office and employee.department:\n adder(employees_dict[employee.prosecutors_office]['departments'], employee.department, {})\n adder(employees_dict[employee.prosecutors_office]['departments'][employee.department], 'employees', [])\n employees_dict[employee.prosecutors_office]['departments'][employee.department]['employees'].append(employee)\n elif employee.prosecutors_office and employee.division:\n adder(employees_dict[employee.prosecutors_office]['divisions'], employee.division, [])\n employees_dict[employee.prosecutors_office]['divisions'][employee.division].append(employee)\n elif employee.prosecutors_office:\n employees_dict[employee.prosecutors_office]['employees'].append(employee)\n return employees_dict", "def retrieve(self, request, pk=None):\n team_leader = self.get_team_leader_object(pk)\n serializer = data_serializers.TeamLeaderPresenterSerializer(team_leader)\n return Response(serializer.data, status=status.HTTP_201_CREATED)", "def create(learner, model_list, groups=None, T=None, **kwargs):\n if learner == \"dorm\":\n ol = DORM(model_list, groups, T) \n elif learner == \"dormplus\":\n ol = DORMPlus(model_list, groups, T) \n elif learner == \"adahedged\":\n ol = AdaHedgeD(model_list, groups, T, reg=\"adahedged\") \n elif learner == \"dub\":\n ol = AdaHedgeD(model_list, groups, T, reg=\"dub\") \n else: \n raise ValueError(f\"Unknown learning algorithm {learner}.\")\n\n return ol", "def post(self, request):\n data = request.data\n skill_data = data.pop('skills')\n Department_name = data.pop('department')\n department = Department.objects.get(name=Department_name)\n manager_name = data.pop('manager')\n manager = Manager.objects.get(name=manager_name)\n Employee = EmployeeDetail.objects.create(department=department, manager=manager, **data)\n Employee.save()\n for skill in skill_data:\n skill_add, create = Skill.objects.get_or_create(name=skill)\n Employee.skills.add(skill_add)\n return Response(\n data=request.data\n )", "def test_access_employee(self):\n # Employee can't see any SO\n with self.assertRaises(AccessError):\n self.order.with_user(self.company_data['default_user_employee']).read()\n # Employee can't edit the SO\n with self.assertRaises(AccessError):\n self.order.with_user(self.company_data['default_user_employee']).write({'team_id': self.company_data['default_sale_team'].id})\n # Employee can't create the SO\n with self.assertRaises(AccessError):\n self.env['sale.order'].with_user(self.company_data['default_user_employee']).create({\n 'partner_id': self.partner_a.id,\n })\n # Employee can't delete the SO\n with self.assertRaises(AccessError):\n self.order.with_user(self.company_data['default_user_employee']).unlink()", "def _create(self, cursor, row):\n team = Team(name=row['name'])\n team.id = row['id']\n\n for u in self._get_users(cursor, team):\n team.add_user(u)\n\n return team", "def create_person(self):", "def post(self):\n try:\n employee = self.service.add_employee(self.schema, request.json)\n except ValidationError as error:\n return error.messages, 400\n return self.schema.dump(employee), 201", "def retrieve_teams():\n #print \"Print the number of teams and the members on 
team\"\n employee_list_total = []\n employee_number_list = []\n\n # List for keeping used numbers\n for temp in range(1000, 3000):\n employee_number_list.append([None, False]) \n\n # Read how many teams that shall be given\n stdin_input = sys.stdin.readline()\n \n try:\n # Test if input was numeric\n no_of_teams = int(stdin_input)\n \n input_rows = []\n \n # Read in all teams from stdin\n for i in range(0, no_of_teams):\n input_rows.append(sys.stdin.readline())\n \n except ValueError:\n print \"Error: Wrong input format\"\n sys.exit()\n\n for row in input_rows:\n # Split team into two members\n team = row.split()\n\n # Test if two members are given\n if len(team) != 2:\n print \"Error: Two team members must be given: Program will exit!\"\n sys.exit()\n\n temp_empl = [0, 0]\n \n try :\n # Loop both team members on row and check if the are in the list\n for i in range(0, 2):\n # Check for team on position teamnumber-1000\n if employee_number_list[int(team[i])-1000][1] == False:\n # Employee is not found in list, add it!\n temp_empl[i] = Employee(team[i]) \n employee_list_total.append(temp_empl[i])\n # Set employee to been found\n employee_number_list[int(team[i])-1000][1] = True\n # Set reference to the employee object \n employee_number_list[int(team[i])-1000][0] = temp_empl[i]\n else:\n # Retrive the employee object\n temp_empl[i] = employee_number_list[int(team[i])-1000][0]\n \n except ValueError:\n print \"Error: Input must be numeric. Program will exit!\"\n sys.exit()\n \n i = 0 \n for i in range(0, 2):\n # Add co_workers to respectivly employee\n if i == 0:\n temp_empl[i].add_co_worker(temp_empl[1])\n else:\n temp_empl[i].add_co_worker(temp_empl[0])\n \n # Return the list of employees\n return employee_list_total", "def train_teacher (nb_teachers, teacher_id):\n # Load the dataset\n X_train, X_test, y_train, y_test = models.get_dataset()\n\n print(X_train.shape)\n print(y_train.shape)\n print(X_test.shape)\n print(y_test.shape)\n \n # Retrieve subset of data for this teacher\n data, labels = partition.partition_dataset(X_train,\n y_train,\n nb_teachers,\n teacher_id)\n\n print(\"Length of training data: \" + str(len(labels)))\n\n # Define teacher checkpoint filename and full path\n\n filename = str(nb_teachers) + '_teachers_' + str(teacher_id) + '.hdf5'\n filename2 = str(nb_teachers) + '_teachers_' + str(teacher_id) + '.h5'\n \n # Perform teacher training need to modify \n \n\n # Create teacher model\n model, opt = models.create_two_layer_mlp(46) # num of cols\n model.compile(loss='binary_crossentropy',\n optimizer=\"Adam\",\n metrics=['accuracy'])\n model, hist = models.training(model, data, X_test, labels, y_test,filename)\n\n #modify\n model_json = model.to_json()\n with open(\"model.json\", \"w\") as json_file:\n json_file.write(model_json)\n# serialize weights to HDF5\n model.save_weights(filename2)\n print(\"Saved model to disk\")\n return True", "def __init__(self):\n self.id = None\n self.name = None\n self.phone = None\n self.score = 0 # Running sum of player's score\n self.state = None\n self.ball_id = None\n self.start_x = None # start pos of object thrown in game\n self.angle = 0 # angle of ball movement\n self.velocity = 0 # velocity of ball\n # leaderboard\n self.game_over = False\n self.date = str(datetime.date.today()) # required for leaderboard", "def get_manager_employees(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n manager_employees = Employee.objects.filter(manager=current_employee, development_plan_type=None).all()\n if 
manager_employees:\n emp_list=[]\n for emp in manager_employees:\n emp_data={}\n emp_data[\"id\"] = emp.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"manager_id\"] = emp.manager.id\n # emp_data[\"status_questions\"] = emp.status_questions\n # employee_role = EmployeeRole.objects.filter(employee=emp).all()\n # name_role_list = []\n # for obj in employee_role:\n # name_role_list.append(obj.role.name)\n # emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n data = {\"employees:\": emp_list}\n return JsonResponse(status=201, data=data)\n else:\n return JsonResponse(\"The user with id={} isn't a manager for any user\".format(current_employee.user.id),\n status=404)", "def learning_to_rank(self):\n self.error_throw('rank')\n\n instance = Instance(self.table_name)\n instance.addTable(Table_LTR(instance,False,'',''))\n if self.import_method == 'mysql': instance = self.mysql_handle(instance)\n elif self.import_method == 'csv': instance = self.csv_handle(instance)\n\n self.rank_learning(instance)\n\n self.rank_method = methods_of_ranking[1] # = 'learn_to_rank'", "def buildHierarchy(self, test_input):\n for entry in test_input:\n if entry['manager']not in self.relations:\n self.relations[entry['manager']] = Node(entry['manager'], entry['name'])\n else:\n self.relations[entry['manager']].employees.append(entry['name'])", "def create_senior(self, name, pos, dept):\n self.senior[dept.upper()].append(\n {\n 'name': name,\n 'pos': pos,\n 'dept': dept,\n 'manager': self.manager[dept.upper()][0]['name'],\n 'junior': [],\n 'trainee': []\n }\n )\n self.manager[dept.upper()][0]['senior'].append(\n {\n 'name': name,\n 'pos': pos,\n 'dept': dept\n }\n )", "def loadEmployees(testList):\n # define an empty employee List\n employeeList = []\n\n for item in testList:\n itemToAdd = None\n if item['type'] == 'employee':\n try:\n itemToAdd = createEmployee(item['firstName'],\n item['lastName'],\n item['SSN'],\n item['salary'])\n except ValueError:\n continue\n\n elif item['type'] == 'manager':\n try:\n itemToAdd = createManager(item['firstName'],\n item['lastName'],\n item['SSN'],\n item['salary'],\n item['title'],\n item['yearBonus'])\n except ValueError:\n continue\n # Add Employee/Manager Object to List\n if itemToAdd != None: # Note : this line will call Employee __eq__ to verify that it is not equal to None\n employeeList.append(itemToAdd)\n\n return employeeList", "def createGroup(listOfPerson):\n atk=Department()\n atk.members=listOfPerson\n return atk", "def create_trainee(self, name, pos, dept):\n self.trainee[dept.upper()].append(\n {\n 'name': name,\n 'pos': pos,\n 'dept': dept,\n 'manager': self.manager[dept.upper()][0]['name'],\n 'senior': self.senior[dept.upper()][0]['name'],\n 'junior': self.junior[dept.upper()][0]['name'],\n }\n )\n self.manager[dept.upper()][0]['trainee'].append(\n {\n 'name': name,\n 'pos': pos,\n 'dept': dept\n }\n )\n self.senior[dept.upper()][0]['trainee'].append(\n {\n 'name': name,\n 'pos': pos,\n 'dept': dept\n }\n )\n self.junior[dept.upper()][0]['trainee'].append(\n {\n 'name': name,\n 'pos': pos,\n 'dept': dept\n }\n )", "def create_junior(self, name, pos, dept):\n self.junior[dept.upper()].append(\n {\n 'name': name,\n 'pos': pos,\n 'dept': dept,\n 'manager': self.manager[dept.upper()][0]['name'],\n 'senior': self.senior[dept.upper()][0]['name'],\n 'trainee': []\n }\n )\n self.manager[dept.upper()][0]['junior'].append(\n {\n 'name': name,\n 'pos': 
pos,\n 'dept': dept\n }\n )\n self.senior[dept.upper()][0]['junior'].append(\n {\n 'name': name,\n 'pos': pos,\n 'dept': dept\n }\n )", "def main():\n # create a list of test employees and managers\n testList = [\n {'type': 'employee', 'firstName': 'Mickey', 'lastName': 'Mouse', 'SSN': '100-12-3456', 'salary': 1500.00},\n {'type': 'manager', 'firstName': 'Walt', 'lastName': 'Disney', 'SSN': '100-00-0000', 'salary': 5000.00,\n 'title': 'Head Of Disneyland', 'yearBonus': 1000.00},\n {'type': 'employee', 'firstName': 'Donald', 'lastName': 'Duck', 'SSN': '100-65-4321', 'salary': 1000.00},\n {'type': 'manager', 'firstName': 'Minnie', 'lastName': 'Mouse', 'SSN': '999-99-999', 'salary': 10000.00,\n 'title': 'Head Of Mouse HouseHold', 'yearBonus': 15000.00},\n {'type': 'manager', 'firstName': 'Daisy', 'lastName': 'Duck', 'SSN': '100-65-4321', 'salary': 12000.00,\n 'title': 'Head Of Duck HouseHold', 'yearBonus': 10000.00}]\n\n # Define percentRaise (0.1 == 10%)\n percentRaise = 0.1\n\n # Create Employees and Managers Object using the Test data\n employeeList = loadEmployees(testList)\n\n # Sort employee List, which will ustilize Employee's __lt__ and __eq__ methods\n employeeList.sort()\n\n # Loop over Employee and Manager Objects\n print(\"Employees and Manager should be sorted by last name, then first\\n\")\n for employee in employeeList:\n if type(employee) == Manager:\n print(\"Manager:\")\n else:\n print(\"Employee:\")\n # Print Employee or Manager\n print(employee)\n # Give Raise to Employee or Manager\n employee.giveRaise(percentRaise)\n # Print New Salary\n print(\"With %.2f%% Raise, Salary: $%.2f\\n\" % (percentRaise * 100, employee.salary))\n\n # Employee docStrings\n print(\"\\nEmployee docstring for each method\")\n print(\"Employee.__doc__=\" + Employee.__doc__)\n print(\"Employee.__init__.__doc__=\" + Employee.__init__.__doc__)\n print(\"Employee.giveRaise.__doc__=\" + Employee.giveRaise.__doc__)\n print(\"Employee.__str__.__doc__=\" + Employee.__str__.__doc__)\n print(\"Employee.__eq__.__doc__=\" + Employee.__eq__.__doc__)\n print(\"Employee.__lt__.__doc__=\" + Employee.__lt__.__doc__)\n\n print(\"\\nManger docstring for each method\")\n print(\n \"Since Manager inherits from Employee, several of the methods ('giveRaise', '__eq__' and '__lt__') and the corresponding docstring will originate from the Employee class\\n\")\n print(\"Manager.__doc__=\" + Manager.__doc__)\n print(\"Manager.__init__.__doc__=\" + Manager.__init__.__doc__)\n print(\"Manager.giveRaise.__doc__=\" + Manager.giveRaise.__doc__)\n print(\"Manager.__str__.__doc__=\" + Manager.__str__.__doc__)\n print(\"Manager.__eq__.__doc__=\" + Manager.__eq__.__doc__)\n print(\"Manager.__lt__.__doc__=\" + Manager.__lt__.__doc__)", "async def create(self):\n cur = self.sql.cur\n\n user = Client().get_server(self.server_id).get_member(self.user_id)\n\n self.nickname = user.nick if user.nick else user.name\n\n nickname = self.nickname\n trainer_id = str(uuid.uuid4())\n self.trainer_id = trainer_id\n now = datetime.datetime.now()\n user_id = self.user_id\n server_id = self.server_id\n\n self.current_zone_id = '86'\n self.current_building_id = None\n self.current_region_id = None\n\n cmd = \"\"\"INSERT INTO trainers\n (trainer_id,\n user_id,\n server_id,\n nickname,\n created_on)\n VALUES\n (:trainer_id,\n :user_id,\n :server_id,\n :nickname,\n :now)\"\"\"\n cur.execute(cmd, locals())\n\n cmd = \"\"\"INSERT INTO trainer_stats\n (trainer_id)\n VALUES\n (:trainer_id)\"\"\"\n cur.execute(cmd, locals())\n\n cmd = \"\"\"INSERT INTO 
trainer_data\n (trainer_id,\n current_region_id,\n current_zone_id,\n current_building_id)\n VALUES\n (:trainer_id,\n :current_region_id,\n :current_zone_id,\n :current_building_id)\"\"\"\n cur.execute(cmd, self.__dict__)\n\n cmd = \"\"\"INSERT INTO trainer_party\n (trainer_id)\n VALUES\n (:trainer_id)\"\"\"\n cur.execute(cmd, locals())\n\n await self.sql.commit(now=True)\n self.log.info(f\"New trainer has been born! Welcome {trainer_id}\")", "def main():\n db.connect()\n db.execute_sql('PRAGMA foreign_keys = ON;')\n db.create_tables([\n Job,\n Person\n ])\n\n people = [\n ('Andrew', 'Sumner', 'Andy'),\n ('Peter', 'Seattle', None),\n ('Susan', 'Boston', 'Beannie'),\n ('Steven', 'Colchester', None),\n ('Peter', 'Seattle', None),\n ]\n\n for person in people:\n try:\n with db.transaction():\n new_person = Person.create(\n person_name = person[0],\n lives_in_town = person[1],\n nickname = person[2],)\n new_person.save()\n\n except Exception as e:\n logger.info(f'Error creating person = {person[0]}')\n logger.info(e)\n logger.info('See how the datbase protects our data')\n\n for person in Person:\n person.show()\n\n jobs = [\n ('Analyst', '2017-02-01', '2019-07-31', 34.999, 'Andrew'),\n ('Developer', '2017-02-01', '2019-07-31', 34.999, 'Fred'),\n ]\n\n for job in jobs:\n try:\n with db.transaction():\n new_job = Job.create(\n job_name = job[0],\n start_date = job[1],\n end_date = job[2],\n salary = job[3],\n person_employed = job[4],\n )\n new_job.save()\n\n except Exception as e:\n logger.info(f'Error creating job = {job[0]}')\n logger.info(e)\n logger.info('See how the datbase protects data across tables')\n\n logger.info(\"don't forget - but can you find a better way?\")\n db.close()", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def test_set_leader(self):\n self._mock_api(200, u'234')\n #set w/o a TTL or a name\n self.assertEquals(self.client.election.set('/mysql'), u'234')\n self.assertEquals(self.client.election.set(\n '/mysql',\n name='foo.example.com',\n ttl=60), u'234')\n self._mock_api(500, 'leader name required')\n self.assertRaises(etcd.EtcdException, self.client.election.set,'/mysql')", "def employees(self, employees: object):\n\n self._employees = employees", "def make_hourly(self,rate,name):\n id = self.find_employee_id(name)\n if id in self.clsf:\n self.emp_dict[id][5] = \"1\"\n print(\"{}{}\".format(name,\" was successfully changed to be an hourly employee\"))\n self.emp_dict[id][8] = rate\n self.classification()\n return self.emp_dict\n else:\n print(\"Error- employee not found\")\n self.employee_data()", "def attempt_to_acquire_leader(self, permanent=False):", "def employee(self) -> object:\n return self._employee", "def init_predictions_table(model: Model, settings: Model) -> None:\n names = [\n \"Issue Key\",\n \"Priority\",\n \"Area of Testing\",\n \"Time to Resolve\",\n \"Summary\",\n ]\n\n for position, name in enumerate(names, 1):\n model.objects.create(\n name=name, is_default=True, position=position, settings=settings\n )", "def do_test_we_are_the_leader(self, h_is_leader, h_leader_set):\n states = r_state.r_get_states()\n r_state.remove_state(LEADER_STATE)\n no_leader = r_state.r_get_states()\n r_state.set_state(LEADER_STATE)\n leader = r_state.r_get_states()\n self.assertNotEquals(no_leader, leader)\n self.assertEquals(no_leader.union(set([LEADER_STATE])), leader)\n\n is_leader_call_count = h_is_leader.call_count\n leader_set_call_count = h_leader_set.call_count\n # is_leader() fails\n 
h_is_leader.return_value = False\n testee.we_are_the_leader()\n self.assertEquals(no_leader, r_state.r_get_states())\n self.assertEquals(is_leader_call_count + 1, h_is_leader.call_count)\n self.assertEquals(leader_set_call_count + 0, h_leader_set.call_count)\n\n def raise_fail(*args, **kwargs):\n \"\"\"\n Simulate a leader_set() failure.\n \"\"\"\n raise Exception(\"oops\")\n\n # is_leader() succeeds, but leader_set() fails\n h_is_leader.return_value = True\n h_leader_set.side_effect = raise_fail\n testee.we_are_the_leader()\n self.assertEquals(no_leader, r_state.r_get_states())\n self.assertEquals(is_leader_call_count + 2, h_is_leader.call_count)\n self.assertEquals(leader_set_call_count + 1, h_leader_set.call_count)\n\n self.lset_args = None\n self.lset_kwargs = None\n\n def record_leader_set_args(*args, **kwargs):\n \"\"\"\n Make sure leader_set() was invoked with the correct parameters.\n \"\"\"\n self.lset_args = args\n self.lset_kwargs = kwargs\n\n # ...and now it all works out\n h_is_leader.return_value = True\n h_leader_set.side_effect = record_leader_set_args\n testee.we_are_the_leader()\n self.assertEquals(leader, r_state.r_get_states())\n self.assertEquals(is_leader_call_count + 3, h_is_leader.call_count)\n self.assertEquals(leader_set_call_count + 2, h_leader_set.call_count)\n self.assertEquals((), self.lset_args)\n self.assertEquals(\n {\"charm_storpool_block_unit\": sputils.MACHINE_ID}, self.lset_kwargs\n )\n\n r_state.r_set_states(states)", "def this_needs_work_test_hook_leader_elected(\n self, h_is_leader, h_leader_set\n ):\n self.do_test_we_are_the_leader(h_is_leader, h_leader_set)", "def perform_create(self, serializer):\n # required for perform_create(); creates the score object in database\n score = serializer.save()\n\n # trigger update function for engine (bayes update if adaptive)\n log.debug(\"Triggering engine update from score\")\n engine = get_engine()\n engine.update_from_score(score.learner, score.activity, score.score)", "def add_employee(self, employee):\n self.employees.add(employee)", "def create_model(self):\n pass", "def create_model(self):\n pass", "def create_employee_from_applicant(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n hr_employee = self.pool.get('hr.employee')\n model_data = self.pool.get('ir.model.data')\n act_window = self.pool.get('ir.actions.act_window')\n emp_id = False\n for applicant in self.browse(cr, uid, ids, context=context):\n address_id = contact_name = False\n if applicant.partner_id:\n address_id = self.pool.get('res.partner').address_get(cr, uid, [applicant.partner_id.id], ['contact'])['contact']\n contact_name = self.pool.get('res.partner').name_get(cr, uid, [applicant.partner_id.id])[0][1]\n if applicant.job_id and (applicant.partner_name or contact_name):\n applicant.job_id.write({'no_of_hired_employee': applicant.job_id.no_of_hired_employee + 1})\n create_ctx = dict(context, mail_broadcast=True)\n\n pes=self.browse(cr,uid,ids)[0]\n coy=pes.partner_name\n\n ##### Susunan Keluarga ayah/ibu #####\n le=self.pool.get('hr_recruit.suskel1')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context)\n prod_ids=[] \n for pr in lele:\n prod_ids.append((0,0, {'name':pr.name,'kelamin':pr.kelamin,'kota_id':pr.kota_id.id,'tgl_lahir':pr.tgl_lahir,'type_id':pr.type_id.id,'pekerjaan':pr.pekerjaan,'susunan':pr.susunan}))\n \n ###### Susunan Keluarga Suami/istri #####\n le=self.pool.get('hr_recruit.suskel2')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n 
lele=le.browse(cr,uid,lel,context=context) \n prod_ids1=[] \n for pr in lele:\n prod_ids1.append((0,0, {'susunan':pr.susunan,'name':pr.name,'kelamin':pr.kelamin,'kota_id':pr.kota_id.id,'tgl_lahir':pr.tgl_lahir,'type_id':pr.type_id.id,'pekerjaan':pr.pekerjaan})) \n \n ###### riwayat Pendidikan #######\n le=self.pool.get('hr_recruit.rwt_pend')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids2=[] \n for pr in lele:\n prod_ids2.append((0,0, {'name':pr.name,'jurusan':pr.jurusan.id,'tempat':pr.tempat,'tahun_msk':pr.tahun_msk,'tahun_klr':pr.tahun_klr,'ijazah':pr.ijazah.id})) \n \n ###### bahasa ######\n le=self.pool.get('hr_recruit.bahasa')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids3=[] \n for pr in lele:\n prod_ids3.append((0,0, {'name':pr.name.id,'tulis':pr.tulis.id,'lisan':pr.lisan.id})) \n \n ##### Riwayat Pekerjaan ####\n le=self.pool.get('hr_recruit.rwt_krj')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids4=[] \n for pr in lele:\n prod_ids4.append((0,0, {'no':pr.no,'name':pr.name,'tempat':pr.tempat,'tahun_msk':pr.tahun_msk,'tahun_klr':pr.tahun_klr,'jabatan':pr.jabatan,'gaji':pr.gaji,'alasan':pr.alasan})) \n \n ###### Koneksi Internal #####\n le=self.pool.get('hr_recruit.kon1')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids5=[] \n for pr in lele:\n prod_ids5.append((0,0, {'employee_id':pr.employee_id.name,'alamat':pr.alamat,'job_id':pr.job_id.id,'telepon':pr.telepon})) \n \n ###### Koneksi Eksternal ####\n le=self.pool.get('hr_recruit.kon2')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids6=[]\n for pr in lele: \n prod_ids6.append((0,0, {'name':pr.name,'alamat':pr.alamat,'jabatan':pr.jabatan,'telepon':pr.telepon})) \n\n ####### create Employee ######## \n emp_id = hr_employee.create(cr, uid, {'name': applicant.partner_name or applicant.name,\n 'job_id': applicant.job_id.id,\n 'department_id' : applicant.department_id.id,\n 'address_id2' : applicant.job_id.address_id.id,\n #### informasi Probadi ####\n 'kelamin':applicant.jen_kel,\n 'blood' : applicant.blood,\n 'agama' : applicant.agama_id.id,\n 'birthday' : applicant.tgl_lahir,\n 'place_of_birth' : applicant.kota_id.name,\n 'marital':applicant.status,\n 'sjk_tanggal' : applicant.sjk_tanggal,\n 'mobile_phone':applicant.partner_phone,\n 'country_id' : applicant.country_id.id,\n\n #### Pendidikan ####\n 'type_id':applicant.type_id.id,\n 'bid_id':applicant.bidang_id.id,\n 'jurusan_id':applicant.jurusan_id.id,\n 'pt_id':applicant.pt_id.id,\n 'gelar_id':applicant.gelar_id.id,\n\n #### alamat DOmisili ####\n 'country_id1':applicant.country_id1.id,\n 'prov_id':applicant.prov_id.id,\n 'kab_id' : applicant.kab_id.id,\n 'kec_id':applicant.kec_id.id,\n 'alamat1' : applicant.alamat1,\n 'kodepos' :applicant.kode1,\n 'telp1' : applicant.telp1,\n\n #### kartu identitas ####\n 'jenis_id': applicant.jenis_id,\n 'ktp' : applicant.no_id,\n 'tgl_berlaku' : applicant.tgl_berlaku,\n # 'issued_id' : applicant.dikeluarkan.id,\n \n #### Alamat Sesuai KTP #### \n 'country_id2':applicant.country_id2.id,\n 'prov_id2':applicant.prov_id2.id,\n 'kab_id2':applicant.kab_id2.id,\n 'kec_id2':applicant.kec_id2.id,\n 'alamat2' : applicant.alamat2,\n 'kodepos1':applicant.kode2,\n 'telp2' : applicant.telp2,\n \n # 'status': applicant.status,\n #### IDS ####\n 'susunan_kel1_ids' : 
prod_ids,\n 'susunan_kel2_ids':prod_ids1,\n 'rwt_pend_ids':prod_ids2,\n 'bahasa_ids':prod_ids3,\n 'rwt_krj_ids':prod_ids4,\n 'koneksi1_ids':prod_ids5,\n 'koneksi2_ids':prod_ids6, \n })\n self.write(cr, uid, [applicant.id], {'emp_id': emp_id}, context=context)\n self.pool['hr.job'].message_post(\n cr, uid, [applicant.job_id.id],\n body=_('New Employee %s Hired') % applicant.partner_name if applicant.partner_name else applicant.name,\n subtype=\"hr_recruitment.mt_job_applicant_hired\", context=context)\n else:\n raise osv.except_osv(_('Warning!'), _('You must define an Applied Job and a Contact Name for this applicant.'))\n\n action_model, action_id = model_data.get_object_reference(cr, uid, 'hr', 'open_view_employee_list')\n dict_act_window = act_window.read(cr, uid, [action_id], [])[0]\n if emp_id:\n dict_act_window['res_id'] = emp_id\n dict_act_window['view_mode'] = 'form,tree'\n return dict_act_window", "def create_user(schools_dictionnary, domains_to_skills_dictionnary, companies, places, skills_oh, places_oh, domains_oh, rng, _id):\n\n age = rng.randint(20,60)\n schools = rng.choice(list(schools_dictionnary.keys()), rng.choice([1, 2], p = [0.95, 0.05]), replace = False) \n\n available_skills = list(set([skill for school in schools \\\n for domain in schools_dictionnary[school].domains \\\n for skill in domains_to_skills_dictionnary[domain]]))\n\n expo = np.round(rng.exponential(0.3) * len(schools)) + age // 17 + 1\n\n nb_skills_to_choose = min(int(expo), 5 + (len(schools) - 1) * 3)\n\n _skills = rng.choice(available_skills, nb_skills_to_choose, replace = False)\n\n company = rng.choice(companies)\n place = rng.choice(places)\n\n user = User(skills_oh, places_oh, domains_oh, schools_dictionnary, skills = _skills, age = age, place = place, company = company,\n schools = schools, _id = _id)\n\n return user", "def get_create_employee_query(self):\n template = \"\"\"\n mutation createEmployee {{\n createEmployee(input: {{ {params} }}) {{\n employee {{\n name\n hiredOn\n salary\n departmentId\n }}\n }}\n }}\n \"\"\"\n # Add input parameters as needed\n input_params = 'name:\"{}\",'.format(self.name)\n\n if self.hired_on is not None:\n input_params += 'hiredOn: \"{}\", '.format(self.hired_on)\n\n if self.salary is not None:\n input_params += 'salary: {}, '.format(self.salary)\n\n if self.department_id is not None:\n input_params += 'departmentId: {}'.format(self.department_id)\n\n return template.format(params=input_params)", "def get_leaderboard(request):\n\n includedUsers = User.objects.filter(hide_leaderboard=False, is_staff=False)\n\n # ordered list of points, index denoting leaderboard position (rank)\n # distinct values means that everyone with the same points has the same rank\n rankings = []\n for item in includedUsers.values(\"points\").distinct().order_by(\"-points\"):\n rankings.append(item[\"points\"])\n\n includedUsers = includedUsers.order_by(\"-points\")\n\n paginationData = []\n for user in includedUsers:\n # rank is the index of the users points +1 (converting from 0-indexing)\n data = {\"user\": user, \"rank\": rankings.index(user.points) + 1}\n paginationData.append(data)\n\n return JsonResponse(\n json_paginator(request, paginationData, lb_serializer),\n status=200,\n )", "def create_models( self ):", "async def post_leaderboard(\n self,\n ctx: commands.Context,\n leaderboard_type: Literal[\n \"season\",\n \"weekly\",\n \"worst\",\n \"playoffs\",\n \"playoffs_weekly\",\n \"pre-season\",\n \"pre-season_weekly\",\n ],\n ) -> None:\n leaderboard_type_str = 
leaderboard_type.replace(\"_\", \" \").title()\n leaderboard = await self.pickems_config.guild(ctx.guild).leaderboard()\n if leaderboard == {} or leaderboard is None:\n await ctx.send(_(\"There is no current leaderboard for this server!\"))\n return\n if leaderboard_type != \"worst\":\n leaderboard = sorted(\n leaderboard.items(), key=lambda i: i[1][leaderboard_type], reverse=True\n )\n else:\n leaderboard = sorted(\n leaderboard.items(), key=lambda i: i[1][\"total\"] - i[1][\"season\"], reverse=True\n )\n msg_list = []\n count = 1\n user_position = None\n total_str = {\n \"season\": \"total\",\n \"playoffs\": \"playoffs_total\",\n \"pre-season\": \"pre-season_total\",\n }.get(leaderboard_type, \"total\")\n\n for member_id in leaderboard:\n if str(member_id[0]) == str(ctx.author.id):\n user_position = leaderboard.index(member_id)\n member = ctx.guild.get_member(int(member_id[0]))\n if member is None:\n member_mention = _(\"User has left the server \") + member_id[0]\n else:\n member_mention = member.mention\n if leaderboard_type in [\"weekly\", \"playoffs_weekly\", \"pre-season_weekly\"]:\n points = member_id[1].get(leaderboard_type, 0)\n msg_list.append(\"#{}. {}: {}\\n\".format(count, member_mention, points))\n elif leaderboard_type in [\"season\", \"playoffs\", \"pre-season\"]:\n total = member_id[1].get(total_str, 0)\n wins = member_id[1].get(leaderboard_type, 0)\n try:\n percent = (wins / total) * 100\n except ZeroDivisionError:\n percent = 0.0\n msg_list.append(\n f\"#{count}. {member_mention}: {wins}/{total} correct ({percent:.4}%)\\n\"\n )\n else:\n total = member_id[1].get(total_str, 0)\n losses = member_id[1].get(total_str, 0) - member_id[1].get(leaderboard_type)\n try:\n percent = (losses / total) * 100\n except ZeroDivisionError:\n percent = 0.0\n msg_list.append(\n f\"#{count}. 
{member_mention}: {losses}/{total} incorrect ({percent:.4}%)\\n\"\n )\n count += 1\n leaderboard_list = [msg_list[i : i + 10] for i in range(0, len(msg_list), 10)]\n if user_position is not None:\n user = leaderboard[user_position][1]\n wins = user[\"season\"]\n total = user[total_str]\n losses = user[total_str] - user[\"season\"]\n position = _(\n \"{member}, you're #{number} on the {leaderboard_type} leaderboard!\\n\"\n ).format(\n member=ctx.author.display_name,\n number=user_position + 1,\n leaderboard_type=leaderboard_type_str,\n )\n if leaderboard_type == \"season\":\n percent = (wins / total) * 100\n position += _(\"You have {wins}/{total} correct ({percent:.4}%).\").format(\n wins=wins, total=total, percent=percent\n )\n elif leaderboard_type == \"worst\":\n percent = (losses / total) * 100\n position += _(\"You have {wins}/{total} incorrect ({percent:.4}%).\").format(\n wins=wins, total=total, percent=percent\n )\n await ctx.send(position)\n await BaseMenu(\n source=LeaderboardPages(pages=leaderboard_list, style=leaderboard_type_str),\n delete_message_after=False,\n clear_reactions_after=True,\n timeout=60,\n ).start(ctx=ctx)", "def create_user_start_program_advices_list_empty(self):\n username = 'pseudo'\n email = '[email protected]'\n password = '00000000'\n user_created = self.user.objects.create_user(id=4, username=username,\n email=email, password=password)\n HistoryUser.objects.create(user=user_created)\n StatusUser.objects.create(user=user_created)\n weight = 60\n ProfileUser.objects.create(user=user_created, starting_weight=weight,\n actual_goal_weight=10, final_weight=50)\n self.add_user_results(50, user_created, weight)\n user = HistoryUser.objects.get(user=user_created)\n user.start_questionnaire_completed = True\n user.save()\n\n return user_created", "def assign_house(request):\n user_pk = request.POST.get('userPK')\n user = get_user_model().objects.get(pk=user_pk)\n house_name = request.POST.get('houseName')\n house = House.objects.get(mailing_list=house_name)\n term = Term.objects.get_by_url_name(request.POST.get('term'))\n\n house_member, created = HouseMember.objects.get_or_create(\n user=user, term=term, defaults={'house': house})\n\n # Find out if the user is a house leader for setting is_leader correctly\n user_house_leader = get_object_or_none(\n Officer, position__short_name='house-leaders',\n user__id=user_pk, term=term)\n\n if user_house_leader is not None:\n # Find out if there is already a house leader for this house\n existing_house_leader = get_object_or_none(\n HouseMember, house=house, is_leader=True, term=term)\n\n if existing_house_leader is not None:\n # If there is already a house leader in that house and we're trying\n # to add a house leader, delete any newly created HouseMember object\n # and return a 400 error to be handled by jQuery\n if created:\n house_member.delete()\n\n return json_response(status=400)\n else:\n house_member.is_leader = True\n\n # If an object was gotten, the user was in a different house previously\n if not created:\n house_member.house = house\n\n house_member.save()\n\n return json_response()", "def make_employee_dict(names, ID_numbers, salaries, email_addresses):\r\n d = dict()\r\n for i in range(len(names)):\r\n d[ID_numbers[i]] = Employee(names[i], ID_numbers[i], salaries[i], email_addresses[i])\r\n return d", "def leaderboard_changing():\r\n inp = open('Leaderboard.txt', 'r')\r\n global leaders, name, score\r\n leaderboard = inp.readlines()\r\n if leaderboard != ['\\n']:\r\n for i in leaderboard:\r\n 
leaders.append((int(i.split(\" \", 1)[0]), i.split(\" \", 1)[1]))\r\n inp.close()\r\n for i in leaders:\r\n if score >= i[0]:\r\n leaders.insert(leaders.index(i), (score, name + \"\\n\"))\r\n break\r\n\r\n if leaders == [] or score < leaders[-1][0]:\r\n leaders.append((score, name + \"\\n\"))\r\n\r\n inp = open('Leaderboard.txt', 'w')\r\n for i in leaders:\r\n inp.write(str(i[0]) + \" \" + i[1])\r\n inp.close()" ]
[ "0.65373933", "0.6418469", "0.6274147", "0.6274147", "0.60241544", "0.5993524", "0.5734152", "0.56874007", "0.5683928", "0.5643717", "0.5590358", "0.55775136", "0.55295014", "0.5501056", "0.54992604", "0.5479858", "0.53781486", "0.535312", "0.5326933", "0.52975243", "0.52967566", "0.5282533", "0.52810675", "0.52786106", "0.5277657", "0.52728784", "0.5267124", "0.52667147", "0.5211703", "0.52113485", "0.5202566", "0.51987267", "0.51926816", "0.5179166", "0.517819", "0.51577836", "0.5144048", "0.51427686", "0.5134719", "0.5134395", "0.5111502", "0.50540334", "0.50509435", "0.5049864", "0.5045701", "0.50375557", "0.5023352", "0.5022408", "0.5019426", "0.5005038", "0.5002442", "0.49961817", "0.4969365", "0.496557", "0.49635962", "0.49593303", "0.49434382", "0.49391755", "0.49299744", "0.49287754", "0.49262106", "0.49206185", "0.49127802", "0.4904012", "0.4873363", "0.48702997", "0.4870005", "0.48597535", "0.48588598", "0.48467267", "0.48394844", "0.48242858", "0.48227718", "0.48186642", "0.48173612", "0.48159298", "0.48087963", "0.48006833", "0.47855508", "0.47716865", "0.47688794", "0.47669142", "0.4758399", "0.47495633", "0.474953", "0.4745459", "0.47406882", "0.47382122", "0.4718165", "0.4718165", "0.4716686", "0.47161394", "0.47158462", "0.47147623", "0.47115704", "0.47082645", "0.4707085", "0.47034746", "0.47022924", "0.4698166" ]
0.63167363
2
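The row above (and the one that follows) uses the schema from the header: a query string, one positive document, a list of negatives with their negative_scores, plus document_score and document_rank, all under the triplet (query, document, negatives) objective declared in the metadata. Purely as an illustration — none of this code ships with the dataset, and the loss helper, temperature value, and random stand-in embeddings are assumptions — a minimal InfoNCE-style sketch of how one such row could be consumed looks like:

# Illustration only (assumed, not part of the dataset): consume one row for
# contrastive training with an InfoNCE / triplet-style loss.
import torch
import torch.nn.functional as F

def info_nce_loss(query_emb, pos_emb, neg_embs, temperature=0.05):
    # query_emb: (d,), pos_emb: (d,), neg_embs: (n, d) -- embeddings of the
    # row's query, document, and negatives; the positive sits at index 0.
    candidates = torch.cat([pos_emb.unsqueeze(0), neg_embs], dim=0)   # (1 + n, d)
    sims = F.cosine_similarity(query_emb.unsqueeze(0), candidates) / temperature
    target = torch.zeros(1, dtype=torch.long)                         # index of the positive
    return F.cross_entropy(sims.unsqueeze(0), target)

# Random tensors stand in for encoded texts; any text encoder could produce them.
d, n_neg = 8, 5
loss = info_nce_loss(torch.randn(d), torch.randn(d), torch.randn(n_neg, d))
print(float(loss))

The negative_scores and document_rank fields are not consumed in this sketch; they presumably come from the mining step and could be used for filtering or weighting negatives.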
Create LeaderModel and send it as a PDF to the browser
def get_leader_model_pdf(currentEmpl, employees):
    lm = LeaderModel()
    employee_actions = {}
    legend = []
    colors = {}
    errors = {'noactions': []}
    # numbered_actions = {}
    for empl in employees:
        if not currentEmpl.hasAccessTo(empl):
            raise PermissionDenied()
        actions = empl.action_set.all()
        if not len(actions):
            errors['noactions'].append(empl)
            continue
        lkey = empl.user.first_name + " " + empl.user.last_name
        legend.append(lkey)
        if not lkey in employee_actions:
            employee_actions[lkey] = {}
        for action in actions:
            if not action.difficulty or not action.type:
                errors['noactions'].append(empl)
                continue
            circle_number = lm.addCircle(action)
            latest_comment = action.getLatestComment()
            employee_actions[lkey][circle_number] = {
                'name': action.title,
                'type': action.type,
                'difficulty': action.getDifficultyText(),
                'comment': latest_comment
            }
            if lkey not in colors:
                color = lm.getEmployeeColors(empl.id)
                colors[lkey] = "rgb({}, {}, {})".format(color[0], color[1], color[2])
    if len(errors['noactions']):
        return errors
    lm_filename = path.join(settings.STATIC_ROOT, "leadermodel_{}.png".format(currentEmpl.id))
    lm.writeImage(lm_filename)
    #
    # Write PDF
    pdfFilename = path.join(settings.FILES_ROOT, "leadermodel_{}.pdf".format(currentEmpl.id))
    template = get_template('mus/leader_model_pdf.html')
    context = Context({
        'site_url': settings.SITE_URL,
        'lm_filename': lm_filename,
        'employee_actions': employee_actions,
        'colors': colors,
        'legend': legend
    })
    html = template.render(context)
    # html = html.replace('<li>','<li><img class="square" src="http://test.nxtlvl.dk/static/img/square.png" />')
    result = open(pdfFilename, 'wb')
    pisa.pisaDocument(StringIO.StringIO(html.encode("UTF-8")), dest=result)
    result.close()
    wrapper = FileWrapper(file(pdfFilename))
    response = HttpResponse(wrapper, content_type='application/pdf')
    response['Content-Disposition'] = 'attachment;filename=ledermodel.pdf'
    response['Content-Length'] = os.path.getsize(pdfFilename)
    return response
    # return HttpResponseRedirect('/employee/all/%d' % int(company_id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_leader_model(request, company_id):\n\n errors = {'noactions': []}\n company = Company.objects.get(pk=company_id)\n currentEmpl = Employee.objects.get(user__pk=request.user.pk)\n \"\"\":type : Employee \"\"\"\n\n if not currentEmpl.isEnsoUser() and currentEmpl.company.pk != company.pk:\n raise PermissionDenied()\n\n if currentEmpl.isCompanySuperUserOrHigher():\n employeeQS = Employee.objects.filter(\n company__pk=company_id\n )\n else:\n employeeQS = Employee.objects.filter(\n Q(manager=currentEmpl),\n company__pk=company_id\n )\n\n form = MultiLeaderModelForm(request.POST or None)\n form.fields['employees'].queryset = employeeQS\n\n if request.method == 'POST' and form.is_valid():\n\n employees = form.cleaned_data['employees']\n \"\"\":type : list[Employee] \"\"\"\n\n pdf_response = get_leader_model_pdf(currentEmpl, employees)\n\n if isinstance(pdf_response, HttpResponse):\n return pdf_response\n else:\n errors = pdf_response\n\n print(errors)\n\n return TemplateResponse(\n request,\n 'mus/create_leader_model.html', {\n 'form': form,\n 'company': company,\n 'errors': errors\n }\n )", "def pdfReceiver(request, model=''):\n\n\tinput_str = ''\n\tinput_str += parsePOST(request)\n\t# packet = io.StringIO() # write to memory\n\tpacket = io.BytesIO()\n\n\ttry:\n\t\tpisa.CreatePDF(input_str, dest=packet)\n\texcept ValueError as error:\n\t\t# triggered from the elusive invalid color value issue:\n\t\tlogging.warning(\"elusive invalid color value, defaulting html background-color to FFFFFF\")\n\t\tpisa.CreatePDF(input_str, dest=packet, default_css=\"body{background-color:#FFFFFF;}\")\n\n\n\tjid = MetabolizerCalc().gen_jid() # create timestamp\n\tresponse = HttpResponse(packet.getvalue(), content_type='application/pdf')\n\tresponse['Content-Disposition'] = 'attachment; filename=' + model + '_' + jid + '.pdf'\n\tpacket.close() # todo: figure out why this doesn't solve the 'caching problem'\n\treturn response", "def create_pdf(request):\n\n contact_info = ContactDetails.objects.iterator()\n\n # Create a file-like buffer to receive PDF data.\n buffer = io.BytesIO()\n\n # Create the PDF object, using the buffer as its \"file.\"\n pdf_file = canvas.Canvas(buffer)\n\n # Draw things on the PDF. 
Here's where the PDF generation happens\n pdf_file.setTitle(\"Contact Infomation\")\n pdf_file.setFont(\"Helvetica-Bold\", 20, leading=None)\n pdf_file.setFillColorRGB(1,0,0)\n pdf_file.drawString( 60, 800, \"Stefanos Taramas Contact Information\")\n pdf_file.setFillColorRGB(0,0,0)\n pdf_file.setFont(\"Helvetica\", 15, leading=None)\n\n for index, item in enumerate(contact_info):\n line = str(index + 1) +\") \" + str(item.contact_name) + \": \" + str(item.contact_info)\n column = 50\n row = 750 - 15*index\n pdf_file.drawString(column, row, line)\n\n # Close the PDF object cleanly\n pdf_file.showPage()\n pdf_file.save()\n\n # FileResponse sets the Content-Disposition header so that browsers\n # present the option to save the file.\n buffer.seek(0)\n\n return FileResponse(buffer, as_attachment=True, filename='StefanosTaramasContactInfo.pdf')", "def bundle(handler, model):\n\n notebook_filename = model['name']\n notebook_name = os.path.splitext(notebook_filename)[0]\n pdf_filename = '{}.pdf'.format(notebook_name)\n\n with io.BytesIO() as pdf_buffer:\n pdf_body = convert_notebook_to_pdf(model)\n pdf_buffer.write(pdf_body)\n\n handler.set_attachment_header(pdf_filename)\n handler.set_header('Content-Type', 'application/pdf')\n\n # Return the buffer value as the response\n handler.finish(pdf_buffer.getvalue())", "def generate_pdf(request):\n reg_no = request.user.username\n user = get_object_or_404(User, username=reg_no)\n user_profile = user.get_profile()\n user_application = user_profile.application\n np = user_application.np\n \n response = HttpResponse(mimetype='application/pdf')\n response['Content-Disposition'] = 'attachment; filename=JAM2012_Allottment.pdf'\n \n elements = []\n doc = SimpleDocTemplate(response)\n \n formatted_time = time.ctime()\n styles = getSampleStyleSheet()\n styles.add(ParagraphStyle(name='Justify', alignment=TA_JUSTIFY))\n \n ptext = '<font size=15>JAM 2012 - Admissions.</font>' \n elements.append(Paragraph(ptext, styles[\"Justify\"]))\n elements.append(Spacer(4, 20))\n \n ptext = '<font size=12>Registration Number: %s</font>' % reg_no \n elements.append(Paragraph(ptext, styles[\"Normal\"]))\n elements.append(Spacer(1, 12))\n \n data = [] \n options = get_chosen_options(user) ##Put a check to show when the options chosen is empty\n \n if not(options):\n ptext = '<font size=12>No choices were selected.</font>' \n elements.append(Paragraph(ptext, styles[\"Normal\"]))\n elements.append(Spacer(1, 12))\n doc.build(elements) \n return response \n \n ptext = '<font size=12>The choices selected by me are as follows: </font>' \n elements.append(Paragraph(ptext, styles[\"Normal\"]))\n elements.append(Spacer(4, 30))\n \n counter = 1\n for opt in options:\n data.append([counter, opt.opt_code, opt.opt_location, opt.opt_name])\n counter = counter + 1\n \n t = Table(data)\n t.setStyle(TableStyle([('GRID',(0,0),(3,len(options)),1,colors.black),\n ('TEXTCOLOR',(0,0),(0,-1),colors.green)]))\n \n elements.append(t) \n \n elements.append(Spacer(4, 30))\n \n ptext = '<font size=12>I hereby declare that the order of preference given by me for my eligible programmes is final. 
</font>' \n elements.append(Paragraph(ptext, styles[\"Normal\"]))\n elements.append(Spacer(4, 25))\n \n ptext = '<font size=12>Signature of the Candidate</font>' \n elements.append(Paragraph(ptext, styles[\"Normal\"]))\n elements.append(Spacer(4, 20))\n \n ptext = '<font size=12>%s</font>' % formatted_time\n elements.append(Paragraph(ptext, styles[\"Normal\"]))\n elements.append(Spacer(1, 12))\n \n doc.build(elements)\n \n return response", "def _create_pdf(self, survey, response):\n pdf_transformer = PDFTransformer(survey, response)\n self._pdf, self._page_count = pdf_transformer.render_pages()\n return self._pdf", "def createpdf():\n with app.app_context():\n # Get form data\n if request.form:\n data = request.form\n else:\n return 'no form'\n msg = {}\n msg['name'] = data['name']\n msg['role'] = data['role']\n msg['unit'] = data['unit']\n msg['unitdetail'] = data['unitdetail']\n msg['phone'] = data['phone']\n msg['email'] = data['email']\n msg['employmentdate'] = data['employmentdate']\n filename = 'default.png'\n if 'img' in request.files:\n file = request.files['img']\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename).replace(\"_\",\"\")\n portraitFilePath = os.path.join(app.config['IMAGE_UPLOADS'], filename)\n file.save(portraitFilePath)\n if 'presentation' in data:\n msg['presentation'] = data['presentation']\n if 'edu-title' in data:\n msg['edu'] = [{'title': i, 'time': j} for i, j in zip(request.form.getlist('edu-title'), request.form.getlist('edu-time'))]\n msg['edu'].sort(key = itemgetter('title'))\n msg['edu'].sort(key = itemgetter('time'), reverse=True)\n if 'emp-title' in data:\n msg['emp'] = [{'title': i, 'time': j} for i, j in zip(request.form.getlist('emp-title'), request.form.getlist('emp-time'))]\n msg['emp'].sort(key = itemgetter('title'))\n msg['emp'].sort(key = itemgetter('time'), reverse=True)\n if 'cou-title' in data:\n msg['cou'] = [{'title': i, 'time': j} for i, j in zip(request.form.getlist('cou-title'), request.form.getlist('cou-time'))]\n msg['cou'].sort(key = itemgetter('title'))\n msg['cou'].sort(key = itemgetter('time'), reverse=True)\n if 'ass-title' in data:\n msg['ass'] = [{'title': i, 'company': j, 'role': k, 'descr': l, 'time': m} for i,j,k,l,m in zip(request.form.getlist('ass-title'), request.form.getlist('ass-company'), request.form.getlist('ass-role'), request.form.getlist('ass-descr'), request.form.getlist('ass-time'))]\n msg['ass'].sort(key = itemgetter('title'))\n msg['ass'].sort(key = itemgetter('time'), reverse=True)\n\n cv = TEXTEMPLATE.render(msg = msg, portrait = 'img/' + filename)\n pdf = writeTex(cv, app.config[\"OUT_DIR\"], filename)\n deleteImgUpload(filename)\n return redirect(\"/getpdf/\" + pdf)", "def toPDF(Infos):\n\n\n #returnPDF = PDFDocument(\"output\")\n #returnPDF.Infos.get(\"name\")\n returnPDF = PDF(\"Courier\", Infos.get(\"name\"))\n if Infos.get('contact'):\n returnPDF.contact(Infos.get(\"contact\"))\n if Infos.get('Current position'):\n returnPDF.currentposition(Infos.get(\"Current position\"))\n if Infos.get('Education'):\n returnPDF.currentposition(Infos.get(\"Education\"))\n if Infos.get('Langue'):\n returnPDF.currentposition(Infos.get(\"Langue\"))\n returnPDF.output(\"result.pdf\", 'F')", "def create_bill_pdf(obj):\n data = {\n 'today': datetime.date.today(),\n 'amount': obj.price,\n 'customer_name': obj.company.company_name,\n 'order_id': obj.pk,\n }\n pdf = render_to_pdf('pdf/invoice.html', data)\n filename = obj.company.company_name + '_' + obj.promotion.campaign_name + '_' + \\\n 
datetime.datetime.now().strftime(\"%Y-%m-%d\") + '.pdf'\n obj.bill.save(filename, File(io.BytesIO(pdf.content)))", "def make_pdf(self, net_id, request_id, request_date):\n with open(\"{0}/user_uploads/{1}/{2}/submission.json\".format(self.__APP_PATH__, net_id, request_id), mode=\"r\") as json_file:\n request_details = json.load(json_file)\n\n files_text = \"\"\n travel_text = None\n\n if request_details[\"request_type\"] == \"travel\":\n travel_text = \"\\n\\nTravel Details:\\n\" \\\n \"\\t\\t\\t\\tTravel from: {0} ({1})\\n\" \\\n \"\\t\\t\\t\\tTravel to: {2} ({3})\\n\" \\\n \"\\t\\t\\t\\tTravel Number: {4}\\n\" \\\n \"\\t\\t\\t\\tEvent Website: {5}\".format(request_details[\"travel_from\"],\n request_details[\"travel_from_date\"],\n request_details[\"travel_to\"],\n request_details[\"travel_to_date\"],\n request_details[\"travel_number\"],\n request_details.get(\"event_website\", \"N/A\"))\n for file in request_details[\"files\"]:\n amount_text = \"${0}\".format(file[\"dollar_amount\"]) if file[\"dollar_amount\"] > 0.0 else \"Auxiliary File\"\n files_text += \"\\t\\t\\t\\t{0} ({1})\\n\\t\\t\\t\\t\\t\\t\\t\\t\" \\\n \"{2}\\n\\t\\t\\t\\t\\t\\t\\t\\t{3}\\n\\n\".format(file[\"label\"], amount_text,\n file[\"name\"], file[\"description\"])\n\n if request_details[\"notes\"].strip():\n request_notes = \"\\nNotes:\\n{0}\".format(request_details[\"notes\"].strip())\n else:\n request_notes = \"\"\n\n pdf_title = \"({0}) {1:02d}/{2:02d}/{3:04d} - {4:02d}:{5:02d}:{6:02d}, Amount: ${7}\".format(\n request_details[\"request_date\"][\"weekday\"], request_details[\"request_date\"][\"month\"],\n request_details[\"request_date\"][\"day\"], request_details[\"request_date\"][\"year\"],\n request_details[\"request_date\"][\"hours\"], request_details[\"request_date\"][\"minutes\"],\n request_details[\"request_date\"][\"seconds\"], request_details[\"total_amount\"])\n\n if request_details[\"pay_to\"][\"id\"]:\n pay_to_details = \"{0} ({1}, {2})\".format(request_details[\"pay_to\"][\"name\"], request_details[\"pay_to\"][\"id\"],\n request_details[\"pay_to\"][\"email\"])\n else:\n pay_to_details = \"{0} ({1})\".format(request_details[\"pay_to\"][\"name\"], request_details[\"pay_to\"][\"email\"])\n\n pdf_body = \"{0}{1}\\n\\nRequestee: \\n\\t\\t\\t\\tAccount:{2}\\n\\t\\t\\t\\tName: {3} {4} ({5})\\n\\t\\t\\t\\t\" \\\n \"Phone: {6}\\t|\\tNet ID: {7}\\t\\n\\nPay To:\\n\\t\\t\\t\\tName: {8}{9}\\n\\n\" \\\n \"Files:\\n{10}\".format(request_details[\"short_description\"], request_notes,\n request_details[\"account_number\"],\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n request_details[\"requester\"][\"email\"],\n request_details[\"requester\"][\"phone_number\"],\n request_details[\"requester\"][\"net_id\"],\n pay_to_details,\n travel_text,\n files_text)\n try:\n logo_path = \"{0}/static/assets/main/uta_logo.png\".format(self.__APP_PATH__.split(\"/apps/\")[0])\n pdf = PDFMaker(**{\"title\": \"Reimbursement Request Report\"})\n\n pdf.set_margins(left=19.05, top=19.05, right=19.05)\n pdf.set_auto_page_break(auto=True, margin=19.05)\n pdf.set_author(\"MavApps - Reimbursement App\")\n pdf.print_page(pdf_title, pdf_body)\n pdf.image(logo_path, x=53, y=11, w=107, h=10, type=\"PNG\", link=\"https://uta.edu\")\n pdf.output(\"{0}/user_uploads/{1}/{2}/[{1}-{3}]_report.pdf\".format(self.__APP_PATH__, net_id, request_id, request_date), \"F\")\n except Exception as e:\n print(e)\n return False\n return True", "def book(**kwargs):\n print(\"pdf created\")", "def 
_produce_pdf_as_a_response(self, html):\n # Create a Django response object, and specify content_type as pdf\n response = HttpResponse(content_type='application/pdf')\n # Define that this is an attachment. \n response['Content-Disposition'] = 'attachment;'\n pisaStatus = pisa.CreatePDF(html, dest=response)\n \n return response", "def generate_pdf(self):\n x = 100\n y = 100\n buffer = BytesIO()\n p = canvas.Canvas(buffer, pagesize=\"A4\")\n p.drawString(x, y, \"TO DO\")\n p.showPage()\n p.save()\n pdf = buffer.getvalue()\n buffer.close()\n return pdf", "def generate():\n # Create the list of article from our data\n generator = GenerateLDA()\n generator.generateLDA()\n return jsonify({\"code\": 200, \"message\" : \"LDA model successfully created.\"})", "def generate_contract_de_fdf_pdf(user):\n if DEBUG: # pragma: no cover\n print \"===== this is generate_fdf_pdf\"\n from fdfgen import forge_fdf\n fields = [\n ('surname', user.surname),\n ('lastname', user.lastname),\n ('street', user.street),\n ('number', user.number),\n ('postcode', user.postcode),\n ('city', user.city),\n ('email', user.email),\n ('user_id', user.id),\n ('username', user.username),\n ('date_registered', user.date_registered),\n ('date_generated', datetime.now()),\n ]\n #generate fdf string\n fdf = forge_fdf(\"\", fields, [], [], [])\n # write to file\n my_fdf_filename = \"fdf\" + str(user.id) + \".fdf\"\n\n fdf_file = open(my_fdf_filename, \"w\")\n fdf_file.write(fdf)\n fdf_file.close()\n if DEBUG: # pragma: no cover\n print \"fdf file written.\"\n\n res = os.popen(\n 'pdftk pdftk/berechtigungsvertrag-2.2.pdf fill_form %s output'\n ' formoutput.pdf flatten' % my_fdf_filename).read()\n\n if DEBUG: # pragma: no cover\n print res\n print \"done: put data into form and finalized it\"\n\n # delete the fdf file\n res = os.popen('rm %s' % my_fdf_filename)\n if DEBUG: # pragma: no cover\n print res\n print \"combining with bank account form\"\n # combine\n res = os.popen(\n 'pdftk formoutput.pdf pdftk/bankaccount.pdf output output.pdf').read()\n if DEBUG: # pragma: no cover\n print res\n print \"combined personal form and bank form\"\n\n # delete the fdf file\n os.popen('rm formoutput.pdf').read()\n\n # return a pdf file\n from pyramid.response import Response\n response = Response(content_type='application/pdf')\n response.app_iter = open(\"output.pdf\", \"r\")\n return response", "def create_pdf(self):\n\n my_datetime = datetime.now()\n self.pdf_name = (\n self.pdf_name + \"_\" + my_datetime.strftime(\"%H%M_%d%m%Y\") + \".pdf\"\n )\n fig_width = aW * self.column_ratio[0]\n\n clm_width_meta = (aW * self.column_ratio[1]) / len(self.fields)\n\n c = canvas.Canvas(os.path.join(self.pdf_folder, self.pdf_name), pagesize=A4)\n\n for qc_run_id, fig_file in sorted(self._files.items()):\n (param_values, feature_values) = get_param_values(\n qc_run_id, self.db_name, return_meta_add_on=True\n )\n\n comment = self.subject + \"<br/>\"\n # c.saveState()\n title = \"Dataset \" + qc_run_id\n\n # Prepare header\n header = Paragraph(title, title_style)\n h_w, h_h = header.wrap(aW, aH)\n\n # Prepare image\n img = ImageReader(fig_file)\n im_width, im_height = img.getSize()\n aspect = im_height / float(im_width)\n fig_height = fig_width * aspect\n\n # Prepare metadata section\n\n meta_table = Table(\n param_values,\n colWidths=[clm_width_meta] * len(self.fields),\n hAlign=\"CENTER\",\n rowHeights=0.22 * inch,\n )\n meta_table.setStyle(\n TableStyle(\n [\n (\"FONT\", (0, 0), (-1, 0), \"Helvetica-Bold\"),\n (\"FONT\", (0, 1), (-1, -1), 
\"Helvetica\"),\n (\"LINEBELOW\", (0, 0), (1, 0), 0.08, colors.black),\n (\"SIZE\", (0, 0), (-1, -1), 8),\n (\"VALIGN\", (0, 0), (-1, -1), \"BOTTOM\"),\n # ('ALIGN', (0, 0), (-1, 0), 'CENTER'),\n (\"ALIGN\", (0, 0), (0, -1), \"LEFT\"),\n (\"ALIGN\", (1, 1), (1, -1), \"LEFT\"),\n (\"INNERGRID\", (0, 0), (-1, -1), 0.08, colors.beige),\n # ('BOX', (0,0), (-1,-1), 0.25, colors.grey),\n ]\n )\n )\n\n meta_width, meta_height = meta_table.wrap(aW - im_width, aH / 2)\n\n # Prepare comments header\n comments_header = Paragraph(\"Comments:\", title_style)\n avail_height = aH - fig_height - v_padding\n comm_h_width, comm_h_height = comments_header.wrap(\n im_width, avail_height # aW - meta_width,\n )\n # Prepare comments\n my_datetime = datetime.now()\n ts = \"Printed on \" + my_datetime.strftime(\"%c\")\n\n try:\n data_specific_comment = self.comments[int(qc_run_id)]\n comment += data_specific_comment + \"<br/>\"\n comment += self.comments[\"general\"] + \"<br/>\"\n\n comment += self.smalltalk + \"<br/>\"\n except Exception:\n logger.warning(\n \"Unable to summarize result of \" + \"dataset {}\".format(qc_run_id)\n )\n comment_ts = comment + ts\n comment_ts = textwrap.fill(comment_ts, 70)\n comment_ts = comment_ts.replace(\"\\n\", \"<br/>\")\n\n comments_p = Paragraph(comment_ts, body_style)\n\n avail_height = aH - fig_height - v_padding - comm_h_height\n\n comm_width, comm_height = comments_p.wrap(im_width, avail_height) # aW,\n\n line_widths = comments_p.getActualLineWidths0()\n number_of_lines = len(line_widths)\n if number_of_lines > 1:\n pass\n if number_of_lines == 1:\n min(line_widths)\n comm_width, comm_height = comments_p.wrap(im_width, avail_height)\n\n # Prepare features\n feat_table = Table(\n feature_values,\n colWidths=[clm_width_meta] * len(self.fields),\n hAlign=\"CENTER\",\n rowHeights=0.22 * inch,\n )\n feat_table.setStyle(\n TableStyle(\n [\n (\"FONT\", (0, 0), (-1, 0), \"Helvetica-Bold\"),\n (\"FONT\", (0, 1), (-1, -1), \"Helvetica\"),\n (\"LINEBELOW\", (0, 0), (1, 0), 0.08, colors.black),\n (\"SIZE\", (0, 0), (-1, -1), 8),\n (\"VALIGN\", (0, 0), (-1, -1), \"BOTTOM\"),\n # ('ALIGN', (0, 0), (-1, 0), 'CENTER'),\n (\"ALIGN\", (0, 0), (0, -1), \"LEFT\"),\n (\"ALIGN\", (1, 1), (1, -1), \"LEFT\"),\n (\"INNERGRID\", (0, 0), (-1, -1), 0.08, colors.beige),\n # ('BOX', (0,0), (-1,-1), 0.25, colors.grey),\n ]\n )\n )\n avail_height = aH - meta_height # fig_height - v_padding - comm_h_height\n avail_height -= comm_height\n feat_width, feat_height = feat_table.wrap(aW - im_width, avail_height)\n\n # Draw everyting on canvas\n\n header.drawOn(c, left_margin, aH - top_margin)\n\n c.drawImage(\n img,\n left_margin,\n aH - top_margin - fig_height - v_padding,\n width=fig_width * 1.1,\n height=fig_height * 1.1,\n mask=\"auto\",\n )\n\n meta_table.drawOn(\n c,\n left_margin + fig_width + h_padding,\n aH - meta_height - top_margin / 2, # - v_padding\n )\n\n comments_header.drawOn(\n c,\n left_margin,\n aH\n - top_margin\n - comm_h_height\n - fig_height\n - 2 * v_padding, # - add_on_height\n )\n\n comments_p.drawOn(\n c,\n left_margin,\n aH\n - top_margin\n - comm_h_height\n - comm_height\n - fig_height\n - 2 * v_padding\n - comm_h_height, # - add_on_height\n )\n\n feat_table.drawOn(\n c,\n left_margin + fig_width + h_padding,\n aH - meta_height - top_margin / 2 - feat_height - v_padding,\n # top_margin - fig_height - 2*v_padding - feat_height\n )\n\n # new page\n c.showPage()\n c.saveState()\n\n c.save()", "def exportTable(self):\n\t\tself.pdf = \tself.dir + \"/application.pdf\"\n\t\tpdf = 
pisa.CreatePDF(\n\t\t\tfile(self.html, \"r\" ),\n\t\t\tfile(self.pdf, \"wb\")\n\t\t\t)", "def make_pdf(self):\n source = self.get_page_source()\n if not source:\n self.errors.append('no_source')\n if not self.errors:\n self.generate_pdf_file(source)", "def generate_pdf(list,id):\n\n doc = SimpleDocTemplate(settings.STATIC_ROOT+\"/tests/\"+str(id)+\"/\"+str(id)+\".pdf\")\n\n Story = [Spacer(1,2*inch)]\n styles = stylesheet()\n global Title\n\n # Add 10 questions with boxes below\n for i in list:\n if not i[0] in \"skills-scan\" and not i[0] in \"csrfmiddlewaretoken\" and not i[0] in \"titre\" and not i[0] in \"custom\":\n tmp = int(i[0])+1\n bogustext = (str(tmp)+\". %s\" % i[1])\n p = Paragraph(bogustext, styles['default'])\n # Write the paragraph\n\n draw = Drawing()\n # rect(x1,y1,width,height)\n rec = Rect(0, 100, 450, 150)\n rec.fillColor = colors.white\n # draw the rect under each paragraph\n draw.add(rec)\n p.keepWithNext = True\n Story.append(p)\n Story.append(draw)\n Story.append(Spacer(1,-0.9 * inch))\n elif i[0] in \"titre\":\n Title = i[1]\n # build the document by inserting the whole story\n doc.build(Story, onFirstPage=myFirstPage, onLaterPages=myLaterPages)\n return str(id)+\".pdf\"", "def create_pdf(submission):\n # Get questions from sections\n fields = {}\n for section in submission.questions:\n for form in section[\"forms\"]:\n for field in form[\"fields\"]:\n fs = field.get(\"fields\", [field])\n for f in fs:\n fields[f[\"name\"]] = f\n\n # Pull out image and answers\n images = []\n docs = []\n answers = []\n for answer in submission.answers:\n answer, name = answer.get(\"answer\", \"\"), answer.get(\"name\", \"\")\n field = fields[name]\n if field[\"type\"] == \"FILE\":\n image_ids = []\n doc_ids = []\n for file in answer:\n if \"image\" in file:\n image_ids.append(file[\"id\"])\n elif \"file\" in file:\n doc_ids.append(file[\"id\"])\n\n if image_ids:\n images += [\n image_upload.image\n for image_upload in ImageUpload.objects.filter(\n pk__in=image_ids\n ).all()\n ]\n if doc_ids:\n docs += [\n file_upload.file\n for file_upload in FileUpload.objects.filter(pk__in=doc_ids).all()\n ]\n else:\n answers.append(\n {\n \"name\": name.lower().replace(\"_\", \" \").capitalize(),\n \"prompt\": field.get(\"prompt\", \"\"),\n \"answers\": answer if type(answer) is list else [answer],\n }\n )\n\n context = {\n \"submission\": submission,\n \"answers\": answers,\n \"images\": images,\n \"docs\": docs,\n }\n pdf_html_str = render_to_string(\"client-intake.html\", context=context)\n pdf_bytes = weasyprint.HTML(string=pdf_html_str).write_pdf()\n return pdf_bytes", "def downlaod():\r\n filename = str(uuid.uuid4()) + '.pdf'\r\n filename = os.path.join('./output' , filename)\r\n\r\n config = pdfkit.configuration(wkhtmltopdf = PRG_Path)\r\n options = {\r\n 'page-size': 'Letter'\r\n ,'margin-top': '0.75in'\r\n ,'margin-right': '0.75in'\r\n ,'margin-bottom': '0.75in'\r\n ,'margin-left': '0.75in'\r\n ,'no-outline': None\r\n ,'encoding':'UTF-8'\r\n ,'enable-local-file-access':None\r\n ,'quiet': ''\r\n # ,'javascript-delay':2000000\r\n }\r\n\r\n\r\n html = create_html_report()\r\n pdf = pdfkit.from_string(input=html, output_path=filename,configuration=config, options=options)\r\n pdfDownload = open(filename,'rb').read()\r\n\r\n response: Response = Response (\r\n pdfDownload\r\n ,mimetype=\"application/pdf\"\r\n ,headers={\r\n \"Content-disposition\": \"attachment; filename=\" + filename\r\n ,\"Content-type\": \"application/force-download\"\r\n }\r\n )\r\n return response", "def 
produce_summary_pdf(model_name, img_path, hyperparams, model_arch, train_stats):\n # datetime object containing current date and time\n now = datetime.now()\n dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n pdf = FPDF()\n pdf.set_title(\"training_summary_{}_{}\".format(model_name.lower(), dt_string))\n pdf.add_page()\n pdf.set_xy(0, 10)\n pdf.set_font(\"Helvetica\", \"BI\", 16)\n pdf.set_text_color(25, 33, 78)\n pdf.set_draw_color(25, 33, 78)\n pdf.cell(20)\n pdf.cell(\n 200,\n 10,\n \"Model Training Summary: {}\".format(model_name.upper()),\n 0,\n 2,\n )\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(\n 200,\n 5,\n dt_string,\n 0,\n 2,\n )\n\n # Model Configuration Section\n pdf.cell(150, 10, \"Model Configuration:\", 0, 2)\n pdf.cell(30, 10, \"Parameter\", 1, 0)\n pdf.cell(140, 10, \"Value\", 1, 2)\n pdf.set_text_color(255, 96, 80)\n pdf.set_font(\"Helvetica\", \"\", 12)\n pdf.cell(-30)\n attributes = [\n \"model_dir\",\n \"log_dir\",\n \"check_dir\",\n \"current_epoch\",\n \"overwrite\",\n \"exp_name\",\n ]\n for i, val in enumerate(hyperparams):\n if val not in attributes:\n pdf.cell(30, 10, \"%s\" % (val), 1, 0)\n pdf.cell(140, 10, \"%s\" % (hyperparams[val]), 1, 2)\n pdf.cell(-30)\n pdf.cell(90, 3, \"\", 0, 2)\n\n # Model Performance Section\n pdf.set_text_color(25, 33, 78)\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(150, 10, \"Model Performance Stats:\", 0, 2)\n pdf.set_font(\"Helvetica\", \"\", 12)\n\n loss = train_stats[\"test_loss\"]\n acc = train_stats[\"test_acc\"]\n\n pdf.set_text_color(255, 96, 80)\n pdf.cell(35, 6, \"Best Loss:\", 0, 0)\n pdf.cell(\n 45, 6, \"{:.3f} (Epoch {})\".format(min(loss), loss.index(min(loss)) + 1), 0, 0\n )\n pdf.cell(60, 6, \"Training Duration:\", 0, 0)\n pdf.cell(30, 6, \"{:.3f} (s)\".format(train_stats[\"total_dur\"]), 0, 2)\n pdf.cell(-140)\n pdf.cell(35, 6, f\"Best Accuracy:\", 0, 0)\n pdf.cell(45, 6, \"{:.3f} (Epoch {})\".format(max(acc), acc.index(max(acc)) + 1), 0, 0)\n pdf.cell(60, 6, \"Average Epoch Duration:\", 0, 0)\n pdf.cell(\n 30,\n 6,\n \"{:.3f} (s)\".format(train_stats[\"total_dur\"] / hyperparams[\"current_epoch\"]),\n 0,\n 2,\n )\n pdf.cell(-140)\n pdf.cell(90, 3, \"\", 0, 2)\n\n # Loss Curve Section\n pdf.set_text_color(25, 33, 78)\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(150, 10, \"Model Loss Curve:\", 0, 2)\n pdf.image(img_path, x=None, y=None, w=160, h=0, type=\"PNG\", link=\"\")\n\n # Second Page of Report\n pdf.add_page()\n pdf.set_xy(0, 0)\n pdf.cell(20, 20)\n\n # Model Arch Section\n pdf.cell(150, 20, \"Model Configuration:\", 0, 2)\n pdf.set_font(\"Helvetica\", \"\", 12)\n if model_arch is None:\n model_arch = \"No model configuration was provided\"\n pdf.set_text_color(255, 96, 80)\n pdf.multi_cell(180, 8, str(model_arch))\n\n # Third Page of Report\n pdf.add_page()\n pdf.set_xy(0, 0)\n pdf.cell(20, 20, \" \")\n\n # Training Loss Section\n pdf.set_text_color(25, 33, 78)\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(150, 20, \"Detailed Loss Output:\", 0, 2)\n pdf.cell(40, 8, \"Epoch\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Train Loss\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Test Loss\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Train Acc\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Test Acc\", 1, 2, \"C\")\n pdf.set_text_color(255, 96, 80)\n pdf.set_font(\"Helvetica\", \"\", 12)\n pdf.cell(-130)\n for i in range(0, len(train_stats[\"train_loss\"])):\n pdf.cell(40, 8, \"{}\".format((i + 1)), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"train_loss\"][i])), 1, 0, \"C\")\n pdf.cell(30, 8, 
\"{:.3f}\".format((train_stats[\"test_loss\"][i])), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"train_acc\"][i])), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"test_acc\"][i])), 1, 2, \"C\")\n pdf.cell(-130)\n pdf.cell(90, 3, \"\", 0, 2)\n\n pdf.output(\n os.path.join(\n os.path.dirname(img_path),\n \"training_summary_{}.pdf\".format(model_name.lower()),\n ),\n \"F\",\n )", "def save_pdf(self, response):\n\n # get metadata\n file_type = \"__comprovante_de_acesso__\"\n\n # options to save pdf\n file_id = str(uuid.uuid4())\n filename = \"{file_id}.pdf\".format(file_id=file_id)\n file_path = os.path.join(path, \"downloads\", self.scrape_id, filename)\n with open(file_path, 'wb') as f:\n f.write(response.body)\n\n # upload pdf to s3 and call the webhook\n self.upload_file(file_id)\n\n # update values in result\n self.result.update({file_type: {\"file_id\": file_id}})", "def generate_recipt(investor_name, total_prices):\n \n pdf = fpdf.FPDF(format='letter') \n total = 0.0\n pdf.add_page() \n pdf.set_font(\"Arial\", size=12) \n pdf.cell(200, 10, txt='******************************************', ln=1, align=\"L\")\n pdf.cell(200,10, txt=' Recipt ',ln=2, align=\"L\")\n pdf.cell(200, 10, txt='******************************************', ln=3, align=\"L\")\n pdf.cell(200,10, txt=f'Date: {datetime.now().strftime(\"%B %d, %Y\")}', ln=4, align=\"L\")\n pdf.cell(200,10, txt=f'Investor Name: {investor_name.title()}', ln=5, align=\"L\")\n pdf.cell(200, 10, txt='******************************************', ln=6, align=\"L\")\n temp =6\n for symbol,individual_cost in total_prices.items():\n pdf.cell(200, 10, txt=f'{symbol} {individual_cost:.2f}' ,ln=temp+1, align=\"L\" )\n total = calculate_total_price(total_prices)\n \n pdf.cell(200,10, txt=f'Your Total excluding tax : {total:.2f}',ln= temp+1,align=\"L\")\n pdf.cell(200, 10, txt='******************************************', ln=temp+1, align=\"L\")\n try:\n os.makedirs(\"outputs\")\n except OSError as exc: \n if exc.errno != errno.EEXIST:\n raise\n try:\n pdf.output(\"outputs/recipt.pdf\")\n except Exception as e:\n print(f'generate_recipt encountered {e} exception')", "def pdf_manager(self):\n\n s3ocr_root = self.s3ocr_etree() # get element s3xml\n\n # define font size\n titlefontsize = 18\n sectionfontsize = 15\n regularfontsize = 13\n hintfontsize = 10\n \n # etree labels\n ITEXT = \"label\"\n HINT = \"comment\"\n TYPE = \"type\"\n HASOPTIONS = \"has_options\"\n LINES = \"lines\"\n BOXES = \"boxes\"\n\n #l10n\n l10n = self.l10n\n\n # get pdf title\n if self.pdftitle == None or self.pdftitle == \"\":\n try:\n pdftitle = self.manager.s3.crud_strings[\\\n self.tablename].subtitle_list.decode(\"utf-8\")\n except:\n pdftitle = self.resource.tablename\n else:\n pdftitle = self.pdftitle\n\n # prepare pdf\n form = Form()\n form.decorate()\n\n # set header\n form.canvas.setTitle(pdftitle) # set pdf meta title\n form.print_text([pdftitle,],\n fontsize=titlefontsize,\n style=\"center\") # set pdf header title\n\n form.print_text(\n [\n unicode(l10n.get(\"ocr_inst\").get(\"inst1\").decode(\"utf-8\")),\n unicode(l10n.get(\"ocr_inst\").get(\"inst2\").decode(\"utf-8\")),\n unicode(l10n.get(\"ocr_inst\").get(\"inst3\").decode(\"utf-8\"))\n ],\n fontsize=regularfontsize,\n gray=0)\n form.linespace(3)\n # printing the etree\n for eachresource in s3ocr_root:\n form.draw_line()\n form.print_text([\n eachresource.attrib.get(ITEXT,\n eachresource.attrib.get(\"name\"))\n ],\n fontsize=sectionfontsize)\n form.draw_line(nextline=1)\n 
form.linespace(12) # line spacing between each field\n for eachfield in eachresource.iterchildren():\n fieldlabel = eachfield.attrib.get(ITEXT)\n spacing = \" \" * 5\n fieldhint = self.__trim(eachfield.attrib.get(HINT))\n if fieldhint != \"\" and fieldhint != None:\n form.print_text([\"%s%s( %s )\" % \\\n (fieldlabel,\n spacing,\n fieldhint)],\n fontsize=regularfontsize)\n else:\n form.print_text([fieldlabel],\n fontsize=regularfontsize)\n\n if eachfield.attrib.get(\"readable\", \"False\") == \"True\" and \\\n eachfield.attrib.get(\"writable\", \"False\") == \"False\":\n # if it is a readonly field\n form.print_text(\n [eachfield.attrib.get(\"default\",\"No default Value\")],\n seek=10,\n )\n elif eachfield.attrib.get(HASOPTIONS) == \"True\":\n fieldtype = eachfield.attrib.get(TYPE)\n # if the field has to be shown with options\n if fieldtype == \"boolean\":\n form.nextline()\n form.resetx()\n bool_text = l10n.get(\"boolean\")\n form.print_text(\n [bool_text.get(\"yes\").decode(\"utf-8\")],\n continuetext=1,\n seek=3,\n )\n # TODO: Store positions\n form.draw_circle(\n boxes=1,\n continuetext=1,\n gray=0.9,\n seek=10,\n fontsize=12,\n )\n form.print_text(\n [bool_text.get(\"no\").decode(\"utf-8\")],\n continuetext=1,\n seek=10,\n )\n # TODO: Store positions\n form.draw_circle(\n boxes=1,\n continuetext=1,\n gray=0.9,\n seek=10,\n fontsize=12,\n )\n else:\n if fieldtype == \"multiselect\":\n option_hint = l10n.get(\"select\").get(\"multiselect\")\n else:\n option_hint = l10n.get(\"select\").get(\"singleselect\")\n form.print_text(\n [option_hint.decode(\"utf-8\")],\n fontsize=hintfontsize,\n gray=0.4,\n seek=3,\n )\n s3ocrselect = eachfield.getchildren()[0]\n form.nextline(regularfontsize)\n form.resetx() # move cursor to the front\n optionseek = 10\n # resting margin for options\n formmargin = form.marginsides\n form.marginsides = optionseek + formmargin\n for eachoption in s3ocrselect.iterchildren():\n form.print_text(\n [eachoption.text],\n continuetext=1,\n fontsize = regularfontsize,\n seek = 10,\n )\n # TODO: Store positions\n form.draw_circle(\n boxes=1,\n continuetext=1,\n gray=0.9,\n seek=10,\n fontsize=12,\n )\n # restoring orginal margin\n form.marginsides = formmargin\n \n else:\n # if it is a text field\n fieldtype = eachfield.attrib.get(TYPE)\n BOXES_TYPES = [\"string\", \"textbox\", \"integer\",\n \"double\", \"date\", \"datetime\",]\n if fieldtype in BOXES_TYPES:\n if fieldtype in [\"string\", \"textbox\"]:\n form.linespace(3)\n num_lines = int(eachfield.attrib.get(\"lines\",\n 1))\n for eachline in xrange(num_lines):\n # TODO: Store positions\n form.draw_check_boxes(\n completeline=1,\n gray=0.9,\n seek=3,\n )\n elif fieldtype in [\"integer\", \"double\"]:\n num_boxes = int(eachfield.attrib.get(\"boxes\",\n 9))\n form.linespace(3)\n # TODO: Store positions\n form.draw_check_boxes(\n boxes = num_boxes,\n gray=0.9,\n seek=3,\n )\n elif fieldtype in [\"date\", \"datetime\"]:\n # print hint\n hinttext = \\\n l10n.get(\"datetime_hint\").get(fieldtype).decode(\"utf-8\")\n form.print_text(\n [hinttext],\n fontsize=hintfontsize,\n gray=0.4,\n seek=3,\n )\n form.linespace(8)\n datetime_continuetext = 0\n datetime_seek = 3\n if fieldtype == \"datetime\":\n datetime_continuetext = 1\n datetime_seek = 6\n #HH\n # TODO: Store positions\n form.draw_check_boxes(\n boxes = 2,\n gray=0.9,\n seek = 3,\n )\n #MM\n # TODO: Store positions\n form.draw_check_boxes(\n boxes = 2,\n gray=0.9,\n continuetext=1,\n seek = 4,\n )\n # DD\n # TODO: Store positions\n form.draw_check_boxes(\n boxes = 2,\n 
gray=0.9,\n continuetext = datetime_continuetext,\n seek = datetime_seek,\n )\n # MM\n # TODO: Store positions\n form.draw_check_boxes(\n boxes = 2,\n gray=0.9,\n continuetext=1,\n seek = 4,\n )\n # YYYY\n # TODO: Store positions\n form.draw_check_boxes(\n boxes = 4,\n gray=0.9,\n continuetext=1,\n seek = 4,\n )\n else:\n self.r.error(501, self.manager.PARSE_ERROR)\n print sys.stderr(\"%s :invalid field type: %s\" %\\\n (eachfield.attrib.get(\"name\"),\n fieldtype))\n return form.save()", "def print_receipt(Student):\n\n pdf = FPDF('P', 'mm', 'A4')\n pdf.add_page('P')\n pdf.set_font('Times', 'B', 14)\n\n pdf.multi_cell(0, 5, 'Student Dues Payment Receipt')\n pdf.ln()\n pdf.multi_cell(0, 5, ('Student ID: %s' % Student.student_ID))\n pdf.ln()\n pdf.multi_cell(0, 5, ('Name: %s' % Student.name))\n pdf.ln()\n pdf.multi_cell(0, 5, ('Mess Fees: %s' % Student.mess_charge))\n pdf.ln()\n\n if Student.room_type == \"S\":\n room_rent = db.get(\"hall\", Student.hall_ID, \"single_room_rent\")[0]\n elif Student.room_type == \"D\":\n room_rent = db.get(\"hall\", Student.hall_ID, \"double_room_rent\")[0]\n\n pdf.multi_cell(0, 5, ('Room Rent: %s' % room_rent))\n pdf.ln()\n\n pdf.multi_cell(0, 5, ('Amenities Charge: %s' % str(db.get(\"hall\", Student.hall_ID, \"amenities_charge\")[0])))\n pdf.ln()\n\n pdf.multi_cell(0, 5, ('Total Amount Paid: %s' % str(Student.total_dues)))\n pdf.ln()\n\n # Write generated output file to PDF\n pdf.output(('receipt_%s.pdf' % Student.hall_ID), 'F')", "def generate_pdf_flask_response(pdf_data):\n html = HTML(string=pdf_data)\n\n return render_pdf(html)", "def create_training_file(self):\n self.master.switch_frame(TrainingFileView)", "def generate_report_pre_save(model, path, contents_manager, **kwargs):\n if model['type'] != 'notebook':\n return\n\n notebook = model['content']\n base, ext = os.path.splitext(path)\n output_filename = \"{}.pdf\".format(base)\n template_filename = 'custom.tplx'\n notebook_to_pdf = load_module('notebook_to_pdf', contents_manager.root_dir)\n # Make sure that we continue working even if the conversion fails\n try:\n notebook_to_pdf.convert_notebook_to_pdf(notebook, output_filename, template_filename)\n except Exception as e:\n contents_manager.log.error(e, exc_info=True)", "def ReporteSolicitudesDeCambio(request, id_proyecto):\n from reportlab.lib.units import inch, cm\n from reportlab.lib.pagesizes import A4\n\n ancho = A4[0]\n alto = A4[1]\n\n proyecto = Proyecto.objects.get(id=id_proyecto)\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'attachment; filename=\"ReporteSolicitudesDeCambio.pdf\"'\n p = canvas.Canvas(response)\n p.setTitle('Reporte Solicitudes De Cambio del proyecto')\n p.translate(2.3 * cm, 0.3 * cm)\n p.setFont(\"Times-Italic\", 25)\n p.setFillColorRGB(0, 0, 0.5)\n p.drawString(30, alto - 40, \" Reporte Solicitudes De Cambio\")\n p.drawString(30, alto - 80, \" Proyecto : %s\" % proyecto)\n p.setFont(\"Courier-BoldOblique\", 14)\n p.saveState()\n solicitudes = SolicitudItem.objects.all()\n c = 100\n cont = 0\n for temp in solicitudes:\n cont = cont + 1\n if cont >= 1:\n p.setFillColorRGB(0, 0, 0.9)\n c = c + 40\n p.drawString(25, alto - c, \"Item , Linea Base , Usuario Solicitante , Estado\")\n p.setFont(\"Helvetica\", 12)\n c = c + 20\n for i in solicitudes:\n pid = i.item.tipoitem.fase.fkproyecto.id\n if (pid == proyecto.id):\n lb = i.item.lineabase\n if i.completo:\n if i.votossi > i.votosno:\n est = 'APROBADA'\n else:\n est = 'RECHAZADA'\n else:\n est = 'PENDIENTE'\n 
p.setFillColorRGB(0, 0, 0)\n p.drawString(25, alto - c, \"%s , %s , %s , %s\" % (i.item.nombre, lb, i.solicitante, est))\n c = c + 20\n p.showPage()\n p.save()\n return response", "def get_signature_sheet_pdf(self):\n assert(self.attendance_rate == None)\n\n buffer = BytesIO()\n # set some characteristics for pdf document\n doc = SimpleDocTemplate(\n buffer,\n rightMargin=30,\n leftMargin=40,\n topMargin=40,\n bottomMargin=30,\n pagesize=A4\n )\n # a collection of styles offer by the library\n styles = getSampleStyleSheet()\n # add custom paragraph style\n styles.add(ParagraphStyle(name=\"TableHeader\", fontSize=11, alignment=TA_CENTER))\n # list used for elements added into document\n data = []\n data.append(Paragraph(\"{0} Signature Sheet\".format(self.module), styles['h2']))\n data.append(Paragraph(\"Time: {0} Place: {1}\".format(\n self.time.strftime('%a, %d %b %Y %H:%M'),\n self.place\n ), styles['h2']))\n # insert a blank space\n data.append(Spacer(1, 12))\n table_data = []\n # table header\n table_data.append([\n Paragraph('Student Id', styles['TableHeader']),\n Paragraph('First Name', styles['TableHeader']),\n Paragraph('Last Name', styles['TableHeader']),\n Paragraph('Signature', styles['TableHeader']),\n ])\n attendees = self.module.students.all()\n for a in attendees:\n # add a row to table\n table_data.append([\n a.student_id,\n a.first_name,\n a.last_name,\n '',\n ])\n # create table\n wh_table = Table(table_data, colWidths=[doc.width/4.0]*4)\n wh_table.hAlign = 'LEFT'\n wh_table.setStyle(TableStyle(\n [('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),\n ('BOX', (0, 0), (-1, -1), 0.5, colors.black),\n ('VALIGN', (0, 0), (-1, 0), 'MIDDLE'),\n ('BACKGROUND', (0, 0), (-1, 0), colors.gray)]))\n data.append(wh_table)\n # create document\n doc.build(data)\n pdf = buffer.getvalue()\n buffer.close()\n return pdf", "def download(texttitle):\n try:\n body = current_file.analysed_texts['Regular']\n rendered = render_template('pdf_template.html', title=texttitle, body=body)\n options = {'encoding': \"UTF-8\"}\n pdf = pdfkit.from_string(rendered, False, options=options)\n response = make_response(pdf)\n response.headers[\"Content-Type\"] = 'application/pdf'\n response.headers[\"Content-Disposition\"] = 'attachment; filename=output.pdf'\n\n return response\n except Exception as e:\n flash(\"Something went wrong, please try again\")\n return redirect(request.referrer)", "def degree_creator():\n return render_template(\"degree_creator.html\")", "def train_teacher (nb_teachers, teacher_id):\n # Load the dataset\n X_train, X_test, y_train, y_test = models.get_dataset()\n\n print(X_train.shape)\n print(y_train.shape)\n print(X_test.shape)\n print(y_test.shape)\n \n # Retrieve subset of data for this teacher\n data, labels = partition.partition_dataset(X_train,\n y_train,\n nb_teachers,\n teacher_id)\n\n print(\"Length of training data: \" + str(len(labels)))\n\n # Define teacher checkpoint filename and full path\n\n filename = str(nb_teachers) + '_teachers_' + str(teacher_id) + '.hdf5'\n filename2 = str(nb_teachers) + '_teachers_' + str(teacher_id) + '.h5'\n \n # Perform teacher training need to modify \n \n\n # Create teacher model\n model, opt = models.create_two_layer_mlp(46) # num of cols\n model.compile(loss='binary_crossentropy',\n optimizer=\"Adam\",\n metrics=['accuracy'])\n model, hist = models.training(model, data, X_test, labels, y_test,filename)\n\n #modify\n model_json = model.to_json()\n with open(\"model.json\", \"w\") as json_file:\n json_file.write(model_json)\n# serialize 
weights to HDF5\n model.save_weights(filename2)\n print(\"Saved model to disk\")\n return True", "def savePDFFile(self):\n s = self.text.get(\"1.0\", tk.END)\n f = open(file, \"w\", encoding='utf-8')\n f.write(s)\n f.close()\n\n # Create a file for each student with their graded files\n pdf = FPDF()\n pdf.add_page()\n pdf.set_font(\"Arial\", size=12)\n pdf.multi_cell(0, 5, s)\n\n # Removed the \\t from the filepath in order to save as pdf in 'Graded' file\n savingFilePDF = re.sub('\\t', '', item_text[0] + \".pdf\")\n pdf.output(gradedFilesFolder + \"\\\\\" + savingFilePDF)\n highlightingTextInFile()", "def vantechy(request):\n return FileResponse(open('/files/presentation.pdf', 'rb'))", "def inscription_summary(request, pk):\n candidat = get_object_or_404(Candidate, pk=pk)\n buff = io.BytesIO()\n pdf = InscriptionSummaryPDF(buff)\n pdf.produce(candidat)\n filename = slugify('{0}_{1}'.format(candidat.last_name, candidat.first_name)) + '.pdf'\n buff.seek(0)\n return FileResponse(buff, as_attachment=True, filename=filename)", "def renderToPdf(envLL, filename, sizex, sizey):\n basefilename = os.path.splitext(filename)[0]\n mergedpdf = None\n for mapname in MAPNIK_LAYERS:\n print 'Rendering', mapname\n # Render layer PDF.\n localfilename = basefilename + '_' + mapname + '.pdf';\n file = open(localfilename, 'wb')\n surface = cairo.PDFSurface(file.name, sizex, sizey) \n envMerc = LLToMerc(envLL)\n map = mapnik.Map(sizex, sizey)\n mapnik.load_map(map, mapname + \".xml\")\n map.zoom_to_box(envMerc)\n mapnik.render(map, surface)\n surface.finish()\n file.close()\n # Merge with master.\n if not mergedpdf: \n mergedpdf = PdfFileWriter()\n localpdf = PdfFileReader(open(localfilename, \"rb\"))\n page = localpdf.getPage(0)\n mergedpdf.addPage(page)\n else:\n localpdf = PdfFileReader(open(localfilename, \"rb\"))\n page.mergePage(localpdf.getPage(0))\n output = open(filename, 'wb')\n mergedpdf.write(output)\n output.close()", "def generate_roster_pdf(sched_act_ids, include_instructions):\n\n pdf_buffer = BytesIO()\n h_margin = 1 * inch\n v_margin = 0.5 * inch\n doc = SimpleDocTemplate(pdf_buffer, pagesize=letter,\n rightMargin=h_margin, leftMargin=h_margin,\n topMargin=v_margin, bottomMargin=v_margin)\n\n elements = []\n\n styles = getSampleStyleSheet()\n styles.add(ParagraphStyle(name=\"Center\", alignment=TA_CENTER))\n styles.add(ParagraphStyle(name=\"BlockLetter\", fontSize=60, leading=72, alignment=TA_CENTER))\n styles.add(ParagraphStyle(name=\"BlockLetterSmall\", fontSize=30, leading=72, alignment=TA_CENTER))\n styles.add(ParagraphStyle(name=\"BlockLetterSmallest\", fontSize=20, leading=72, alignment=TA_CENTER))\n styles.add(ParagraphStyle(name=\"ActivityAttribute\", fontSize=15, leading=18, alignment=TA_RIGHT))\n\n for i, said in enumerate(sched_act_ids):\n sact = EighthScheduledActivity.objects.get(id=said)\n\n sponsor_names = sact.get_true_sponsors().values_list(\"first_name\",\n \"last_name\")\n sponsors_str = \"; \".join(l + \", \" + f for f, l in sponsor_names)\n\n room_names = sact.get_true_rooms().values_list(\"name\", flat=True)\n if len(room_names) == 1:\n rooms_str = \"Room \" + room_names[0]\n else:\n rooms_str = \"Rooms: \" + \", \".join(r for r in room_names)\n\n block_letter = sact.block.block_letter\n\n if len(block_letter) < 4:\n block_letter_width = 1 * inch\n block_letter_width += (0.5 * inch) * (len(block_letter) - 1)\n block_letter_style = \"BlockLetter\"\n elif len(block_letter) < 7:\n block_letter_width = 0.4 * inch\n block_letter_width += (0.3 * inch) * (len(block_letter) - 
1)\n block_letter_style = \"BlockLetterSmall\"\n else:\n block_letter_width = 0.3 * inch\n block_letter_width += (0.2 * inch) * (len(block_letter) - 1)\n block_letter_style = \"BlockLetterSmallest\"\n\n header_data = [[\n Paragraph(\"<b>Activity ID: {}<br />Scheduled ID: {}</b>\".format(sact.activity.id, sact.id), styles[\"Normal\"]),\n Paragraph(\"{}<br/>{}<br/>{}\".format(sponsors_str,\n rooms_str,\n sact.block.date.strftime(\"%A, %B %-d, %Y\")),\n styles[\"ActivityAttribute\"]),\n Paragraph(block_letter, styles[block_letter_style])\n ]]\n header_style = TableStyle([\n (\"VALIGN\", (0, 0), (0, 0), \"TOP\"),\n (\"VALIGN\", (1, 0), (2, 0), \"MIDDLE\"),\n (\"TOPPADDING\", (0, 0), (0, 0), 15),\n (\"RIGHTPADDING\", (1, 0), (1, 0), 0),\n ])\n\n elements.append(Table(header_data, style=header_style, colWidths=[2 * inch, None, block_letter_width]))\n elements.append(Spacer(0, 10))\n elements.append(Paragraph(sact.full_title, styles[\"Title\"]))\n\n num_members = sact.members.count()\n num_members_label = \"{} Student{}\".format(num_members, \"s\" if num_members != 1 else \"\")\n elements.append(Paragraph(num_members_label, styles[\"Center\"]))\n elements.append(Spacer(0, 5))\n\n attendance_data = [[\n Paragraph(\"Present\", styles[\"Heading5\"]),\n Paragraph(\"Student Name (ID)\", styles[\"Heading5\"]),\n Paragraph(\"Grade\", styles[\"Heading5\"])\n ]]\n\n members = []\n for member in sact.members.all():\n members.append((\n member.last_name + \", \" + member.first_name,\n (member.student_id if member.student_id else \"User {}\".format(member.id)),\n int(member.grade) if member.grade else \"?\"\n ))\n members = sorted(members)\n\n for member_name, member_id, member_grade in members:\n row = [\"\", \"{} ({})\".format(member_name, member_id), member_grade]\n attendance_data.append(row)\n\n # Line commands are like this:\n # op, start, stop, weight, colour, cap, dashes, join, linecount, linespacing\n attendance_style = TableStyle([\n (\"LINEABOVE\", (0, 1), (2, 1), 1, colors.black, None, None, None, 2),\n (\"LINEBELOW\", (0, 1), (0, len(attendance_data)), 1, colors.black),\n (\"TOPPADDING\", (0, 1), (-1, -1), 6),\n (\"BOTTOMPADDING\", (0, 1), (-1, -1), 0),\n (\"BOTTOMPADDING\", (0, 0), (-1, 0), 5),\n ])\n\n elements.append(Table(attendance_data, style=attendance_style, colWidths=[1.3 * inch, None, 0.8 * inch]))\n elements.append(Spacer(0, 15))\n instructions = \"\"\"\n <b>Highlight or circle</b> the names of students who are <b>absent</b>, and put an <b>\"X\"</b> next to those <b>present</b>.<br />\n If a student arrives and their name is not on the roster, please send them to the <b>8th Period Office</b>.<br />\n If a student leaves your activity early, please make a note. <b>Do not make any additions to the roster.</b><br />\n Before leaving for the day, return the roster and any passes to 8th Period coordinator, Joan Burch's mailbox in the <b>main office</b>. For questions, please call extension 5046 or 5078. 
Thank you!<br />\"\"\"\n elements.append(Paragraph(instructions, styles[\"Normal\"]))\n\n if i != len(sched_act_ids) - 1:\n elements.append(PageBreak())\n\n doc.build(elements)\n return pdf_buffer", "def generate_pdf(pdf_data):\n\n html = HTML(string=pdf_data)\n f = html.write_pdf()\n\n return f", "def create_pdf(f,s1,s2='',s3=''):\n # does not need reportlab!\n if s1 == 'White Ballot': s1 = '\"'+'_'*10+'\"'\n cod = zlib.compress('BT /F1 16 Tf ET\\r\\nBT 300 270 Td (%s) Tj ET\\r\\nBT /F1 48 Tf ET\\r\\nBT 5 180 Td (%16s) Tj ET\\r\\nBT /F1 12 Tf ET\\r\\nBT 10 50 Td (%s) Tj ET'%(s3,s1,s2))\n open(f,'w').write(create_pdf.__doc__ + '/Length %d>>\\nstream\\n'%len(cod) + cod + 'endstream endobj\\ntrailer<</Root 4 0 R>>')", "def form(request):\n\n font_config = FontConfiguration()\n # Use the BCSans font as the default.\n css = CSS(string='''\n @font-face {\n font-family: 'BCSans';\n font-style: normal;\n src: url('https://cdn.jsdelivr.net/npm/@bcgov/[email protected]/fonts/BCSans-Regular.woff') format('woff');\n }\n @font-face {\n font-family: 'BCSans';\n font-style: italic;\n src: url('https://cdn.jsdelivr.net/npm/@bcgov/[email protected]/fonts/BCSans-Italic.woff') format('woff');\n }\n @font-face {\n font-family: 'BCSans';\n font-weight: 700;\n src: url('https://cdn.jsdelivr.net/npm/@bcgov/[email protected]/fonts/BCSans-Bold.woff') format('woff');\n }\n @font-face {\n font-family: 'BCSans';\n font-style: italic;\n font-weight: 700;\n src: url('https://cdn.jsdelivr.net/npm/@bcgov/[email protected]/fonts/BCSans-BoldItalic.woff') format('woff');\n }''', font_config=font_config)\n\n\n\n data = json.loads(request.body)\n name = request.GET['name']\n template = '{}.html'.format(name)\n\n template = get_template(template)\n html_content = template.render(data)\n\n pdf_content = render_pdf(html_content)\n\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'attachment; filename=\"report.pdf\"'\n\n response.write(pdf_content, stylesheet[css], font_config=font_config)\n\n return response", "def make(self, key):\n\n fabrikant_name = self.user_table.get_current_user()\n seed = (self.seed_table & key).fetch1(\"seed\")\n\n dataloaders, model, trainer = self.load_model(key, include_trainer=True, include_state_dict=True, seed=seed)\n\n def call_back(**kwargs):\n self.connection.ping()\n self.call_back(**kwargs)\n\n score, output, model_state = trainer(model=model, dataloaders=dataloaders, seed=seed, uid=key, cb=call_back)\n transfer_data = output.pop(\"transfer_data\", None) if isinstance(output, Mapping) else None\n\n with tempfile.TemporaryDirectory() as temp_dir:\n filename = make_hash(key)\n key[\"score\"] = score\n key[\"output\"] = output\n key[\"fabrikant_name\"] = fabrikant_name\n comments = []\n comments.append((self.trainer_table & key).fetch1(\"trainer_comment\"))\n comments.append((self.model_table & key).fetch1(\"model_comment\"))\n comments.append((self.dataset_table & key).fetch1(\"dataset_comment\"))\n key[\"comment\"] = self.comment_delimitter.join(comments)\n\n self.insert1(key)\n self.CollapsedHistory().add_entry(key)\n\n if key[\"data_transfer\"] and transfer_data:\n data_path = os.path.join(temp_dir, filename + \"_transfer_data.npz\")\n np.savez(data_path, **transfer_data)\n key[\"transfer_data\"] = data_path\n self.DataStorage.insert1(key, ignore_extra_fields=True)\n filename += \".pth.tar\"\n filepath = os.path.join(temp_dir, filename)\n torch.save(model_state, filepath)\n key[\"model_state\"] = filepath\n self.ModelStorage.insert1(key, 
ignore_extra_fields=True)", "def generate_document(stats: dict, semester: str):\n filename = 'report_' + str(date.today()) + '.html'\n with open('raw_html.html', 'r') as f:\n string = f.read()\n string = string.format(semester,\n stats['faculty_with_usage'],\n stats['full_time'],\n stats['total_full_time'],\n round((stats['full_time'] / stats['total_full_time']) * 100, 1),\n stats['part_time'],\n stats['total_part_time'],\n round((stats['part_time'] / stats['total_part_time']) * 100, 1),\n stats['staff'],\n stats['courses_with_usage'],\n stats['total_courses'],\n round((stats['courses_with_usage'] / stats['total_courses']) * 100, 1),\n stats['specifics']['assignments'],\n stats['specifics']['grade'],\n stats['specifics']['graded'],\n stats['specifics']['discussion'])\n with open(filename, 'w') as f:\n f.write(string)\n pdf = weasyprint.HTML(filename).write_pdf()\n open(\"report_\" + str(date.today()) + \".pdf\", 'wb').write(pdf)", "def test_generate_pdf(self):\n with mock.patch.object(form_api.Client, 'wait') as wait_patched:\n template_id = 'tpl_000000000000000001' # str |\n\n response = self.client.generate_pdf(\n template_id, {\n 'data': {\n 'title': 'Test PDF',\n 'description': 'This PDF is great!'\n }\n })\n wait_patched.assert_called()\n self.assertEquals(response.status, 'success')\n submission = response.submission\n self.assertRegexpMatches(submission.id, '^sub_')\n self.assertEquals(submission.expired, False)\n self.assertEquals(submission.state, 'processed')", "def make_submission(data, model, path):\n counter = 0\n length= len(data)\n test_predictions = []\n #Data has form of [(id,vec),(id,vec)....]\n for instance in data:\n print(\"Prog: \",(counter/length*100))\n counter+=1\n id = instance[0]\n vec = instance[1]\n res = model.predict(vec)\n print(\"Predicted: \",res)\n test_predictions.append({\"id\":id,\"prediction\":res})\n with open(path+\".json\", \"w\", encoding=\"utf-8\") as f:\n for doc in test_predictions:\n f.write(json.dumps(doc) + \"\\n\")", "def face_down_plot(model_file: str) -> None:\n drawn_model = ossssim.ModelFile(model_file)\n plot = RosePlot(drawn_model.epoch)\n plot.add_model(drawn_model, mc='k', ms=1, alpha=0.5, sample_size=5000)\n plt.savefig(f'{os.path.splitext(model_file)[0]}.pdf')", "def createPDFDoc(self, filepath):\n print(\"Starting pdf creation\")\n strMD=\"\"\n for fileMD,data in self.graph.nodes(data=True):\n if not os.path.isfile(fileMD):\n sys.exit(\"Error: \" + fileMD + \" does not exist\")\n if not fileMD.endswith(\"md\" or \"markdown\"):\n sys.exit(fileMD + \" is not a markdown file\");\n print(\"Found file: \" + fileMD)\n strMD = strMD + \" \" + fileMD\n cmd = \"pandoc --latex-engine=xelatex -s -o \" + filepath + strMD\t\n print(\"Starting file conversion.\")\n if subprocess.call(cmd) != 0:\n print(\"Conversion failed\")\n else:\n print(\"Saving pdf file to: \" + filepath)\n print(\"Conversion successfull\")", "def render_as_pdf(self, width, height):\n pass", "def download_assignment_prof(request, pk):\n assignment = Assignment.objects.\\\n filter(pk=pk, assignmentype__prof__user=request.user).first()\n if assignment:\n filename = 'assign_%s.%s' % (assignment.student.user.username,\n assignment.document.name.split('.')[-1])\n response = HttpResponse(assignment.document,\n content_type='application/force_download')\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n return response\n else:\n return redirect('gradapp:list_assignmentypes_running')", "def convert_to_pdf(self, news_list):\n self.logger.info(\"Converting 
news to PDF...\")\n self.prepare_storage()\n self.process_news_list_with_images(news_list)\n content = self.generate_html_template(news_list)\n pdf = io.BytesIO()\n pisa.pisaDocument(content, pdf)\n self.write_to_file(pdf.getvalue())", "def create_model(self, token_lists, output_path):\n try:\n\n # create topic model & fit token_lists\n tm = TM(\n method=self.method, random_state=self.random_state, pre_trained_name=self.pre_trained_name, \n n_neighbors=self.n_neighbors, n_components=self.n_components, min_dist=self.min_dist, \n umap_metric=self.umap_metric, min_cluster_size=self.min_cluster_size, cluster_metric=self.cluster_metric, \n cluster_selection_method=self.cluster_selection_method, prediction_data=self.prediction_data\n )\n\n tm.fit(token_lists)\n\n # define output path\n subdir = \"{}_k_{}_{}_{}\".format(self.lang_code, str(tm.k), self.method, self.version)\n models_path = output_path + \"/models/\" + subdir\n dict_path = output_path + \"/dictionary/\"\n corpus_path = output_path + \"/corpus/\"\n eval_path = output_path + \"/evaluation/\" + subdir\n \n # create directories\n make_dirs(output_path)\n make_dirs(models_path)\n make_dirs(dict_path)\n make_dirs(corpus_path)\n make_dirs(eval_path)\n make_dirs(eval_path + \"/wordcloud\")\n\n # extract topics \n topics = self.extract_topics(tm.tf_idf, tm.feature_names, tm.labels)\n topic_words = self.get_topic_words(topics)\n \n # evaluate topics \n c_v = self.compute_coherence(topic_words, tm.dictionary, tm.corpus, token_lists, measure='c_v')\n u_mass = self.compute_coherence(topic_words, tm.dictionary, tm.corpus, token_lists, measure='u_mass')\n silhouette = self.compute_silhouette(tm.umap_embeddings, tm.labels)\n \n # hyperparameters & evaluation metrics\n metrics = {\n \"k\" : tm.k,\n \"lang_code\" : self.lang_code,\n \"num_docs\" : len(token_lists),\n \"method\" : self.method,\n \"version\" : self.version,\n \"norm_type\" : self.norm_type,\n \"pre_trained_name\" : self.pre_trained_name,\n \"random_state\" : self.random_state,\n \"n_neighbors\" : self.n_neighbors,\n \"n_components\" : self.n_components,\n \"min_dist\" : self.min_dist,\n \"umap_metric\" : self.umap_metric,\n \"min_cluster_size\" : self.min_cluster_size,\n \"cluster_metric\" : self.cluster_metric,\n \"cluster_selection_method\" : self.cluster_selection_method, \n \"c_v\" : c_v,\n \"u_mass\" : u_mass,\n \"silhouette\" : silhouette\n }\n \n # save topic model, dictionary, corpus \n tm.dictionary.save(dict_path + \"/dict.gensim\")\n dump_to_pickle(tm.corpus, corpus_path + \"/corpus.pkl\")\n dump_to_pickle(tm.sentences, eval_path + \"/sentences.pkl\")\n dump_to_pickle(tm.embeddings, eval_path + \"/embeddings.pkl\")\n dump_to_pickle(topics, models_path + \"/topics.pkl\")\n dump_to_pickle(tm.cluster_model, models_path + \"/cluster.pkl\")\n dump_to_pickle(tm.umap_model, models_path + \"/umap.pkl\")\n\n # save topic terms\n self.save_topic_terms(topics, eval_path + '/topic_terms.txt')\n \n # save metrics, topic_terms dataframe \n dump_to_json(metrics, eval_path + \"/evaluation.json\", sort_keys=False)\n\n # save topic visualization, topic wordclouds\n save_topic_visualization(tm.embeddings, tm.labels, eval_path + \"/topics.png\")\n save_topic_wordclouds(topics, self.num_wordcloud_words, eval_path + \"/wordcloud\")\n\n except Exception:\n logging.error('error occured', exc_info=True)", "def create_submission_file(\n json_out_file, challenge, submission_url, model_name, model_description, nyu_data_only,\n participants=None, paper_url=None, code_url=None\n):\n\n if challenge not 
in {'singlecoil', 'multicoil'}:\n raise ValueError(f'Challenge should be singlecoil or multicoil, not {challenge}')\n\n phase_name = f'{challenge}_leaderboard'\n submission_data = dict(\n recon_zip_url=submission_url,\n model_name=model_name,\n model_description=model_description,\n nyudata_only=nyu_data_only,\n participants=participants,\n paper_url=paper_url,\n code_url=code_url\n )\n submission_data = dict(result=[{\n phase_name: submission_data\n }])\n\n with open(json_out_file, 'w') as json_file:\n json.dump(submission_data, json_file, indent=2)", "def generate_pdf():\n uu_id = uuid.uuid4().hex\n current_app.logger.debug(uu_id)\n current_app.logger.debug(request.form)\n tmp_pdf_filename = \"{}_tmp.pdf\".format(uu_id)\n pdf_filename = \"{}.pdf\".format(uu_id)\n tmp_out_pdf_path = os.path.join(\n current_app.config[\"UPLOAD_FOLDER\"], tmp_pdf_filename\n )\n out_pdf_path = os.path.join(\n current_app.config[\"UPLOAD_FOLDER\"], pdf_filename\n )\n fill_template_from_input(\n request.form,\n current_app.config[\"PDF_TEMPLATE_PATH\"],\n tmp_out_pdf_path,\n INPUT_FORM_MAP\n )\n fdf_tmp = os.path.join(\n current_app.config[\"UPLOAD_FOLDER\"], 'tmp.fdf'\n )\n os.system('pdftk ' + tmp_out_pdf_path + ' generate_fdf output ' + fdf_tmp)\n os.system(\n 'pdftk ' + tmp_out_pdf_path + ' fill_form ' + fdf_tmp +\n ' output ' + out_pdf_path + ' flatten'\n )\n os.remove(tmp_out_pdf_path)\n return pdf_filename", "def encode(self, resource, **attr):\n\n if not PILImported:\n current.session.warning = self.ERROR.PIL_ERROR\n if not reportLabImported:\n current.session.error = self.ERROR.RL_ERROR\n redirect(URL(extension=\"\"))\n\n # Settings\n r = self.r = attr.get(\"request\", None)\n self.list_fields = attr.get(\"list_fields\")\n self.pdf_groupby = attr.get(\"pdf_groupby\")\n self.pdf_orderby = attr.get(\"pdf_orderby\")\n self.pdf_hide_comments = attr.get(\"pdf_hide_comments\")\n self.table_autogrow = attr.get(\"pdf_table_autogrow\")\n self.pdf_header_padding = attr.get(\"pdf_header_padding\", 0)\n self.pdf_footer_padding = attr.get(\"pdf_footer_padding\", 0)\n\n # Get the title & filename\n now = current.request.now.isoformat()[:19].replace(\"T\", \" \")\n title = attr.get(\"pdf_title\")\n if title == None:\n title = \"Report\"\n docTitle = \"%s %s\" % (title, now)\n filename = attr.get(\"pdf_filename\")\n if filename is None:\n if not isinstance(title, str):\n # Must be str not unicode\n title = title.encode(\"utf-8\")\n filename = \"%s_%s.pdf\" % (title, now)\n elif len(filename) < 5 or filename[-4:] != \".pdf\":\n # Add extension\n filename = \"%s.pdf\" % filename\n self.filename = filename\n\n # Get the Doc Template\n paper_size = attr.get(\"paper_size\")\n pdf_paper_alignment = attr.get(\"pdf_paper_alignment\", \"Portrait\")\n doc = EdenDocTemplate(title=docTitle,\n paper_size=paper_size,\n paper_alignment=pdf_paper_alignment)\n\n # Get the header\n header_flowable = None\n header = attr.get(\"pdf_header\")\n if not header:\n header = attr.get(\"rheader\")\n if header:\n header_flowable = self.get_html_flowable(header,\n doc.printable_width)\n if self.pdf_header_padding:\n header_flowable.append(Spacer(1, self.pdf_header_padding))\n\n # Get the footer\n footer_flowable = None\n footer = attr.get(\"pdf_footer\")\n if not footer:\n footer = attr.get(\"rfooter\")\n if footer:\n footer_flowable = self.get_html_flowable(footer,\n doc.printable_width)\n if self.pdf_footer_padding:\n footer_flowable.append(Spacer(1, self.pdf_footer_padding))\n\n # Build report template\n\n # Get data for the body of the 
text\n data = None\n body_flowable = None\n\n doc.calc_body_size(header_flowable, footer_flowable)\n\n callback = attr.get(\"pdf_callback\")\n pdf_componentname = attr.get(\"pdf_componentname\", None)\n if callback:\n # Get the document body from the callback\n body_flowable = self.get_html_flowable(callback(r),\n doc.printable_width)\n\n elif pdf_componentname: # and resource.parent is None:\n # Enforce a particular component\n resource = current.s3db.resource(r.tablename,\n components=[pdf_componentname],\n id=r.id)\n if pdf_componentname in resource.components:\n component = resource.components[pdf_componentname]\n body_flowable = self.get_resource_flowable(component, doc)\n\n elif r.component or attr.get(\"method\", \"list\") != \"read\":\n # Use the requested resource\n body_flowable = self.get_resource_flowable(resource, doc)\n\n styleSheet = getSampleStyleSheet()\n style = styleSheet[\"Normal\"]\n style.fontName = self.font_name\n style.fontSize = 9\n if not body_flowable:\n body_flowable = [Paragraph(\"\", style)]\n self.normalstyle = style\n\n # Build the PDF\n doc.build(header_flowable,\n body_flowable,\n footer_flowable,\n )\n\n # Return the generated PDF\n response = current.response\n if response:\n disposition = \"attachment; filename=\\\"%s\\\"\" % self.filename\n response.headers[\"Content-Type\"] = contenttype(\".pdf\")\n response.headers[\"Content-disposition\"] = disposition\n\n return doc.output.getvalue()", "def save_pdf(self, response):\n\n if response.status != 200 and self.file_retries > 0:\n self.file_retries -= 1\n yield response.request.replace(dont_filter=True)\n return\n elif response.status != 200:\n return\n else:\n # refresh\n self.file_retries = 3\n\n # get metadata\n file_type = \"__boleto__\"\n invoice_status = response.meta['invoice_status']\n document = response.meta['document']\n\n # options to save pdf\n file_id = str(uuid.uuid4())\n filename = \"{file_id}.pdf\".format(file_id=file_id)\n file_path = os.path.join(path, \"downloads\", self.scrape_id, filename)\n with open(file_path, 'wb') as f:\n f.write(response.body)\n\n # upload pdf to s3 and call the webhook\n self.upload_file(file_id)\n\n # update values in result\n document_value = self.result[document]\n [item.update({\n file_type: {\n \"file_id\": file_id}\n }) for item in document_value[invoice_status]\n if item == response.meta['invoice']]\n self.result.update({document: document_value})", "def create_page(self):", "def sa_summary_pdf(sa_id):\n pass", "def preview():\r\n html = create_html_report()\r\n return html", "def CreateModel(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def form_valid(self, *args, **kwargs):\n\t\tform = kwargs['form']\n\n\t\tself.object = form.save(commit=False)\n\n\t\timport logging\n\t\tlogger = logging.getLogger(\"furnicloud\")\n\t\tlogger.debug(form.get_data_fields_dict())\n\n\t\t# save data fields to model instance\n\t\tdata_fields = form.cleaned_data\n\t\tdel data_fields['claim']\n\t\tdel data_fields['file']\n\t\tself.object.data_fields = json.dumps(data_fields)\n\n\t\t# generate PDF claim request file and save to model instance field\n\t\tpdf_template = self.get_pdf_template_name()\n\t\ttemplate = pdf.get_template(pdf_template)\n\t\tcontext = form.get_data_fields_dict()\n\t\twith TemporaryFile(mode=\"w+b\") as f: #open('', 'wb') as f:\n\t\t\tpdf_contents = template.render(context) \n\t\t\tf.write(pdf_contents)\n\t\t\t#f.write(json.dumps(context))\n\t\t\tf.seek(0) # go to beginning of file\n\t\t\t#reopen = 
open('/tmp/claim.pdf', 'rb')\n\t\t\tclaim_file = File(f)\n\t\t\tdate_str = datetime.now().strftime(\"%Y_%m_%d\")\n\t\t\tself.object.file.save(\"claim_request\" + date_str + \".pdf\", claim_file, save=True)\n\n\t\t# save order\n\t\tself.object.save()\n\n\t\t#response = HttpResponse(content_type='application/pdf')\n\t\t#response['Content-Disposition'] = \\\n\t\t#\t'attachment; filename=Natuzzi_service_request_form.pdf'\n\n\t\t#response.write(pdf_contents)\n\t\t#return response\n\n\t\treturn HttpResponseRedirect(self.get_success_url())", "def makeDocument(fontPath):\n\n f = Font(fontPath) # Get PageBot Font instance of Variable font.\n \n W = H = PageSize\n\n # Create a new document, default to the defined page size. \n doc = Document(w=W, h=H, originTop=False, title='Text Flow', autoPages=1)\n \n view = doc.getView()\n view.padding = 0 # Aboid showing of crop marks, etc.\n view.showPageCropMarks = True\n view.showPageRegistrationMarks = True\n view.showPageFrame = True\n view.showPagePadding = True\n view.showElementOrigin = False\n view.showElementDimensions = False\n \n # Get list of pages with equal y, then equal x. \n #page = doc[0][0] # Get the single page from te document.\n page = doc.getPage(0) # Get page on pageNumber, first in row (this is only one now).\n page.name = 'Page 1'\n page.padding = PagePadding\n \n fs = newFS(f.info.familyName + ' ' + f.info.styleName, \n style=dict(font=f.name, fontSize=18, textFill=0))\n _, th = textSize(fs)\n title = newTextBox(fs, conditions=[Top2Top(), Fit2Width()],\n parent=page, h=th*1.2)\n \n circle = VariableCircle(f, s=GLYPH_NAME, name='VariableCircleSpeciment',\n parent=page, padding=4, x=100, fontSize=64,\n maxW=W-2*PagePadding, minW=100, showAxisName=True, \n # Conditions make the element move to top-left of the page.\n # And the condition that there should be no overflow, otherwise the text box\n # will try to solve it. \n conditions=[Float2Top(), Fit2Bottom(), Center2Center()],\n # Position of the origin of the element. Just to show where it is.\n # Has no effect on the position conditions. 
\n yAlign=BOTTOM, xAlign=LEFT, fill=CIRCLE_ELEMENT_FILL, borders=0,\n )\n \n score = doc.solve() # Try to solve all pages.\n if score.fails:\n print score.fails\n\n # To avoid circular dependent conditions, we correct the position of the title\n # on left to that the position of the circle has become.\n title.pl = circle.x - page.pl\n \n return doc # Answer the doc for further doing.", "def post(self):\n \n first_intl = self.request.get(\"first_name\")[:3]\n last_name = self.request.get(\"last_name\")\n \n first_i_intl = self.request.get('int_first_name')[:3]\n last_i_name = self.request.get('int_last_name')\n \n if first_i_intl == \"\":\n interviewer_id = self.request.get('interviewer')\n else:\n interviewer_id = first_i_intl + last_i_name\n interviewer = Interviewer()\n interviewer.i_short = interviewer_id\n interviewer.first_name = self.request.get('int_first_name')\n interviewer.last_name = self.request.get('int_last_name')\n interviewer.key = ndb.Key(Interviewer, interviewer_id)\n interviewer.put()\n \n \n if first_intl == \"\":\n candidate_id = self.request.get('candidate')\n else:\n candidate_id = first_intl + last_name\n candidate = Candidate()\n candidate.c_short = candidate_id\n candidate.first_name = self.request.get(\"first_name\")\n candidate.last_name = self.request.get(\"last_name\")\n \n dNow = datetime.now()\n \n sNM = dNow.month\n sNY = dNow.year\n \n sNum = (sNY - BASE_SESSION_YEAR)*3 + mapYear(sNM) + 1\n \n \"\"\"candidate.session_num = sNum\"\"\"\n\n candidate.key = ndb.Key(Candidate, candidate_id)\n candidate.put()\n \n \n feedback = Feedback(parent=candidate_key(candidate_id))\n \n feedback.interviewer = interviewer_id\n feedback.company = self.request.get('company')\n \"\"\"Ratings\"\"\"\n feedback.personality_scr = self.request.get('personality_scr')\n feedback.personality_descrip = self.request.get('personality_descrip')\n feedback.appearance_scr = self.request.get('appearance_scr')\n feedback.appearance_descrip = self.request.get('appearance_descrip')\n feedback.comm_scr = self.request.get('comm_scr')\n feedback.comm_descrip = self.request.get('comm_descrip')\n feedback.tech_scr = self.request.get('tech_scr')\n feedback.tech_descrip = self.request.get('tech_descrip')\n feedback.body_scr = self.request.get('body_scr')\n feedback.body_descrip = self.request.get('body_descrip')\n \"\"\"Additional Feedback responses\"\"\"\n feedback.pres_scr = self.request.get('pres_scr')\n feedback.pres_descrip = self.request.get('pres_descrip')\n feedback.exp_scr = self.request.get('exp_scr')\n feedback.rec_descrip = self.request.get('rec_descrip')\n feedback.thanks_scr = self.request.get('thanks_scr')\n \n dNow = datetime.now() \n sNM = dNow.month\n sNY = dNow.year\n sNum = (sNY - BASE_SESSION_YEAR)*3 + mapYear(sNM) + 1\n feedback.cycle = sNum\n \n feedback.put()\n \n self.redirect('/thanks')", "def generatePredictorDataTemplate(self):\n self.__pdir = Predictor.directory\n self.__predictorData = PredictorData(None)\n self.save()", "def build(self, buffer=None) -> any:\n\n # init PDF\n if buffer:\n self.PDF = canvas.Canvas(buffer, pagesize=letter, bottomup=0)\n else:\n self.PDF = canvas.Canvas(self.buffer, pagesize=letter, bottomup=0)\n\n # run funcs\n\n self.add_title()\n self.draw_image()\n self.add_org_details()\n self.add_customer_details()\n self.draw_line(225)\n self.add_due_date()\n self.add_subscription()\n\n # save the pdf\n\n self.PDF.save()\n return self.PDF", "def download_profile(self, request, user_id=None):\n current_url = '%s?%s' % (\n 
reverse(request.resolver_match.url_name, kwargs={'user_id': user_id}),\n urlencode(request.query_params)\n )\n login_url = '/signin?next=%s' % quote_plus(current_url)\n if not request.user.is_authenticated():\n return redirect(login_url)\n\n user = get_object_or_404(self.get_queryset(), pk=user_id)\n\n try:\n self.check_object_permissions(request, user)\n except NotAuthenticated:\n return redirect(login_url)\n except PermissionDenied:\n return HttpResponse(\"You do not have permission to access this estimate\")\n\n ctx = {\n 'user': user,\n 'profile': user.profile,\n 'work': user.work_set.all(),\n 'education': user.education_set.all()\n }\n\n rendered_html = render_to_string(\"tunga/pdf/profile.html\", context=ctx).encode(encoding=\"UTF-8\")\n\n if request.accepted_renderer.format == 'html':\n return HttpResponse(rendered_html)\n\n pdf_file = HTML(string=rendered_html, encoding='utf-8').write_pdf()\n http_response = HttpResponse(pdf_file, content_type='application/pdf')\n http_response['Content-Disposition'] = 'filename=\"developer_profile.pdf\"'\n return http_response", "def hs_document(self):\n doc_service = PDFKitService(html=self.html())\n return doc_service.pdf(template_name='engageletter.html')", "def principal(sessao,lst_destinatarios):\n\n arquivoPdf=str(int(time.time()*100))+\".pdf\"\n\n tmp_data=''\n tmp_data+='<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"no\" ?>\\n'\n tmp_data+='<!DOCTYPE document SYSTEM \"rml_1_0.dtd\">\\n'\n tmp_data+='<document filename=\"envelopes.pdf\">\\n'\n tmp_data+='\\t<template pageSize=\"(62mm, 32mm)\" title=\"Etiquetas de Envelope\" author=\"OpenLegis\" allowSplitting=\"20\">\\n'\n tmp_data+='\\t\\t<pageTemplate id=\"main\">\\n'\n tmp_data+='\\t\\t<pageGraphics>\\n'\n tmp_data+='\\t\\t</pageGraphics>\\n'\n tmp_data+='\\t\\t\\t<frame id=\"main\" x1=\"0.02cm\" y1=\"0.02cm\" width=\"61mm\" height=\"32mm\"/>\\n'\n tmp_data+='\\t\\t</pageTemplate>\\n'\n tmp_data+='\\t</template>\\n'\n tmp_data+=paraStyle()\n tmp_data+=destinatarios(lst_destinatarios)\n tmp_data+='</document>\\n'\n tmp_pdf=parseString(tmp_data)\n\n if hasattr(context.temp_folder,arquivoPdf):\n context.temp_folder.manage_delObjects(ids=arquivoPdf)\n context.temp_folder.manage_addFile(arquivoPdf)\n arq=context.temp_folder[arquivoPdf]\n arq.manage_edit(title='Arquivo PDF temporário.',filedata=tmp_pdf,content_type='application/pdf')\n\n return \"/temp_folder/\"+arquivoPdf", "def predict():\n return render_template(\n 'predict.html',\n year=datetime.now().year,\n )", "def _pdf(self):\n # LOG: processing_type property\n self.set_property('processing_type', 'pdf')\n xmlDoc = PDFiD(self.src_path)\n oPDFiD = cPDFiD(xmlDoc, True)\n # TODO: are there other characteristics which should be dangerous?\n if oPDFiD.encrypt.count > 0:\n self.make_dangerous('encrypted pdf')\n if oPDFiD.js.count > 0 or oPDFiD.javascript.count > 0:\n self.make_dangerous('pdf with javascript')\n if oPDFiD.aa.count > 0 or oPDFiD.openaction.count > 0:\n self.make_dangerous('openaction')\n if oPDFiD.richmedia.count > 0:\n self.make_dangerous('flash')\n if oPDFiD.launch.count > 0:\n self.make_dangerous('launch')", "def export(bill, template_dir=None, pdf_dir=None):\n # if template_dir not provided,\n # look for the template directory of this script's location\n if not template_dir:\n template_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'template')\n # If the user-defined or default template directories don't exist, raise an error\n if not os.path.exists(template_dir):\n raise OSError('Could not find the 
template directory')\n\n # If no user-defined pdf output directory, put it in a folder where this script lives\n if not pdf_dir:\n basedir = os.path.dirname(os.path.abspath(__file__))\n pdf_dir = os.path.join(basedir, 'pdfs')\n # if the default pdf output directory doesn't exist, make it\n if not os.path.exists(pdf_dir):\n os.makedirs(pdf_dir)\n\n # if the user-defined pdf_dir does not exist, raise an error\n if not os.path.exists(pdf_dir):\n raise IOError('Could not find a directory to output pdfs')\n\n # get the path to the template\n template_path = os.path.join(template_dir, 'templates', 'template.html')\n # read the template\n template = open(template_path).read()\n\n # Replace relative imports of images and CSS with the full path to the files\n # Note: I'm including the '/' in the replacement so that\n # it doesn't replace other uses for '..' such as in regular text (i.e. an ellipsis)\n template = template.replace('../', os.path.join(path2url(template_dir), ''))\n\n # Insert billing data using find/replace\n # Sort by field length longest to shortest\n # This prevents values from fields that are substrings of other fields from going in the wrong place\n # e.g. the value of \"rebate\" would be inserted into the field \"rebate_closing_balance\"\n for key, value in sorted(bill.items(), key=lambda t: len(t[0]), reverse=True):\n template = template.replace(\"__\"+key, format_value(value))\n\n # Now create the pdf\n try:\n # options = {'encoding': 'utf-8'}\n report_name = make_report_name(bill)\n output_file = os.path.join(pdf_dir, report_name)\n pdfkit.from_string(template, output_file)\n except:\n typ, value, tb = sys.exc_info()\n traceback.print_exc()\n pdb.post_mortem(tb)", "def _generate_attachment(self):\n Attachment = self.env['ir.attachment']\n ReportXml = self.env['ir.actions.report.xml']\n Report = self.env['report']\n pages = {}\n for current_order in self:\n report = ReportXml.search([('model', '=', current_order.res_model)], limit=1)\n if current_order.attachment_id: # compute page number\n # avoid to recompute the number of page each time for the attachment\n nbr_pages = pages.get(current_order.attachment_id.id)\n if not nbr_pages:\n nbr_pages = current_order._count_pages_pdf(current_order.attachment_id.datas.decode('base64'))\n pages[current_order.attachment_id.id] = nbr_pages\n current_order.write({\n 'nbr_pages': nbr_pages\n })\n elif not current_order.attachment_id and current_order.res_model and current_order.res_id and report: # check report\n # browse object and find its pdf (binary content)\n object_to_print = self.env[current_order.res_model].browse(current_order.res_id)\n bin_pdf = Report.get_pdf(object_to_print, report.report_name)\n\n # compute the name of the new attachment\n filename = False\n if report.attachment:\n filename = safe_eval(report.attachment, {'object': object_to_print, 'time': time})\n if not filename:\n filename = '%s-%s' % (current_order.res_model.replace(\".\", \"_\"), current_order.res_id)\n\n # create the new ir_attachment\n attachment_value = {\n 'name': filename,\n 'res_name': filename,\n 'res_model': current_order.res_model,\n 'res_id': current_order.res_id,\n 'datas': base64.b64encode(bin_pdf),\n 'datas_fname': filename+'.pdf',\n }\n new_attachment = Attachment.create(attachment_value)\n\n # add the new attachment to the print order\n current_order.write({\n 'nbr_pages': self._count_pages_pdf(bin_pdf),\n 'attachment_id': new_attachment.id\n })\n elif not current_order.attachment_id and current_order.res_model and current_order.res_id and 
not report: # error : no ir.actions.report.xml found for res_model\n current_order.write({\n 'state': 'error',\n 'error_message': _('The document you want to print and send is not printable. There is no report action (ir.actions.report.xml) for the model %s.') % (current_order.res_model,)\n })\n else: # error : not attachament can be generate, no attach_id or no res_model/res_id\n current_order.write({\n 'state': 'error',\n 'error_message': _('The document has no associated PDF : you have to give select an Attachment file, or set up the Object ID and Model Name fields.')\n })", "def generate_pdf(file_path_or_url, data_type, filename):\n file_path = get_pdf_file_path(filename)\n if data_type == TYPE_FILE:\n try:\n HTML(filename=file_path_or_url).write_pdf(file_path)\n finally:\n default_storage.delete(file_path_or_url)\n else:\n HTML(file_path_or_url).write_pdf(file_path)\n return filename", "def export_docs(fp, app_name):\n from otree.models import Session\n from otree.models import Participant\n from otree.views.admin import get_all_fields\n\n # generate doct_dict\n models_module = get_models_module(app_name)\n\n model_names = [\"Participant\", \"Player\", \"Group\", \"Subsession\", \"Session\"]\n line_break = '\\r\\n'\n\n def choices_readable(choices):\n lines = []\n for value, name in choices:\n # unicode() call is for lazy translation strings\n lines.append(u'{}: {}'.format(value, six.text_type(name)))\n return lines\n\n def generate_doc_dict():\n doc_dict = OrderedDict()\n\n data_types_readable = {\n 'PositiveIntegerField': 'positive integer',\n 'IntegerField': 'integer',\n 'BooleanField': 'boolean',\n 'CharField': 'text',\n 'TextField': 'text',\n 'FloatField': 'decimal',\n 'DecimalField': 'decimal',\n 'CurrencyField': 'currency'}\n\n for model_name in model_names:\n if model_name == 'Participant':\n Model = Participant\n elif model_name == 'Session':\n Model = Session\n else:\n Model = getattr(models_module, model_name)\n\n field_names = set(field.name for field in Model._meta.fields)\n\n members = get_all_fields(Model, for_export=True)\n doc_dict[model_name] = OrderedDict()\n\n for member_name in members:\n member = getattr(Model, member_name, None)\n doc_dict[model_name][member_name] = OrderedDict()\n if member_name == 'id':\n doc_dict[model_name][member_name]['type'] = [\n 'positive integer']\n doc_dict[model_name][member_name]['doc'] = ['Unique ID']\n elif member_name in field_names:\n member = Model._meta.get_field_by_name(member_name)[0]\n\n internal_type = member.get_internal_type()\n data_type = data_types_readable.get(\n internal_type, internal_type)\n\n doc_dict[model_name][member_name]['type'] = [data_type]\n\n # flag error if the model doesn't have a doc attribute,\n # which it should unless the field is a 3rd party field\n doc = getattr(member, 'doc', '[error]') or ''\n doc_dict[model_name][member_name]['doc'] = [\n line.strip() for line in doc.splitlines()\n if line.strip()]\n\n choices = getattr(member, 'choices', None)\n if choices:\n doc_dict[model_name][member_name]['choices'] = (\n choices_readable(choices))\n elif isinstance(member, collections.Callable):\n doc_dict[model_name][member_name]['doc'] = [\n inspect.getdoc(member)]\n return doc_dict\n\n def docs_as_string(doc_dict):\n\n first_line = '{}: Documentation'.format(app_name_format(app_name))\n second_line = '*' * len(first_line)\n\n lines = [\n first_line, second_line, '',\n 'Accessed: {}'.format(datetime.date.today().isoformat()), '']\n\n app_doc = getattr(models_module, 'doc', '')\n if app_doc:\n lines += 
[app_doc, '']\n\n for model_name in doc_dict:\n lines.append(model_name)\n\n for member in doc_dict[model_name]:\n lines.append('\\t{}'.format(member))\n for info_type in doc_dict[model_name][member]:\n lines.append('\\t\\t{}'.format(info_type))\n for info_line in doc_dict[model_name][member][info_type]:\n lines.append(u'{}{}'.format('\\t' * 3, info_line))\n\n output = u'\\n'.join(lines)\n return output.replace('\\n', line_break).replace('\\t', ' ')\n\n doc_dict = generate_doc_dict()\n doc = docs_as_string(doc_dict)\n fp.write(doc)", "def main():\n f_name = sys.argv[1]\n file_contents = open(f_name).read()\n C = CAST([], \"python\")\n C2 = C.from_json_str(file_contents)\n\n V = CASTToAGraphVisitor(C2)\n last_slash_idx = f_name.rfind(\"/\")\n file_ending_idx = f_name.rfind(\".\")\n pdf_file_name = f\"{f_name[last_slash_idx + 1 : file_ending_idx]}.pdf\"\n V.to_pdf(pdf_file_name)", "def download(request, ef_id):\n ef = get_object_or_404(ExamFile, id=ef_id)\n path = os.path.join(settings.MEDIA_ROOT, ef.path.path)\n response= HttpResponse(content=file(path, 'rb').read(), \n mimetype='application/pdf')\n # fn = os.path.split(ef.path.path)[1]\n # response['Content-Disposition'] = \"attachment; filename=%s\" % (fn)\n return response", "def create(self):\n\n news_mat = self.__get_news_matrix()\n topic_mat = self.get_topic_matrix()\n\n print('compare user preference and news')\n sim_mat = cosine_similarity(news_mat, topic_mat)\n\n np.save(data_path + 'similarity_matrix.npy', sim_mat)\n\n with open(data_path + 'id2topic.json', 'w') as f:\n json.dump(self.id2topic, f)", "def score_intro_model():\n k = 100\n features = [u'days_since_start', u'session_type', u'party_ALL_DEM', u'party_ALL_REP',\n u'party_BOTH', 'party_COM', u'urgency_No', u'urgency_Yes',\n u'taxlevy_No',\n u'taxlevy_Yes']\n topic_features = [\"topic_\"+str(x) for x in range(k)]\n features += topic_features\n\n trained_model_file = \"/home/ubuntu/ca_bills_project/data/extra/intro_model_100_topics_rf_10000trees.pkl\"\n with open(trained_model_file) as p:\n model = pickle.load(p)\n mc = ModelChooser([model])\n dp = DataPrep(training=False)\n dp.prepare(n_components=k, use_cached_nmf='/home/ubuntu/ca_bills_project/data/extra/nmf_100_05-23-17-08-23.pkl',\n use_cached_tfidf=\"/home/ubuntu/ca_bills_project/data/extra/cached_tfidf_real_05-23-17-05-28.pkl\", cache_tfidf=True, test=True)\n X_test, y_test = dp.subset(features)\n\n\n mc.score(X_test, y_test)", "def _CreateReport(failed_percentage):\r\n\r\n pdf = FPDF()\r\n pdf.add_page()\r\n pdf.set_font(\"Arial\", size=12)\r\n pdf.cell(200, 10, txt=\"Report\", ln=1, align='C')\r\n pdf.cell(200, 10, txt=\"Scenario ID: {0}\".format(sys.argv[2]), ln=2, align='C')\r\n\r\n # prints failures above threshold to the report\r\n for i in range(39):\r\n if failed_percentage[i] > FAILURE_THRESHOLD:\r\n pdf.cell(200, 10, txt=\"Requirement {0}: {1}% failure\".format(i + 1, failed_percentage[i]), ln=3)\r\n\r\n pdf.output(\"report.pdf\")", "def predict_model():\n # Decode the request\n data = request.data.decode(\"utf-8\")\n\n # Write data from the request in a local csv file\n test_csv = \"test_local.csv\"\n f = open(test_csv, \"w\", encoding=\"utf-8\")\n f.write(data)\n f.close()\n\n # Load the test csv file as a DataFrame\n test_df = pd.read_csv(test_csv)\n\n # Get submission DataFrame\n predictions_df = model.predict(test_df)\n\n # Send csv file as response\n res = make_response(predictions_df.to_csv(index=False))\n res.headers[\"Content-Disposition\"] = \"attachment; filename=submission.csv\"\n 
res.headers[\"Content-Type\"] = \"text/csv\"\n return res", "async def leaderboard(self, ctx):\n\t\tasync with ctx.typing():\n\t\t\ttry:\n\t\t\t\tlbFunc = functools.partial(save_leaderboard)\n\t\t\t\tawait self.bot.loop.run_in_executor(None, lbFunc)\n\t\t\t\tawait ctx.send(file=discord.File(\"leaderboard.png\"))\n\t\t\texcept:\n\t\t\t\tawait ctx.send(\n\t\t\t\t\t\"https://aninternettroll.github.io/mcbeVerifierLeaderboard/\"\n\t\t\t\t)", "def pdf_gen(report, summary=None):\n with open(\"report_content.yaml\", \"r\") as stream:\n docs = yaml.safe_load(stream)\n\n style = g_stylesheet.get(\"styles\")\n elems = [] # elements array used to build pdf structure\n pdf = SimpleDocTemplate(\n f\"{report.replay_id}_report.pdf\",\n pagesize=letter,\n leftMargin=0.75 * inch,\n rightMargin=0.75 * inch,\n topMargin=0.75 * inch,\n bottomMargin=0.75 * inch,\n )\n\n # title and subtitle and cluster info table\n elems.append(Paragraph(docs[\"title\"], style[\"Title\"]))\n elems.append(\n Paragraph(sub_yaml_vars(report, docs[\"subtitle\"]), style[\"Heading4\"])\n )\n cluster_info = pd.DataFrame.from_dict(report.cluster_details, orient=\"index\")\n elems.append(\n Table(\n df_to_np(report.cluster_details.keys(), cluster_info.transpose()),\n hAlign=\"LEFT\",\n style=g_stylesheet.get(\"table_style\"),\n )\n )\n # replay summary\n if summary is not None:\n elems.append(Paragraph(f\"Replay Summary\", style[\"Heading4\"]))\n elems.append(\n ListFlowable(\n [ListItem(Paragraph(x, style[\"Normal\"])) for x in summary],\n bulletType=\"bullet\",\n )\n )\n elems.append(Spacer(0, 5))\n\n elems.append(Paragraph(docs[\"report_paragraph\"], style[\"Normal\"]))\n\n # glossary section\n elems.append(Paragraph(docs[\"glossary_header\"], style[\"Heading4\"]))\n elems.append(Paragraph(docs[\"glossary_paragraph\"], style[\"Normal\"]))\n elems.append(\n ListFlowable(\n [ListItem(Paragraph(x, style[\"Normal\"])) for x in docs[\"glossary\"]],\n bulletType=\"bullet\",\n )\n )\n elems.append(Spacer(0, 5))\n\n # access data section\n elems.append(Paragraph(docs[\"data_header\"], style[\"Heading4\"]))\n elems.append(\n Paragraph(sub_yaml_vars(report, docs[\"data_paragraph\"]), style[\"Normal\"])\n )\n elems.append(\n ListFlowable(\n [ListItem(Paragraph(x, style[\"Normal\"])) for x in docs[\"raw_data\"]],\n bulletType=\"bullet\",\n )\n )\n elems.append(Spacer(0, 5))\n elems.append(\n Paragraph(\n sub_yaml_vars(report, docs[\"agg_data_paragraph\"]), style[\"Normal\"]\n )\n )\n\n # notes section\n elems.append(Paragraph(docs[\"notes_header\"], style[\"Heading4\"]))\n elems.append(Paragraph(docs[\"notes_paragraph\"], style[\"Normal\"]))\n elems.append(\n ListFlowable(\n [ListItem(Paragraph(x, style[\"Normal\"])) for x in docs[\"notes\"]],\n bulletType=\"bullet\",\n )\n )\n\n elems.append(PageBreak()) # page 2: cluster details\n\n # query breakdown\n build_pdf_tables(elems, docs[\"query_breakdown\"], report)\n elems.append(Spacer(0, 5))\n\n # histogram and description\n image_path = hist_gen(\n x_data=report.feature_graph[\"sec_start\"],\n y_data=report.feature_graph[\"count\"],\n title=docs[\"graph\"].get(\"title\"),\n x_label=\"Average Elapsed Time (s)\",\n )\n\n desc = Paragraph(docs[\"graph\"].get(\"paragraph\"), style[\"Normal\"])\n data = [[Image(image_path, width=300, height=200, hAlign=\"LEFT\"), desc]]\n elems.append(\n Table(data, style=TableStyle([(\"VALIGN\", (0, 0), (-1, -1), \"MIDDLE\")]))\n )\n elems.append(Spacer(0, 5))\n\n # cluster metrics table\n build_pdf_tables(elems, docs[\"cluster_metrics\"], report)\n\n 
elems.append(PageBreak()) # page 3+ measure tables\n\n build_pdf_tables(\n elems, docs[\"measure_tables\"], report\n ) # build 5 measure tables all at once\n\n # build pdf\n pdf.build(\n elems,\n onFirstPage=partial(first_page, report=report),\n onLaterPages=partial(later_pages, report=report),\n )\n os.remove(image_path)\n\n return pdf.filename", "def generate_pdf_file(self, source):\n filename = self.generate_temp_filename()\n if not filename:\n self.errors.append('filename_generation_failed')\n return\n\n try:\n transform_module = getattr(transforms, self.pdf_generator)\n except AttributeError:\n self.errors.append('wrong_generator_configuration')\n return\n\n self.filename = filename\n url = self.context.absolute_url()\n\n print_css = (self.pdf_tool.always_print_css or\n self.context.portal_type in self.pdf_tool.print_css_types)\n\n # When the source is sent through Ajax, it's already encoded\n # as a utf-8 string. When using it without javascript, the\n # source comes from a view, which always returns unicode. In\n # that case we need to encode it.\n if isinstance(source, unicode):\n source = source.encode('utf-8')\n export_file, err = transform_module.html_to_pdf(\n source,\n self.tempdir,\n filename,\n url,\n print_css,\n self.get_extra_options())\n\n if err:\n self.errors.append('pdf_creation_failed')\n return\n\n self.pdf_tool.registerPDF(filename)\n self.pdf_file = export_file\n self.pdf_file.close()", "def test_create_files(self):\n fitting_report.create_prob_group(result=self.result,\n support_pages_dir=self.dir.name,\n options=self.options)\n self.assertTrue(os.path.exists(self.result.fitting_report_link))", "def dashboard_post(request):\n template = \"pages/dashboard.html\"\n form = CreateLeaderboardForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n add_owner = data[\"add_owner\"]\n del data[\"add_owner\"] # Drop the fields not needed by leaderboard model\n ldb = Leaderboard.objects.create(**data, owner=request.user)\n if add_owner:\n ldb.participants.add(request.user) # Add the creator as participant\n return render(request, template, context=dashboard_context(request))\n else:\n ctx = dashboard_context(request)\n ctx[\"creation_form\"] = form # Send it back with error messages\n return render(request, template, context=ctx)", "def create():", "def create():", "def create_pdf(file_path: Path) -> None:\n pdf = FPDF()\n pdf.add_page()\n pdf.set_font(\"Arial\", size=12)\n with open(file_path, \"r\") as file:\n for line in file:\n pdf.cell(200, 10, txt=line, ln=1)\n pdf_dir_path: Path = Path(calculate_path(file_path))\n Path(pdf_dir_path.parent).mkdir(parents=True, exist_ok=True)\n pdf_path: str = f\"{str(pdf_dir_path)}.pdf\"\n pdf.output(pdf_path)", "def create_gar(self):\n print('Maketh the report!')\n # Date setup\n date = datetime.today().strftime('%Y-%m-%d')\n year = datetime.today().strftime('%Y')\n\n # Page setup\n geometry_options = {\"tmargin\": \"2cm\",\n \"lmargin\": \"1.8cm\",\n \"rmargin\": \"1.8cm\",\n \"headsep\": \"1cm\"}\n\n doc = pylatex.Document(page_numbers=False,\n geometry_options=geometry_options)\n\n header = self.produce_header_footer()\n\n doc.preamble.append(header)\n doc.change_document_style(\"header\")\n\n #\n # DOCUMENT BODY/CREATION\n with doc.create(pylatex.Section('GeneSippr Analysis Report', numbering=False)):\n doc.append('GeneSippr!')\n\n with doc.create(pylatex.Subsection('GeneSeekr Analysis', numbering=False)) as genesippr_section:\n with doc.create(pylatex.Tabular('|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|')) as table:\n # 
Header\n table.add_hline()\n table.add_row(self.genesippr_table_columns)\n for sample_name in self.samples:\n table_data = [sample_name]\n for data in self.genesippr_headers:\n try:\n print(sample_name, data, self.report_data['genesippr'][sample_name][data])\n table_data.append(self.report_data['genesippr'][sample_name][data])\n except KeyError:\n pass\n table.add_row(table_data)\n self.create_caption(genesippr_section, 'a', \"+ indicates marker presence : \"\n \"- indicates marker was not detected\")\n\n # Create the PDF\n doc.generate_pdf('{}_{}_{}'\n .format(os.path.join('/home/adamkoziol/Bioinformatics/sippr/gui/161104_M02466_0002_000000000-AV4G5'), 'gar', date), clean_tex=False)\n print('{}_{}_{}'.format(os.path.join('/home/adamkoziol/Bioinformatics/sippr/gui/161104_M02466_0002_000000000-AV4G5'), 'gar', date))\n # for report_name in self.report_data:\n # for sample_name in self.samples:\n # for header, value in self.report_data[report_name][sample_name].items():\n # print(report_name, sample_name, header, value)", "def create_submission_file(row_ids, predictions, use_leaks=False):\n if use_leaks:\n with timer(\"Adding leaks to submission file\"):\n predictions = add_leaks_to_submission(predictions)\n\n submission = pd.DataFrame({\"row_id\": row_ids, \"meter_reading\": predictions})\n\n validate_submission(submission)\n\n submission_dir = \"submissions\"\n os.makedirs(submission_dir, exist_ok=True)\n submission.to_csv(submission_dir + \"/submission.csv\", index=False)", "def gen_sample_report():\n sample_report().save()", "def new_title_page(data_obj, text):\n\n new_pdf_page(data_obj.pdf_obj)\n plt.axis('off')\n plt.text(0.5, 0.5, text, ha='center', va='center', fontsize=20)", "def create_report(self):\n # Base setup\n line_out = ''\n line_out += \"{:<15} | {:^15} | {:^30}\\n\".format(\"Name\", \"Donations\", \"Email\")\n line_out += (\"-\"*65)\n print(line_out)\n\n # Setup line format to recieve ordered donor info \n for name in self.all_donors:\n line = \"{:<15} | {:^15} | {:^30}\".format(name, self.r.hget(name, 'donations'), self.r.hget(name, 'email'))\n print(line)", "def create_model(self):\n pass", "def create_model(self):\n pass", "def predict():\n print(\"PREDICT ROUTE...\")\n print(\"FORM DATA:\", dict(request.form))\n \n screen_name_a = request.form[\"screen_name_a\"]\n screen_name_b = request.form[\"screen_name_b\"]\n tweet_text = request.form[\"tweet_text\"]\n \n print(\"-----------------\")\n print(\"FETCHING TWEETS FROM THE DATABASE...\") \n user_a = User.query.filter_by(screen_name=screen_name_a).first()\n user_b = User.query.filter_by(screen_name=screen_name_b).first()\n \n user_a_tweets = user_a.tweets \n user_b_tweets = user_b.tweets \n print('Fetched Tweets', len(user_a_tweets)), len(user_b_tweets)\n\n print(\"-----------------\")\n print(\"TRAINING THE MODEL...\")\n\n \n # get embeddings from our database for model\n # note model Input: the embeddings \n # output: lables: screen_names\n \n embeddings = []\n labels = []\n \n for tweet in user_a_tweets:\n embeddings.append(tweet.embedding)\n labels.append(screen_name_a)\n\n for tweet in user_b_tweets:\n embeddings.append(tweet.embedding)\n labels.append(screen_name_b)\n\n\n classifier = LogisticRegression()\n classifier.fit(embeddings, labels)\n\n print(\"-----------------\")\n print(\"MAKING A PREDICTION...\")\n\n target_text_embedding = basilica_api_client.embed_sentence(tweet_text, model='twitter')\n result = classifier.predict([target_text_embedding])\n\n \n return render_template(\"prediction_results.html\",\n 
screen_name_a=screen_name_a,\n screen_name_b=screen_name_b,\n tweet_text=tweet_text,\n screen_name_most_likely=result[0]\n )", "def create_preview(message):", "def download_assignment_student(request, pk, i):\n evalassignment = Evalassignment.objects.\\\n filter(pk=pk, evaluator=request.user).first()\n if evalassignment:\n eval_name = '%s_%s' % (evalassignment.assignment.assignmentype.title.\n replace(\" \", \"\"), i)\n filename = 'assign_%s.%s' % (eval_name, evalassignment.assignment.\n document.name.split('.')[-1])\n response = HttpResponse(evalassignment.assignment.document,\n content_type='application/force_download')\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n return response\n else:\n return redirect('gradapp:dashboard_student')", "def save_model(script_name, feature_set, model_fname):\n import requests\n import json\n from urllib.parse import urljoin\n\n model_payload = {\n \"model\": {\n \"name\": script_name,\n \"model\": {\n \"type\": \"model/ranklib\",\n \"definition\": {\n }\n }\n }\n }\n\n with open(model_fname) as modelFile:\n model_content = modelFile.read()\n path = \"_ltr/_featureset/%s/_createmodel\" % feature_set\n full_path = urljoin(ES_HOST, path)\n print(\"full_path\", full_path)\n model_payload['model']['model']['definition'] = model_content\n Logger.logger.info(\"POST %s\" % full_path)\n head = {'Content-Type': 'application/json'}\n resp = requests.post(full_path, data=json.dumps(model_payload), auth = HTTPBasicAuth(ES_User,ES_Passw),headers=head,verify=False)\n Logger.logger.info(resp.status_code)\n if resp.status_code >= 300:\n Logger.logger.error(resp.text)", "def download_report():\n entities = get_names()\n save_csv(entities)" ]
[ "0.67373437", "0.63020295", "0.6226293", "0.6087851", "0.5910283", "0.58224714", "0.57705843", "0.5737507", "0.5713494", "0.56827766", "0.5535205", "0.55348325", "0.5490096", "0.54388666", "0.5369891", "0.53277147", "0.52944934", "0.52798676", "0.5266541", "0.5252111", "0.52401656", "0.5238857", "0.5215014", "0.5207984", "0.5201335", "0.51799184", "0.5165266", "0.5165167", "0.5160144", "0.5129794", "0.51244265", "0.512403", "0.51207536", "0.51143837", "0.5094863", "0.5081981", "0.5065907", "0.50383914", "0.503795", "0.5035362", "0.5030674", "0.5029336", "0.5013086", "0.50076735", "0.4992573", "0.4983746", "0.49758387", "0.49674547", "0.49666324", "0.49651214", "0.49555036", "0.49538228", "0.49322265", "0.4926208", "0.49185532", "0.4911218", "0.49098918", "0.48921508", "0.48847815", "0.4883265", "0.48771548", "0.4875365", "0.4874843", "0.48746413", "0.48740345", "0.4873733", "0.48693094", "0.48513633", "0.48452815", "0.48419693", "0.48409572", "0.48371553", "0.48302057", "0.48268583", "0.4826748", "0.48245767", "0.48213515", "0.48194546", "0.4818638", "0.4814691", "0.4797616", "0.47972858", "0.4793916", "0.47844467", "0.4782392", "0.47693694", "0.47693694", "0.47675925", "0.47646767", "0.47634536", "0.47634187", "0.47556522", "0.47527263", "0.47379372", "0.47379372", "0.4737074", "0.4736331", "0.47326043", "0.47307032", "0.4724006" ]
0.71051025
0
View for employee development plan details
def development_plan_details(request, development_plan_id): #, employee_id ): # employee = Employee.objects.get(user__pk=request.user.pk) # employee = Employee.objects.filter(pk=int(employee_id)).first() development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id)) current_employee = Employee.objects.filter(user__pk=request.user.pk).first() all_employees = development_plan.employee_relation.all() try: development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id)) data={} development_plan_object_list=[] dev_plan={} dev_plan["id"] = development_plan.id dev_plan["deleted"] = development_plan.deleted if development_plan.type: dev_plan["type"] = development_plan.type.name # dev_plan["finished_at"] = DevelopmentPlanToEmployeeRelation.objects.get(development_plan = development_plan)\ # .finished_at dev_plan["created_at"] = development_plan.created_at dev_plan["created_by"] = development_plan.created_by.username development_plan_object_list.append({"dev_plan_details":dev_plan}) # manager_relation manager_data={} manager_data["manager_username"] = development_plan.manager_relation.user.username manager_data["manager_first_name"] = development_plan.manager_relation.user.first_name manager_data["manager_last_name"] = development_plan.manager_relation.user.last_name development_plan_object_list.append({"manager_data":manager_data}) # employee_relation employee_data={} all_employees = development_plan.employee_relation.all() if all_employees: emp_list=[] for emp in all_employees: emp_data={} emp_data["id"] = emp.user.id emp_data["username"] = emp.user.username emp_data["first_name"] = emp.user.first_name emp_data["last_name"] = emp.user.last_name emp_data["status_questions"] = emp.status_questions emp_data["dev_plan_finished_at"] = DevelopmentPlanToEmployeeRelation\ .objects.get(employee=emp, development_plan = development_plan)\ .finished_at employee_role = EmployeeRole.objects.filter(employee=emp).all() name_role_list = [] for obj in employee_role: name_role_list.append(obj.role.name) emp_data["roles"] = name_role_list emp_list.append(emp_data) employee_data={"all_employees":emp_list} else: return JsonResponse(data={"details":"Any employee has Development Plan with id={}" .format(development_plan.id)}, status=404) development_plan_object_list.append({"employee_data":employee_data}) # competence_parts all_competence_parts = development_plan.competence_parts.all() competence_list = [] questions_list = [] sliders_list = [] if all_competence_parts: for comp_part in all_competence_parts: comp_part_data={} competence_d={"competence_parts": []} comp_part_data["id"] = comp_part.id comp_part_data["title"] = comp_part.title comp_part_data["description"] = comp_part.description comp_part_data["competence_status"] = comp_part.competence_status all_questions = comp_part.question_set.all() if all_questions: for question in all_questions: question_data = {} question_data["question_id"] = question.id question_data["title"] = question.title question_data["competence_part"] = question.competence_part.id answer = Answer.objects.filter(question__id = question.id).first() #employee=current_employee if answer: question_data["answer_id"] = answer.id question_data["answer"] = answer.title questions_list.append(question_data) comp_part_data["questions"] = questions_list all_sliders = comp_part.slider_set.all() if all_sliders: for slider in all_sliders: slider_data = {} slider_data["slider_id"] = slider.id slider_data["scale"] = slider.scale slider_data["competence_part"] = slider.competence_part.id 
answer = Answer.objects.filter(slider__id = slider.id).first() #employee=current_employee if slider: slider_data["answer_id"] = answer.id slider_data["answer"] = answer.slider.scale sliders_list.append(slider_data) comp_part_data["sliders"] = sliders_list comp_part_data["created_at"] = comp_part.created_at comp_part_data["created_by"] = comp_part.created_by.username comp_part_data["updated_at"] = comp_part.updated_at comp_part_data["updated_by"] = comp_part.updated_by.username competence_keys_list = ['id', 'title', 'description', 'language_code', 'status'] if not competence_list: get_competence_data(competence_keys_list, comp_part.competence, competence_d, comp_part_data, competence_list) else: competence_found = False for competence_dict in competence_list: if competence_dict['id'] == comp_part.competence.id: competence_dict['competence_parts'].append(comp_part_data) competence_found = True break if not competence_found: get_competence_data(competence_keys_list, comp_part.competence, competence_d, comp_part_data, competence_list) development_plan_object_list.append({"competences":competence_list}) else: return JsonResponse(data={"details":"Development Plan with id={} doesn't have any Competence Part yet" .format(development_plan.id)}, status=404) data = {"dev_plan:": development_plan_object_list} return JsonResponse(status=201, data=data) except DevelopmentPlan.DoesNotExist: return JsonResponse(data={"details":"Development Plan with this id doesn't exist"}, status=404)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_active_development_plan_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n current_development_plan = DevelopmentPlan.objects.filter(\n employee_relation=current_employee,\n employee_relation__developmentplantoemployeerelation__finished_at__isnull=True).first() # is active !!!\n\n if not current_employee:\n raise PermissionDenied()\n\n if current_development_plan:\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = current_development_plan.id\n dev_plan[\"deleted\"] = current_development_plan.deleted\n if current_development_plan.type:\n dev_plan[\"type\"] = current_development_plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = current_development_plan)\\\n .finished_at\n\n dev_plan[\"created_at\"] = current_development_plan.created_at\n dev_plan[\"created_by\"] = current_development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = current_development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = current_development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = current_development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = current_development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(current_development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = current_development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n print all_questions\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id,\n employee=current_employee).first()\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for 
slider in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id,\n employee=current_employee).first()\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(current_development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n else:\n return JsonResponse(data={\"details\": \"The user with id={} doesn't have an active Development Plan\"\n .format(current_employee.user.id)}, status=404)", "def get_all_development_plans_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return JsonResponse(status=201, data=data)", "def plan_detail(request, plan_id):\n\n plan = get_object_or_404(Plan, pk=plan_id)\n\n context = {\n 'plan': plan,\n }\n\n return render(request, 'plans/plan_detail.html', context)", "def 
get_all_user_development_plans_for_manager(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if not current_employee.isEnsoUser() and current_employee.is_manager:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not int(employee_id) in [obj.id for obj in Employee.objects.filter(manager=current_employee).all()]:\n raise PermissionDenied(\"Employee with id={} is not assigned to you.\".format(employee_id), 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return JsonResponse(status=201, data=data)", "def plan_list_get(request):\n return list_by_company_guid(request, PlanModel)", "def plan_get(request):\n company = auth_api_key(request)\n plan = get_and_check_plan(request, company)\n return plan", "def plans(self):\n title = self.context.Title()\n return self.portal_catalog(portal_type='Plan', Subject=title)", "def __str__(self):\n return self.plan.title", "def show(self):\n self.parser.add_argument('plan_uuid',\n help=\"Plan uuid or name\")\n args = self.parser.parse_args()\n response = self.client.plans.find(name_or_id=args.plan_uuid)\n fields = ['uuid', 'name', 'description', 'uri']\n data = dict([(f, getattr(response, f, ''))\n for f in fields])\n cliutils.print_dict(data, wrap=72)", "def show(ctx, project_id, backend):\n try:\n project = ctx.obj['projects_db'].get(project_id, backend)\n except IOError:\n raise Exception(\"Error: the projects database file doesn't exist. 
\"\n \"Please run `taxi update` to create it\")\n\n if project is None:\n ctx.obj['view'].err(\n \"Could not find project `%s`\" % (project_id)\n )\n else:\n ctx.obj['view'].project_with_activities(project)", "def __str__(self):\n return self.plan", "def view_project():\n\n project_title = request.args.get('title')\n\n description, max_grade = hackbright.get_project_info(project_title)\n\n student_grades = hackbright.list_students_by_completed_project(project_title)\n\n return render_template(\"project_info.html\",\n title=project_title,\n description=description,\n max_grade=max_grade,\n student_grades=student_grades)", "def dashboard(request):\n employee = request.user.employee_user.first()\n widgets = list()\n # development_plans = employee.getDevelopmentPlans()\n if employee.is_manager:\n widgets.append(dict(\n # template=\"mus/_widget_waiting_developmentplans.html\",\n data=employee.getMyEmployees(),\n # title=_('Expecting preparation guides from')\n ))\n widgets.append(dict(\n # template=\"mus/_widget_todo_developmentplans.html\",\n data=employee.getMyEmployees(),\n # title=_('Preparation guides to do')\n ))\n # widgets.append(dict(\n # template = \"mus/_widget_my_developmentplans.html\",\n # data = development_plans,\n # title = _('My development plans')\n # ))\n return JsonResponse(status=200,data={\n # 'widgets': model_to_dict(widgets),\n 'employee': model_to_dict(employee),\n # 'development_plans': development_plans\n })", "def plan(self, plan_code):\r\n return pl.Plan(self, plan_code)", "def details(request):\n\treturn render(request, 'ExcelApp/main.html')", "def view(self, parent, **kargs):\n design = Service('Design')\n return design.view_list(parent, self, **kargs)", "def show():\n info(str(Project))", "def display_accounts_details():\n return Records.display_records()", "def show_department(id_: int):\n\n logger.debug('Routed to /departments/%i', id_)\n titles = ['Name', 'Average Salary', 'Employees', 'E-mail']\n department = None\n\n try:\n department = ds.get(id_)\n except IntegrityError:\n logger.error(\"Can't find employee with id %i\", id_)\n abort(404)\n\n logger.info('Get department %s', department.name)\n return render_template('department.html',\n title=f'Department {department.name}',\n table_title=f'Department: {department.name}',\n headers=titles,\n department=department)", "def committee_show(request, pk):\n committee = Committee.objects.get(pk=pk)\n\n delegates = Delegate.objects.filter(committee_id=pk)\n\n context = {\"committee\": committee, \"delegates\": delegates}\n template = \"jurycore/committee_show.html\"\n return render(request, template, context)", "def get_plan(self):\n\t\tresponse = self.client.get(self._endpoint + \"/plan\")\n\t\tplan = response.json['plans']\n\t\tplan = list(plan.items())[0][1]\n\t\treturn Plan(plan['plan_id'],data=plan)", "def plan(self):\n\n plan = f\"\"\"\n Input parameters: {self.params}\n Product: {self.product}\n\n Source code:\n {self.source_code}\n \"\"\"\n\n print(plan)", "def get_and_display_project():\n\n project = request.args.get('project')\n\n title, description, max_grade = hackbright.get_project_by_title(project)\n\n\n github_grade_list = hackbright.get_grades_by_title(project)\n\n return render_template(\"project_info.html\",\n title=title,\n description=description,\n max_grade=max_grade,\n github_grade_list=github_grade_list)", "def org_details(request, org_id):\n org = Organization.objects.get(pk=org_id)\n maps = VisitingCards.objects.filter(organization=org)\n designations = [m.designation for m in maps]\n 
users = [m.user for m in maps]\n # Remove None objects\n from operator import is_not\n from functools import partial\n designations = filter(partial(is_not, None),designations)\n users = filter(partial(is_not, None),users)\n\n return render_to_response('organization/details.html',\n {\"org\": org, \"maps\": maps, \"designations\": designations, \"users\":users},\n context_instance=RequestContext(request))", "def all_plans(request):\n\n plans = Plan.objects.all()\n\n context = {\n 'plans': plans,\n }\n\n return render(request, 'plans/plans.html', context)", "def show(self):\n return self._project.show()", "def get_and_check_plan(request, company):\n model = PlanModel(request.session)\n guid = request.matchdict['plan_guid']\n plan = model.get(guid)\n if plan is None:\n raise HTTPNotFound('No such plan {}'.format(guid))\n if plan.company_guid != company.guid:\n raise HTTPForbidden('You have no permission to access plan {}'\n .format(guid))\n return plan", "def department(department_id):\n # gather data from db about all employees\n return render_template(\"department.html\",\n department_id=department_id)", "def show_project():\n\n title = request.args.get('title')\n\n title, description, grade = hackbright.get_project_by_title(title)\n\n grade_list = hackbright.get_grades_by_title(title)\n\n html = render_template(\"project.html\", title=title,\n description=description, grade=grade,\n grade_list=grade_list)\n\n return html", "def report_development(request):\n q = Q(app_status__name__iequals='Current Version') # actual?\n q = q | Q(app_status__name__iequals='In Development') # projected?\n q = q | Q(app_status__name__iequals='In Suspense') # supense\n q = q | Q(app_status__name__iequals='Unassigned') # TBD?\n apps = Application.objects.filter(q).values('release_date', 'release', 'acronym', 'sr_number', 'owner_org', 'nasa_requester', 'release_change_description', 'app_status__name').order_by('release_date', 'acronym', 'release')\n return render_to_response('report/app_pipeline_abbrev.html',\n {'object_list': apps,\n 'search_suggestions': _search_suggestions(),\n },\n context_instance=RequestContext(request));", "def describe_provisioned_product_plan_single_page(self, **kwargs):\n return slurp(\n 'describe_provisioned_product_plan',\n self.describe_provisioned_product_plan,\n 'ProvisionedProductPlanDetails',\n **kwargs\n )", "def follow_workoutplan(request, pk):\n return render(request, 'workouts/starting_date_form.html')", "def plans():", "def computer_detail(request, computer_id):\n\n computer = get_object_or_404(Computer, pk=computer_id)\n current_assignment = EmployeeComputer.objects.filter(computer_id=computer_id).filter(date_revoked=None)\n assignment_history = EmployeeComputer.objects.filter(computer_id=computer_id).exclude(date_revoked=None).order_by('-date_assigned')\n\n context = {\n \"computer\": computer,\n \"current_assignment\": current_assignment,\n \"assignment_history\": assignment_history\n }\n\n return render(request, \"agileHR/computer_detail.html\", context)", "def display_project_info(project_name):\n\n # project = request.args.get('project')\n\n title, description, max_grade = hackbright.get_project_by_title(project_name)\n\n grades = hackbright.get_grades_by_title(project_name)\n\n return render_template(\"project_info.html\",\n title=title,\n description=description,\n grade=max_grade,\n grades=grades)", "def delegation_show(request, pk):\n delegation = Delegation.objects.get(pk=pk)\n\n delegates = 
Delegate.objects.filter(delegation_id=pk).order_by(\"committee__name\")\n\n context = {\"delegation\": delegation, \"delegates\": delegates, \"delegation_show\": True}\n template = \"jurycore/delegation_show.html\"\n return render(request, template, context)", "def usecases(request):\n\n context = {\n\n }\n\n return render(request, 'hydraviewer/usecases.html', context)", "def projectdetails(http_request, project_id=0):\n\tp = get_object_or_404(Project, pk=project_id)\n\treturn render_to_response('project_detail.html', {'project': p})", "def project_detail(request, project_id):\n try:\n project = Project.objects.get(pk=project_id)\n project.description = markdown.markdown(bleach.clean(project.description, strip=True), extensions=['markdown.extensions.fenced_code'])\n p2 = Project.objects.get(pk=project_id)\n user_profile = UserProfile.objects.get(email=request.session['email'])\n submissions_list = Submission.objects.filter(project=project)\n except Project.DoesNotExist:\n raise Http404(\"Project does not exist\")\n context = {'project': project, 'submissions_list':submissions_list, 'current_user': request.session['email'], 'user_profile': user_profile}\n return render(request, 'projects/details.html', context)", "def report_development(request):\n apps = Application.objects.filter(app_status__name__icontains='Development').order_by('acronym', 'release')\n return render_to_response('application/search_results.html',\n {'object_list': apps,\n 'search_suggestions': _search_suggestions(),\n },\n context_instance=RequestContext(request));", "def departments():\n # gather data from db about all departments\n return render_template(\"departments.html\")", "def department(department_id):\n\n department_obj = Department.query.get_or_404(department_id)\n employees = Employee.query.filter_by(department_id=department_id)\n return render_template('department/department.html',\n department=department_obj, employees=employees)", "def plan(self):\n raise NotImplementedError('You must implement the plan() method '\n 'yourself!')", "def edit_employee(request, employee_id):\n employee = Employee.objects.get(pk=int(employee_id))\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n\n assert isinstance(employee, Employee)\n assert isinstance(current_employee, Employee)\n\n # if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n # raise PermissionDenied()\n\n if not current_employee.hasAccessTo(employee):\n raise PermissionDenied()\n\n form = EditEmployeeForm(request.user, employee, {\n 'first_name': employee.user.first_name,\n 'last_name': employee.user.last_name,\n 'email': employee.user.email,\n 'manager': employee.manager.id if employee.manager else 0,\n 'language_code': employee.language_code,\n # 'development_plan_type': employee.development_plan_type.id,\n 'is_manager': employee.is_manager\n })\n if 'manager' in form.fields:\n managerQS = Employee.objects.filter(is_manager=True, company__pk=employee.company.pk)\n form.fields['manager'].queryset = managerQS\n # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(\n # Q(company__pk=employee.company.pk) | Q(company__isnull=True)\n # )\n is_me = employee.user.pk == request.user.pk\n return TemplateResponse(\n request,\n 'mus/edit_employee_form.html',\n {\n 'edit_employee_form': form,\n 'employee_id': employee_id,\n 'me': is_me,\n 'name': employee.user.get_full_name()\n }\n )", "def user_project_view(cls, user, project):\r\n pass", "def show(ctx):\n skale = 
ctx.obj['skale']\n # from skale.utils.contracts_provision.main import add_test_permissions\n # add_test_permissions(skale)\n show_all_schains_names(skale)", "def show_equipments(self): \n database = Database('data/database.db')\n equipments = database.read_equipments()\n view = Template(filename=\"view/template.html\", lookup=lookup)\n \n \n return view.render(\n rows = [[item.number, item.name, item.installation_number] for item in equipments],\n pageTitle = \"Équipements\",\n tableTitle = \"Liste de tous les équipements\",\n ths = [\"Numéro\", \"Nom\", \"Numéro d'installation\"]\n )", "def get(self, request):\n career_planning_id = request.GET.get('career_planning_id')\n LOGGER.info(\"CareerPlanning id:%s\", career_planning_id)\n career_planning = CareerPlanning.objects.get(id=career_planning_id)\n career_planning_dict = model_to_dict(career_planning)\n return Response({\"status\": \"SUCCESS\", \"data\": career_planning_dict})", "def test_project_detail(self):\n rv = self.app.get(\"/Assignment0\")\n self.assertIn(\"Assignment0\", rv.data)\n self.assertIn(\"2015-02-04 21:57:12.156363\", rv.data)\n self.assertIn(\"221\", rv.data)\n self.assertIn(\"commit assignment0\", rv.data)\n\n self.assertIn(\"Assignment0/Procfile\", rv.data)\n self.assertIn(\"Assignment0/README.md\", rv.data)", "def purchase_indent_show(request, request_id):\n purchase_indent_request = get_object_or_404(PurchaseIndentRequest, pk=request_id)\n current_employee = request.user.employee_set.all()[0]\n\n # Check if logged in user is indenter, indenter's HOD, JAO or DR\n if purchase_indent_request.indenter == current_employee or \\\n purchase_indent_request.indenter.department.hod_id == current_employee.id or \\\n request.user.groups.filter(name__in=['JrAO_AccountsDepartment', 'DR_AccountsDepartment',\n 'AccountsDepartment', 'PurchaseDepartment']).exists():\n return render(request, 'purchase/purchase_indent/show.html',\n {'purchase_indent_request': purchase_indent_request})\n\n else:\n return PermissionDenied", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def plante_info(id):\n plante = get_plante(id)\n return render_template(\n \"plante-info.html\",\n plante = plante,\n title = plante.get_name(),\n parterre = get_parterre(plante.get_parterre()))", "def display_employee(self):\n print \"[Name: %s] [Salary: %d]\" % (self.name, self.salary)", "def list_departments():\n \t check_admin()\n\n #check all the departments in the database and assign them to a variable.departments \n \t departments = Department.query.all()\n\n \t return render_template('admin/departments/departments.html',departments = departments,title = \"Departments\")", "def plans(request):\n results = Product.objects.filter(category__icontains='P')\n stars = Product.objects.annotate(\n avg_review=Avg('productreview__rating'),\n )\n context = {\n 'products': results,\n 'stars': stars\n }\n if not results:\n messages.error(request, \"No plans as of yet, that will change soon!\")\n return redirect(reverse('products'))\n else:\n return render(request, \"products.html\", context)", "def get_floor_plan(port_id):\n url = 'https://api.archisketch.com/v1/public/projects/'\n response = requests.get(url + port_id + '/detail')\n response = response.json()['project']\n floor_plan = response['floorplans'][0]\n return floor_plan", "def model_plan_feature(cfg, model, developer_gen):\n model.ticket = cfg[\"ticket_id_template\"] % (choose_in(1, cfg[\"repo_age_in_days\"]),)\n model.planned = choose_in(1, 
cfg[\"max_commits_per_branch\"])\n model.developer = next(developer_gen)\n return model", "def show_all_information():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n all_certs = employee_certification.query.all()\n \n return render_template(\"admin.html\", employees = employees, all_certs = all_certs)", "def print_details(self):\n self.view.print_details()", "def show(self):\n if self.columns is None:\n return\n\n now = time.time()\n if self.columns != [\"default\"]:\n self.ctxt.sort_fields = None\n else:\n self.ctxt.sort_fields = []\n\n df = self._invoke_sqobj(self.sqobj.get,\n hostname=self.hostname, columns=self.columns,\n namespace=self.namespace,\n query_str=self.query_str,\n )\n\n self.ctxt.exec_time = \"{:5.4f}s\".format(time.time() - now)\n\n if not self.format or (self.format == 'text'):\n self.format = 'devconfig'\n return self._gen_output(df)", "def viewProject(self, projectId=None,size=None):\n\n uri = \"/v1/projects/\"\n if projectId:\n uri = uri + str(projectId)\n if size==0:\n uri =uri + \"?size=0\"\n response = self.client.get(uri)\n return response", "def show_overview_of_all_employees(self):\n\n print(\"OVERVIEW OF EMPLOYEES\\n\")\n\n employees_ob_list = self.llapi.get_employee_overview()\n \n for employee_ob in employees_ob_list:\n print(employee_ob.print_info_in_line(\"*\"))\n \n print(f\"\\nNAN AIR has {len(employees_ob_list)} employees\")\n\n print(\"\\nB Back\\n\")\n\n action_str = self.choose_action([\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"b\"])\n\n if action_str == \"b\":\n return", "def team_details(request, id):\n template = loader.get_template('team/details.html')\n\n try:\n team = Team.objects.get(pk=id)\n team_members = User.objects.filter(profile__team=team)\n\n context = {\n 'team_name': team.name,\n 'team_info': team.information,\n 'team_logo': team.logo,\n 'team_members': team_members,\n 'days': Information.getDaysToContest()\n }\n\n except Team.DoesNotExist:\n context = None\n\n return CustomHttpResponse.send(template, context, request)", "def new_flight_plan(self):\n r = requests.post(self.base_url + f'/users/{self.username}/flight-plans')\n return r.text", "def user_project_view(cls, user, project):\n pass", "def plans():\n results = []\n if 'qry' in request.args:\n look_for = request.args['qry']\n if look_for[0] == '*':\n look_for = ''\n zipcode = request.args['zipcode']\n\n try:\n plan = request.args['plan']\n except KeyError:\n return None\n\n # If this is a medicaid or private plan\n where = tools.get_location(zipcode)\n if where:\n if plan in ('medicaid', 'private'):\n state = where.STATE\n results = PlanNames.by_state(state, look_for, plan=='medicaid')\n results = [r.plan_name for r in results]\n if state == 'OH':\n results.append('OH State Medicaid')\n elif plan == 'medicare':\n county_code = where.GEO.COUNTY_CODE\n ma_region = where.GEO.MA_REGION_CODE\n pdp_region = where.GEO.PDP_REGION_CODE\n results = Plans.find_in_county(county_code, ma_region, pdp_region, look_for)\n\n return jsonify(sorted(results))", "def detail_assignmentype(request, pk):\n prof = request.user.prof\n context = {'prof': prof}\n assignmentype = Assignmentype.objects.filter(pk=pk, prof=prof).first()\n assignments = assignmentype.assignment_set.\\\n annotate(std=StdDev('evalassignment__grade_assignment'),\n mean=Avg('evalassignment__grade_assignment'))\n if 
assignmentype:\n context['assignmentype'] = assignmentype\n context['assignments'] = assignments\n context['range_grades'] = range(assignmentype.nb_grading)\n return render(request, 'gradapp/detail_assignmentype.html',\n context)\n else:\n return redirect('gradapp:list_assignmentypes_running')", "def plans(self):\r\n return Plans(self)", "def view_cases(context,case_id):\n\n adapter = context.obj['adapter']\n\n if case_id is not None:\n results = adapter.find_case({'case_id': case_id})\n\n else:\n results = adapter.find_cases({})\n\n click.echo(pprint(results))", "def __str__(self):\n\n return self.department_name", "def detail(request, reachcode):\n lake = get_object_or_404(Lake, reachcode=reachcode)\n photos = Photo.objects.filter(lake=lake)\n documents = Document.objects.filter(lake=lake)\n plants = lake.plants.all()\n return render(request, \"lakes/detail.html\", {\n \"lake\": lake,\n \"photos\": photos,\n \"documents\": documents,\n \"plants\": plants,\n })", "def list(cls):\n return cls().requests.get('plan')", "def get(cls, plan_id):\n return cls().requests.get(f\"plan/{plan_id}\")", "def proposal(request):\n context={\n\n\n }\n\n return render(request, 'valor_airquality/proposal.html', context)", "def employee():\n return Response(render_template('employee/employee.html'))", "def record_detail(request, slug, pk):\n # Try except to make sure the user is a member of this project\n try:\n ProjectMember.objects.get(user=request.user, project=Project.objects.get(slug=slug))\n except ObjectDoesNotExist:\n # User is not a member\n return HttpResponse(\"You're trying to access a project you're not a member of or a project that does not exist.\")\n else:\n # User is a member, details are provided and template is rendered.\n record = get_object_or_404(models.Record, pk=pk)\n project = models.Project.objects.get(slug=slug)\n template = 'records/record_detail.html'\n data = forms.ShowRecordForm(data=model_to_dict(record), entry=record.entry_type)\n context = {\n 'record':record,\n 'project':project,\n 'userperm':project.memberships.get(user=request.user),\n 'data':data\n }\n return render(request,template,context)", "def printing_view(request):\n committees = Committee.objects.all().order_by(\"name\")\n\n context = {\"committees\": committees}\n template = \"jurycore/printing_view.html\"\n return render(request, template, context)", "def display_departmentlist():\n\tdeptid = 0\n\tprint\n\tprint '[*] Fetching departments list'\n\n\t# call the api function\n\tsupportdepartments = whmcs.getsupportdepartments()\n\tif supportdepartments == None:\n\t\tprint '[x] WHMCS getsupportdepartments API function call failed.'\n\t\tprint '[!] exiting.'\n\t\t_exit(0)\n\n\t# reconnect if ssl or url error orccured\n\twhile supportdepartments == 'sslerror' or supportdepartments == 'urlerror':\n\t\tprint '[!] Re-establishing connection after 5 seconds'\n\t\ttry: time.sleep(5)\n\t\texcept KeyboardInterrupt: print '\\n[!] exiting.'; _exit()\n\t\tsupportdepartments = whmcs.getsupportdepartments()\n\n\tresult = supportdepartments.get('result')\n\ttotalresults = supportdepartments.get('totalresults')\n\tif result != 'success' or totalresults == 0:\n\t\tprint '[x] Unable to find any support departments on (%s).' % (parser.get('whmcs', 'server'))\n\t\tprint '[x] %s.' 
% supportdepartments.get('message')\n\t\t_exit()\n\n\t#############################\n\t## Display Department List ##\n\t#############################\n\t# Eg: {'departments': { 'department': [{'id': ,'name': ,'awaitingreply': ,'opentickets': ,}, {...}]}}\n\n\tdepartments = supportdepartments.get('departments').get('department')\n\trowformat = '| %-5s | %-20s | %-15s | %-15s |'\n\theader = ('ID', 'Department', 'Awaiting Reply', 'Open Tickets')\n\ttitle = rowformat % header\n\tprint '-' * len(title)\n\tprint title\n\tprint '-' * len(title)\n\tdeptlist = []\n\tfor department in departments:\n\t\tdeptid = department['id']\n\t\tdeptlist.append(deptid)\n\t\tdeptname=department['name']\n\t\tif len(deptname) > 20:\n\t\t\tdeptname = deptname[:20-4]+'...'\n\t\tprint rowformat % (deptid, deptname, department.get('awaitingreply'), department.get('opentickets'))\n\t\tprint '-' * len(title)\n\n\t# Display department ID selection prompt\n\twhile 1:\n\t\ttry:\n\t\t\tdeptid = raw_input('[+] Select Department ID: ')\n\t\texcept KeyboardInterrupt:\n\t\t\tprint '\\n[!] exiting.cleanly.'\n\t\t\texit()\n\n\t\tif type(deptid) != int and deptid not in deptlist:\n\t\t\tprint '[!] Invalid Department ID (%s).' % deptid\n\t\telse:\n\t\t\tbreak\n\treturn deptid", "def view_experiment(request,id):\n\texp = Experiment.objects.get(id=id)\n\tpossibly_related = get_related(exp)\n\treturn list_detail.object_detail(request,\n\t\t\t\t\t\t\t\t\tqueryset=Experiment.objects.filter(id=id),\n\t\t\t\t\t\t\t\t\tobject_id=exp.id,\n\t\t\t\t\t\t\t\t\ttemplate_name='experiments/experiment.html',\n\t\t\t\t\t\t\t\t\textra_context= {\"possibly_related\" : possibly_related})", "def portfolio_detail():\n return render_template('portfolio/portfolio.html')", "def plan(self):\n return read_small_file(self.homeDirectory + \"/.plan\")", "def studio_preview_view(self, context):\r\n fragment = Fragment()\r\n contents = []\r\n\r\n for child in self.descriptor.get_children():\r\n rendered_child = self.runtime.get_module(child).render('student_view', context)\r\n fragment.add_frag_resources(rendered_child)\r\n\r\n contents.append({\r\n 'id': child.location.to_deprecated_string(),\r\n 'content': rendered_child.content\r\n })\r\n\r\n fragment.add_content(self.system.render_template('vert_module.html', {\r\n 'items': contents\r\n }))\r\n\r\n return fragment", "def show(self, projectorSerial=\"*\"):\n heads = self.fieldNames\n\n # Note that apparently table and field names cannot be parameterized\n # in SQLite execute() statements.\n if projectorSerial == \"*\":\n\n self._db._c.execute(\"SELECT * FROM ProjectorStatus\")\n else:\n\n self._db._c.execute(\"SELECT * FROM ProjectorStatus WHERE projectorSerial = ?\", (projectorSerial,))\n\n rows = self._db._c.fetchall()\n\n # Set up a container in which to hold the last error record.\n errorRecord = \"\"\n \n # First calculate the maximum lengths for each column.\n lengths = map(len, heads)\n for row in rows:\n errorRecord = row[-1]\n lengths = map(max, lengths, map(len, row))\n lengths[-1] = len(heads[-1])\n \n # Create a format string for the maximum lengths.\n formatString = (\"|{{:^{}}}\" * len(heads) + \"|\").format(*lengths)\n\n # Print the heads, then the contents.\n headLine = formatString.format(*heads)\n border = \"-\" * len(headLine)\n print(\"ProjectorStatus\")\n print(border)\n print(headLine)\n print(border)\n\n # Remake the format string right-justified.\n formatString = (\"|{{:>{}}}\" * len(heads) + \"|\").format(*lengths)\n for row in rows:\n listRow = list(row)\n listRow[-1] = \"*\"\n 
print(formatString.format(*listRow))\n print(border)\n\n print(\"* The error record for the last projector in the table above is this:\")\n print(errorRecord)", "def plans(self):\r\n return pl.Plans(self)", "def showcase(request):\n\n showcases = Showcase.objects.all\n context = {\n 'showcases': showcases,\n }\n\n return render(request, 'showcase/showcase.html', context)", "def projects_view(request):\n\n # The projects to be displayed. Only the ones in which the logged in user is involved\n projects = request.user.projets.all().order_by('name')\n return render(request, 'projects.html', locals())", "def show_all_departments():\n\n logger.debug('Function show_all_departments(). Routed to /departments')\n titles = ['Name', 'Average Salary', 'Employees']\n departments = ds.get_all()\n logger.info('Get list of departments, length is %i', len(departments))\n return render_template('departments.html',\n title='Departments',\n table_title='List of Departments',\n headers=titles,\n departments=departments)", "def show_completed_design(completed_design):\n print(\"\\nThe following models have been printed:\")\n for completed_designs in completed_design:\n print(completed_designs)", "def show_facilities(request, geo_code, dataset):\n geo = Geography\\\n .objects\\\n .get(geo_code=geo_code, version='2011')\n groups = {\n 'higher_education': {\n 'model': HigherEducation,\n 'table': HigherTable,\n 'name': 'Higher Education Institutions'\n },\n 'basic_education': {\n 'model': BasicEducation,\n 'table': BasicTable,\n 'name': 'Basic Education Institutions'\n },\n 'health': {\n 'model': HealthFacilities,\n 'table': HealthTable,\n 'name': 'Health Facilities'\n }\n }\n query = groups[dataset]['model']\\\n .objects\\\n .filter(geo_levels__overlap=[geo_code])\n table = groups[dataset]['table'](query)\n table.paginate(page=request.GET.get('page', 1), per_page=25)\n return render(request, 'facilities.djhtml', {\n 'facilities': table,\n 'geo': geo,\n 'name': groups[dataset]['name']\n })", "def home():\n departments = queries.get_departments_with_avg_salary()\n return render_template('department/departments.html',\n departments=departments)", "def view_projects(request):\n current_user=request.user\n current_user_name=current_user.username\n projects=Project.objects.all()\n return render(request, 'view_projects.html',{'projects':projects, 'current_user_name':current_user})", "def desks():\n desks = Desk.query.all()\n return render_template('desks.html', desks=desks)", "def view_team_page(request, team_pk):\n\t\n\tselected_team = ChallengeTeam.objects.get(pk = team_pk)\n\t\n\tusers = selected_team.team_members.all()\n\t\n\tteam_name = selected_team.team_name\n\t\n\tall_results = get_team_results(users, selected_team.challenge.schedule)\n\tteam_consistency = all_results[\"consistency\"]\n\tteam_completion = all_results[\"completion\"]\n\t\n\tmember_names = []\n\tfor usr in users:\n\t\tprint usr.first_name + \" \" + usr.last_name\n\t\tmember_names.append(usr.first_name + \" \" + usr.last_name)\n\t\t\n\tjoin_control = \"join\"\n\tif(request.user in selected_team.team_members.all()):\n\t\tjoin_control = \"leave\"\n\telif(selected_team.challenge.invite_only and not request.user in selected_team.invited.all()):\n\t\tjoin_control = \"invite\"\n\t\n\tcontext = RequestContext(request, { \"team_pk\" : team_pk, \"name\" : team_name, \"members\" : member_names, \"consistency\" : team_consistency, \"completion\" : team_completion, \"join_control\" : join_control, \"messages\" : messages })\n\treturn 
render_to_response(\"encourage/view_team.html\", context)", "def test_get_current(self, rf, projects):\n # get queryset\n request = rf.get(\"/projects/my\")\n view = MyProjectListView()\n view.setup(request)\n view.dispatch(request)\n view.get_queryset()\n qs = view.get_current()\n\n # slavic working group grant ended so it is \"past\"\n assert projects[\"derrida\"] in qs\n assert projects[\"pliny\"] in qs\n assert projects[\"ocampo\"] in qs\n assert projects[\"slavic\"] not in qs", "def my_dashboard_print(request):\n #Get the associated contact for our user\n user_con = request.user.contact\n qs_proj_assoc, qs_task_assoc = get_tiered_upcoming(user_con)\n\n #Get the projects associated with the user\n user_proj_table = table_assoc.ProjectAssocTable_Printable(qs_proj_assoc)\n #Get the tasks associated with the user\n user_task_table = table_assoc.TaskAssocTable_Printable(qs_task_assoc)\n\n # Render the HTML template index.html with the data in the context variable\n return render(\n request,\n 'my_dashboard_printable.html',\n context={\n 'user_con':user_con,\n 'user_proj_table':user_proj_table,\n 'user_task_table':user_task_table,\n },\n )", "def print_event_report(self):\n data = {\n 'ids': self.ids,\n 'model': self._name,\n 'form': {\n 'event_start_date': self.event_start_date,\n 'event_end_date': self.event_end_date,\n 'agenda': self.env.context.get('default_agenda_id'),\n },\n }\n return self.env.ref('agenda_esi.recap_report').report_action(self, data=data)", "def printDesignVariables(self):\n print(\"-\" * 85)\n print(\"{:>30}{:>20}{:>20}\".format(\"CSM Design Parameter\", \"Name\", \"Value\"))\n print(\"-\" * 85)\n for dvName in self.DVs:\n DV = self.DVs[dvName]\n print(f\"{DV.csmDesPmtr:>30}{DV.name:>20}{DV.value:>20}\")", "def workoutplan_detail(request, pk):\n template_data = {}\n plan = get_object_or_404(WorkoutPlan, pk=pk)\n workouts = Workout.objects.filter(workout_plan__id=pk).order_by('plan_week', 'plan_day')\n\n for workout in workouts:\n if workout.plan_user.filter(username=request.user.username).exists():\n template_data['following'] = True\n else:\n template_data['following'] = False\n\n if request.method == 'POST':\n form = StartingDateForm(request.POST)\n if form.is_valid():\n starting_date = form.cleaned_data.get('workout_day')\n for workout in workouts:\n if workout.plan_week == 1:\n if workout.plan_day == 1:\n workout.plan_user.add(request.user)\n workout.workout_day = starting_date\n workout.save()\n else:\n workout.plan_user.add(request.user)\n new_date = starting_date + timedelta(days = workout.plan_day - 1)\n workout.workout_day = new_date\n workout.save()\n else:\n new_day = 7 * (workout.plan_week - 1) + (workout.plan_day - 1)\n new_date = starting_date + timedelta(days = new_day)\n workout.plan_user.add(request.user)\n workout.workout_day = new_date\n workout.save()\n messages.success(request, f'This workout plan has been added to your calendar')\n return HttpResponseRedirect(reverse('workout-program-detail', kwargs={'pk': pk}))\n else:\n form = StartingDateForm()\n\n form = StartingDateForm()\n template_data['form'] = form\n template_data['workouts'] = workouts\n template_data['plan'] = plan\n\n return render(request, 'workouts/workoutplan_detail.html', template_data)", "def human_print_plan(plan: object):\n print(f'Name: {plan[\"name\"]}')\n print(f'Description: {plan[\"description\"] if \"description\" in plan else \"N/A\"}')\n print(f'Services: {BackupServicePlan.service_list_to_str(plan[\"services\"])}')\n print(f'Default: {(plan[\"default\"] if \"deafult\" 
in plan else False)!s}')\n\n # If the are no tasks return\n if not plan[\"tasks\"]:\n return\n\n print()\n print('Tasks:')\n task_name_pad = 5\n schedule_pad = 10\n for task in plan['tasks']:\n if len(task['name']) > task_name_pad:\n task_name_pad = len(task['name'])\n\n task['schedule_str'] = BackupServicePlan.format_schedule(task['schedule'])\n if len(task['schedule_str']) > schedule_pad:\n schedule_pad = len(task['schedule_str'])\n\n task_name_pad += 1\n schedule_pad += 1\n\n header = f'{\"Name\":<{task_name_pad}} | {\"Schedule\":<{schedule_pad}} | Options'\n print(header)\n print('-' * (len(header) + 5))\n\n for task in plan['tasks']:\n options = BackupServicePlan.format_options(task)\n print(f'{task[\"name\"]:<{task_name_pad}} | {task[\"schedule_str\"]:<{schedule_pad}} | {options}')", "def run_view(self, expanded, unexpanded) :\n\t\treturn self.manage_view_properties(expanded, unexpanded, \"\", perms = \"View\")" ]
[ "0.6865501", "0.6419754", "0.64083785", "0.6235996", "0.6046271", "0.60325843", "0.60079944", "0.5864522", "0.5858046", "0.58436126", "0.5818266", "0.57469684", "0.5691461", "0.5501445", "0.54863083", "0.5485314", "0.5460011", "0.54553616", "0.54523605", "0.5448519", "0.54469216", "0.54441017", "0.5435253", "0.54303443", "0.5428215", "0.541232", "0.53913134", "0.53536326", "0.53465587", "0.534052", "0.533517", "0.5322604", "0.53222173", "0.5304521", "0.529304", "0.52823246", "0.52796006", "0.52775544", "0.5260662", "0.5242532", "0.5240547", "0.52321696", "0.5209872", "0.52055836", "0.5187524", "0.51829827", "0.51722485", "0.5162948", "0.5158022", "0.5150185", "0.5144987", "0.5143525", "0.51399606", "0.5130683", "0.5122746", "0.5117478", "0.5111648", "0.5109115", "0.510276", "0.51001364", "0.5073667", "0.50732094", "0.506858", "0.5065471", "0.5059538", "0.5056129", "0.50557756", "0.5050509", "0.5049115", "0.5048668", "0.50361896", "0.5033961", "0.5028105", "0.50192106", "0.5016597", "0.5012059", "0.5002036", "0.50013274", "0.49966246", "0.49881768", "0.4979482", "0.49760813", "0.49756348", "0.49715963", "0.4951526", "0.49514365", "0.4951251", "0.4937955", "0.49367094", "0.4935973", "0.49338603", "0.49312174", "0.49280542", "0.49221948", "0.49191073", "0.4911417", "0.49108395", "0.49042746", "0.4903673", "0.49035984" ]
0.68573505
1
View a list of user's development plans for manager
def get_all_user_development_plans_for_manager(request, employee_id):
    current_employee = Employee.objects.get(user__pk=request.user.pk)
    user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()
    employee = Employee.objects.filter(pk=int(employee_id)).first()

    if not current_employee:
        raise PermissionDenied("You don't have any employee assigned to you.", 401)

    if not current_employee.isEnsoUser() and current_employee.is_manager:
        raise PermissionDenied()

    actions = employee.action_set.all()

    if not int(employee_id) in [obj.id for obj in Employee.objects.filter(manager=current_employee).all()]:
        raise PermissionDenied("Employee with id={} is not assigned to you.".format(employee_id), 401)

    if user_development_plans:
        data={}
        user_development_plans_list = []
        for plan in user_development_plans:

            development_plan_object_list=[]
            dev_plan = {}

            dev_plan["id"] = plan.id
            dev_plan["deleted"] = plan.deleted
            if plan.type:
                dev_plan["type"] = plan.type.name
            dev_plan["finished_at"] = DevelopmentPlanToEmployeeRelation.objects\
                .get(employee=current_employee, development_plan = plan).finished_at
            dev_plan["created_at"] = plan.created_at
            dev_plan["created_by"] = plan.created_by.username

            development_plan_object_list.append({"dev_plan_details":dev_plan})

            manager_data = {}
            manager_data["manager_username"] = plan.manager_relation.user.username
            manager_data["id"] = plan.manager_relation.user.id

            development_plan_object_list.append({"manager_data":manager_data})
            user_development_plans_list.append(development_plan_object_list)

    else:
        return JsonResponse(data={"details":"Employee with id={} doesn't have any Development Plan"
                            .format(request.user.pk)}, status=404)

    data = {"user_development_plans:": user_development_plans_list}
    return JsonResponse(status=201, data=data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_development_plans_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return JsonResponse(status=201, data=data)", "def get_active_development_plan_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n current_development_plan = DevelopmentPlan.objects.filter(\n employee_relation=current_employee,\n employee_relation__developmentplantoemployeerelation__finished_at__isnull=True).first() # is active !!!\n\n if not current_employee:\n raise PermissionDenied()\n\n if current_development_plan:\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = current_development_plan.id\n dev_plan[\"deleted\"] = current_development_plan.deleted\n if current_development_plan.type:\n dev_plan[\"type\"] = current_development_plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = current_development_plan)\\\n .finished_at\n\n dev_plan[\"created_at\"] = current_development_plan.created_at\n dev_plan[\"created_by\"] = current_development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = current_development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = current_development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = current_development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = current_development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = 
name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(current_development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = current_development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n print all_questions\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id,\n employee=current_employee).first()\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for slider in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id,\n employee=current_employee).first()\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(current_development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n else:\n return JsonResponse(data={\"details\": \"The user with id={} doesn't have an active Development Plan\"\n .format(current_employee.user.id)}, status=404)", "def plans(self):\n title = self.context.Title()\n return self.portal_catalog(portal_type='Plan', Subject=title)", "def plan_list_get(request):\n return list_by_company_guid(request, PlanModel)", "def plan_get(request):\n company 
= auth_api_key(request)\n plan = get_and_check_plan(request, company)\n return plan", "def development_plan_details(request, development_plan_id): #, employee_id ):\n # employee = Employee.objects.get(user__pk=request.user.pk)\n # employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n current_employee = Employee.objects.filter(user__pk=request.user.pk).first()\n all_employees = development_plan.employee_relation.all()\n\n try:\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = development_plan.id\n dev_plan[\"deleted\"] = development_plan.deleted\n if development_plan.type:\n dev_plan[\"type\"] = development_plan.type.name\n # dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects.get(development_plan = development_plan)\\\n # .finished_at\n\n dev_plan[\"created_at\"] = development_plan.created_at\n dev_plan[\"created_by\"] = development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n emp_data[\"dev_plan_finished_at\"] = DevelopmentPlanToEmployeeRelation\\\n .objects.get(employee=emp,\n development_plan = development_plan)\\\n .finished_at\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id).first() #employee=current_employee\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n 
question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for slider in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id).first() #employee=current_employee\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n except DevelopmentPlan.DoesNotExist:\n return JsonResponse(data={\"details\":\"Development Plan with this id doesn't exist\"}, status=404)", "def show(self):\n self.parser.add_argument('plan_uuid',\n help=\"Plan uuid or name\")\n args = self.parser.parse_args()\n response = self.client.plans.find(name_or_id=args.plan_uuid)\n fields = ['uuid', 'name', 'description', 'uri']\n data = dict([(f, getattr(response, f, ''))\n for f in fields])\n cliutils.print_dict(data, wrap=72)", "def test_users_listed(self):\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.plan)", "def all_plans(request):\n\n plans = Plan.objects.all()\n\n context = {\n 'plans': plans,\n }\n\n return render(request, 'plans/plans.html', context)", "def plans():", "def dashboard(request):\n employee = request.user.employee_user.first()\n widgets = list()\n # development_plans = employee.getDevelopmentPlans()\n if employee.is_manager:\n widgets.append(dict(\n # template=\"mus/_widget_waiting_developmentplans.html\",\n data=employee.getMyEmployees(),\n # title=_('Expecting preparation guides from')\n ))\n widgets.append(dict(\n # template=\"mus/_widget_todo_developmentplans.html\",\n data=employee.getMyEmployees(),\n # title=_('Preparation guides to do')\n ))\n # widgets.append(dict(\n # template = \"mus/_widget_my_developmentplans.html\",\n # data = development_plans,\n # title = _('My development plans')\n # ))\n return JsonResponse(status=200,data={\n # 'widgets': model_to_dict(widgets),\n 'employee': model_to_dict(employee),\n # 'development_plans': development_plans\n })", "def 
usersview_admin():\n\n # User objects list which includes list of all users which can be broken down into editors and sponsors\n # get all users\n user_objects=db.session.query(User.id,User.email,User.user_type,User.user_status,User.name,User.organization).\\\n order_by(User.id)\n\n # get a count of the user objects\n user_count = user_objects.count()\n\n # blank list to append to\n user_list=[]\n\n # loop through user objects\n for counter in range(0,user_count):\n user_list.append(user_objects[counter])\n\n # show list of document names\n users = user_list\n\n \"\"\"Logged-in User Dashboard.\"\"\"\n return render_template(\n 'usersview_admin.jinja2',\n users=users\n )", "def plans(self):\r\n return pl.Plans(self)", "def list(cls):\n return cls().requests.get('plan')", "def return_admin_list(request):\n del request\n return return_user_list(Administrador)", "def plans(self):\r\n return Plans(self)", "def new_flight_plan(self):\n r = requests.post(self.base_url + f'/users/{self.username}/flight-plans')\n return r.text", "def get_plans(user, title=None, category=None, priority=None, status=None,\n id=None, orderby=None):\n user = get_user(user)\n filters = create_filters(id, title, category,\n priority, status)\n selection = user.plans.filter(**filters)\n\n if orderby:\n selection = selection.order_by(orderby)\n\n if not len(selection):\n raise ObjectDoesNotFound('There is no plans with selected filters.')\n return selection", "def get_plan(self):\n\t\tresponse = self.client.get(self._endpoint + \"/plan\")\n\t\tplan = response.json['plans']\n\t\tplan = list(plan.items())[0][1]\n\t\treturn Plan(plan['plan_id'],data=plan)", "def admin_update_preview():\n return user_management_handler(\"show_admin\", \"\", False)", "def show(ctx):\n skale = ctx.obj['skale']\n # from skale.utils.contracts_provision.main import add_test_permissions\n # add_test_permissions(skale)\n show_all_schains_names(skale)", "def plans():\n results = []\n if 'qry' in request.args:\n look_for = request.args['qry']\n if look_for[0] == '*':\n look_for = ''\n zipcode = request.args['zipcode']\n\n try:\n plan = request.args['plan']\n except KeyError:\n return None\n\n # If this is a medicaid or private plan\n where = tools.get_location(zipcode)\n if where:\n if plan in ('medicaid', 'private'):\n state = where.STATE\n results = PlanNames.by_state(state, look_for, plan=='medicaid')\n results = [r.plan_name for r in results]\n if state == 'OH':\n results.append('OH State Medicaid')\n elif plan == 'medicare':\n county_code = where.GEO.COUNTY_CODE\n ma_region = where.GEO.MA_REGION_CODE\n pdp_region = where.GEO.PDP_REGION_CODE\n results = Plans.find_in_county(county_code, ma_region, pdp_region, look_for)\n\n return jsonify(sorted(results))", "def plan_detail(request, plan_id):\n\n plan = get_object_or_404(Plan, pk=plan_id)\n\n context = {\n 'plan': plan,\n }\n\n return render(request, 'plans/plan_detail.html', context)", "def get_goals_todo_info(self, cr, uid, context=None):\n all_goals_info = []\n plan_obj = self.pool.get('gamification.goal.plan')\n\n plan_ids = plan_obj.search(cr, uid, [('user_ids', 'in', uid), ('state', '=', 'inprogress')], context=context)\n for plan in plan_obj.browse(cr, uid, plan_ids, context=context):\n # serialize goals info to be able to use it in javascript\n serialized_goals_info = {\n 'id': plan.id,\n 'name': plan.name,\n 'visibility_mode': plan.visibility_mode,\n }\n user = self.browse(cr, uid, uid, context=context)\n serialized_goals_info['currency'] = user.company_id.currency_id.id\n\n if 
plan.visibility_mode == 'board':\n # board report should be grouped by planline for all users\n goals_info = plan_obj.get_board_goal_info(cr, uid, plan, subset_goal_ids=False, context=context)\n\n if len(goals_info) == 0:\n # plan with no valid planlines\n continue\n\n serialized_goals_info['planlines'] = []\n for planline_board in goals_info:\n vals = {'type_name': planline_board['goal_type'].name,\n 'type_description': planline_board['goal_type'].description,\n 'type_condition': planline_board['goal_type'].condition,\n 'type_computation_mode': planline_board['goal_type'].computation_mode,\n 'type_monetary': planline_board['goal_type'].monetary,\n 'type_suffix': planline_board['goal_type'].suffix,\n 'type_action': True if planline_board['goal_type'].action_id else False,\n 'type_display': planline_board['goal_type'].display_mode,\n 'target_goal': planline_board['target_goal'],\n 'goals': []}\n for goal in planline_board['board_goals']:\n # Keep only the Top 3 and the current user\n if goal[0] > 2 and goal[1].user_id.id != uid:\n continue\n\n vals['goals'].append({\n 'rank': goal[0] + 1,\n 'id': goal[1].id,\n 'user_id': goal[1].user_id.id,\n 'user_name': goal[1].user_id.name,\n 'state': goal[1].state,\n 'completeness': goal[1].completeness,\n 'current': goal[1].current,\n 'target_goal': goal[1].target_goal,\n })\n if uid == goal[1].user_id.id:\n vals['own_goal_id'] = goal[1].id\n serialized_goals_info['planlines'].append(vals)\n\n else:\n # individual report are simply a list of goal\n goals_info = plan_obj.get_indivual_goal_info(cr, uid, uid, plan, subset_goal_ids=False, context=context)\n\n if not goals_info:\n continue\n\n serialized_goals_info['goals'] = []\n for goal in goals_info:\n serialized_goals_info['goals'].append({\n 'id': goal.id,\n 'type_name': goal.type_id.name,\n 'type_description': goal.type_description,\n 'type_condition': goal.type_id.condition,\n 'type_monetary': goal.type_id.monetary,\n 'type_suffix': goal.type_id.suffix,\n 'type_action': True if goal.type_id.action_id else False,\n 'type_display': goal.type_id.display_mode,\n 'state': goal.state,\n 'completeness': goal.completeness,\n 'computation_mode': goal.computation_mode,\n 'current': goal.current,\n 'target_goal': goal.target_goal,\n })\n\n all_goals_info.append(serialized_goals_info)\n return all_goals_info", "def admin_dash():\n if session['user_admin'] == False:\n abort(403)\n\n yesterday = datetime.utcnow() - timedelta(days=1)\n last_week = datetime.utcnow() - timedelta(days=7)\n # Retrieve all Users\n sqa_sess = sqa_session()\n total_users = sqa_sess.query(User).count()\n new_users_yesterday = sqa_sess.query(User).filter(User.Create_Date > yesterday).count()\n new_users_lastweek = sqa_sess.query(User).filter(User.Create_Date > last_week).count()\n\n active_users_yesterday = sqa_sess.query(User).filter(User.Last_Login_Date > yesterday).count()\n active_users_lastweek = sqa_sess.query(User).filter(User.Last_Login_Date > last_week).count()\n\n total_flights = sqa_sess.query(FlightPlan).count()\n new_flights_yesterday = sqa_sess.query(FlightPlan).filter(FlightPlan.Import_Date >= yesterday).count()\n new_flights_lastweek = sqa_sess.query(FlightPlan).filter(FlightPlan.Import_Date >= last_week).count()\n \n\n return render_template('admin/dashboard.html', total_users=total_users, new_users_yesterday=new_users_yesterday, new_users_lastweek=new_users_lastweek,\n active_users_lastweek=active_users_lastweek, active_users_yesterday=active_users_yesterday,\n total_flights=total_flights, 
new_flights_lastweek=new_flights_lastweek, new_flights_yesterday=new_flights_yesterday)", "def user_project_view(cls, user, project):\r\n pass", "def changelist_view(self, request, extra_context=None):\n if request.user.user_type == User.ADMIN_CEA:\n self.list_display = ('user', 'cea', 'booking', 'request_status')\n elif request.user.user_type == User.ADMIN_CRC:\n self.list_display = ('user', 'crc', 'booking', 'request_status')\n elif request.user.user_type == User.EXPRESS_USER:\n self.list_display = ('user', 'payment_type', 'request_status', 'credit_status', 'booking')\n else:\n self.list_display = ('user', 'booking','cea', 'crc', 'transit', 'payment_type', 'request_status',)\n return super(RequestAdmin, self).changelist_view(request, extra_context)", "def KLP_Users_list(request):\n\n # get logged in user\n\n user = request.user\n if user.id:\n\n # check logged in user permissions, to get user list\n\n KLP_user_Perm(request.user, 'Users', None)\n\n # get all active(1) users list other than staff and super user order by username\n\n user_list = User.objects.filter(is_staff=0,\n is_superuser=0).order_by('username')\n\n # render show users form with users list\n\n return render_to_response('viewtemplates/show_users_form.html',\n {\n 'user_list': user_list,\n 'user': user,\n 'title': 'KLP Users',\n 'legend': 'Karnataka Learning Partnership',\n 'entry': 'Add',\n }, context_instance=RequestContext(request))\n else:\n\n # if user is not logged in redirect to login page\n\n return HttpResponseRedirect('/login/')", "def view_budgets(self) -> None:\n Menu.prompt_view_budgets()\n for budget in self.user.budget_manager:\n print(f\"{budget}\\n\")", "def list_plans(self, json_output: bool = False):\n plans, errors = self.rest.list_backup_plans()\n _exit_if_errors(errors)\n if json_output:\n print(json.dumps(plans, indent=2))\n else:\n self.human_print_plans(plans)", "def developer_dahsboard(request):\r\n\tif not request.user.profile.is_developer:\r\n\t\tgames = Game.objects.all()\r\n\t\treturn redirect('/', {'games': games, 'MEDIA_URL': settings.MEDIA_URL})\r\n\tgames = Game.objects.filter(developer = request.user.profile)\r\n\treturn render(request, \"dashboard.html\", {'MEDIA_URL': settings.MEDIA_URL, 'games': games})", "def get(self):\n DA = DataAccessor()\n students = DA.getStudents()\n admins = DA.getAdmins()\n self.generate('manageUsers.html', {\n 'admins' : admins,\n 'students' : students\n })", "def test_listing_supplies_admin(self):\n request = self.factory.get(\n '/api/supplies')\n force_authenticate(request, user=self.testadmin)\n response = SupplyListView.as_view()(request)\n # admin can browse the data\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def __str__(self):\n return self.plan", "def list_departments():\n \t check_admin()\n\n #check all the departments in the database and assign them to a variable.departments \n \t departments = Department.query.all()\n\n \t return render_template('admin/departments/departments.html',departments = departments,title = \"Departments\")", "def signuprequests_admin():\n\n # User objects list which includes list of all users which can be broken down into editors and sponsors\n # get all users\n user_objects=db.session.query(User.id,User.email,User.user_type,User.user_status,User.name,User.organization).\\\n order_by(User.id)\n\n # get a count of the user objects\n user_count = user_objects.count()\n\n # blank list to append to\n user_list=[]\n\n # loop through user objects\n for counter in range(0,user_count):\n 
user_list.append(user_objects[counter])\n\n # show list of document names\n users = user_list\n\n \"\"\"Logged-in User Dashboard.\"\"\"\n return render_template(\n 'signuprequests_admin.jinja2',\n title='Signup Requests Dashboard',\n users=users\n )", "def user_admin_list_data():\n video = VideoFactory()\n collection = video.collection\n moira_list = factories.MoiraListFactory()\n collection.admin_lists.set([moira_list])\n return SimpleNamespace(video=video, moira_list=moira_list, collection=collection)", "def get_plans(self):\n return stripe.Plan.all()", "def user_project_view(cls, user, project):\n pass", "def show_admin_edit_users():\n return render_admin_page(\"admin-eu.html\")", "def get_admins():\n users = get_users()\n admins = []\n for user in users:\n if user[\"approval_level\"] == \"admin\":\n admins.append(user)\n\n return admins", "def view_project():\n\n project_title = request.args.get('title')\n\n description, max_grade = hackbright.get_project_info(project_title)\n\n student_grades = hackbright.list_students_by_completed_project(project_title)\n\n return render_template(\"project_info.html\",\n title=project_title,\n description=description,\n max_grade=max_grade,\n student_grades=student_grades)", "def show_admin_edit_admins():\n return render_admin_page(\"admin-ea.html\")", "def get_and_check_plan(request, company):\n model = PlanModel(request.session)\n guid = request.matchdict['plan_guid']\n plan = model.get(guid)\n if plan is None:\n raise HTTPNotFound('No such plan {}'.format(guid))\n if plan.company_guid != company.guid:\n raise HTTPForbidden('You have no permission to access plan {}'\n .format(guid))\n return plan", "def showORGusers(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n ORG_ID = kwargs['ORG_ID']\n strCSPProdURL = kwargs['strCSPProdURL']\n jsonResponse = get_csp_users_json(strCSPProdURL, ORG_ID, sessiontoken)\n if jsonResponse == None:\n print(\"API Error\")\n sys.exit(1)\n\n users = jsonResponse['results']\n table = PrettyTable(['First Name', 'Last Name', 'User Name'])\n for i in users:\n table.add_row([i['user']['firstName'],i['user']['lastName'],i['user']['username']])\n print (table.get_string(sortby=\"Last Name\"))", "def view_projects(request):\n current_user=request.user\n current_user_name=current_user.username\n projects=Project.objects.all()\n return render(request, 'view_projects.html',{'projects':projects, 'current_user_name':current_user})", "def get_challenge_suggestions(self, cr, uid, context=None):\n plan_info = []\n goal_plan_obj = self.pool.get('gamification.goal.plan')\n plan_ids = goal_plan_obj.search(cr, uid, [('proposed_user_ids', 'in', uid), ('state', '=', 'inprogress')], context=context)\n for plan in goal_plan_obj.browse(cr, uid, plan_ids, context=context):\n values = {\n 'id': plan.id,\n 'name': plan.name,\n 'description': plan.description,\n }\n plan_info.append(values)\n return plan_info", "def model_plan_feature(cfg, model, developer_gen):\n model.ticket = cfg[\"ticket_id_template\"] % (choose_in(1, cfg[\"repo_age_in_days\"]),)\n model.planned = choose_in(1, cfg[\"max_commits_per_branch\"])\n model.developer = next(developer_gen)\n return model", "def get(self):\n return GetListOfSavingPlan(current_user.id)", "def __str__(self):\n return self.plan.title", "def dashboard():\n # TODO: Optionally, old proposals should be shown in a read-only mode.\n talks = Talk.query.current.filter(Talk.user == current_user)\n return render_template(\n 'profile/dashboard.html', talks=talks)", "def get(self):\n return {\n \"plans\": PLANS,\n 
}", "async def _ad_list(self, ctx):\n admin_list = self.database.get_admins(ctx.guild.id)\n if len(admin_list) > 0:\n out = \"```\"\n for admin in admin_list:\n admin_name = self.bot.get_user(admin.user_id)\n admin_name = str(admin_name) if admin_name is not None else admin.user_id\n out += f\"{admin_name}\\n\"\n out += \"```\"\n await ctx.send(out)\n else:\n await ctx.send(\"This guild currently has no administrators.\")", "def projects_view(request):\n\n # The projects to be displayed. Only the ones in which the logged in user is involved\n projects = request.user.projets.all().order_by('name')\n return render(request, 'projects.html', locals())", "def desks():\n desks = Desk.query.all()\n return render_template('desks.html', desks=desks)", "def queryset(self, request):\n qs = super(ShortURLAdmin, self).queryset(request)\n if request.user.has_perm('deflect.list_all'):\n return qs\n return qs.filter(creator=request.user)", "def devs_as_json(cls):\n users = []\n for user in cls.filter(None, {'roles': 'Developer', 'iscommitter': 0}):\n username = user.username.plain()\n realname = user.realname.plain()\n if not realname:\n continue\n users.append([username, realname])\n return json.dumps(users, separators=(',',':'))", "def listerp(request):\n if 'member_id' not in request.session:\n return redirect(\"/login/\")\n try:\n Programme.objects.order_by(\"id\")\n Prog_lister = Programme.objects.all()\n return render(request, 'esihapp/listp.html', locals())\n except KeyError:\n return render(request, 'esihapp/listp.html', locals())", "def plan(self):\n return read_small_file(self.homeDirectory + \"/.plan\")", "def user_list():\n if session['user_admin'] == False:\n abort(403)\n\n # Retrieve all Users\n sqa_sess = sqa_session()\n users = sqa_sess.query(User).all()\n\n return render_template('admin/user_list.html', users=users)", "def get(self):\n authenticated_user_id = token_auth.current_user()\n orgs_dto = OrganisationService.get_organisations_managed_by_user_as_dto(\n authenticated_user_id\n )\n if len(orgs_dto.organisations) < 1:\n return {\n \"Error\": \"User is not a manager of the project\",\n \"SubCode\": \"UserPermissionError\",\n }, 403\n\n search_dto = self.setup_search_dto()\n admin_projects = ProjectAdminService.get_projects_for_admin(\n authenticated_user_id,\n request.environ.get(\"HTTP_ACCEPT_LANGUAGE\"),\n search_dto,\n )\n return admin_projects.to_primitive(), 200", "def get_manager_employees(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n manager_employees = Employee.objects.filter(manager=current_employee, development_plan_type=None).all()\n if manager_employees:\n emp_list=[]\n for emp in manager_employees:\n emp_data={}\n emp_data[\"id\"] = emp.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"manager_id\"] = emp.manager.id\n # emp_data[\"status_questions\"] = emp.status_questions\n # employee_role = EmployeeRole.objects.filter(employee=emp).all()\n # name_role_list = []\n # for obj in employee_role:\n # name_role_list.append(obj.role.name)\n # emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n data = {\"employees:\": emp_list}\n return JsonResponse(status=201, data=data)\n else:\n return JsonResponse(\"The user with id={} isn't a manager for any user\".format(current_employee.user.id),\n status=404)", "def _get_standalone_queryset(self, queryset):\n # (not used yet) To be iso LTI, admin and instructor can retrieve all 
video's livesession\n if permissions.IsParamsVideoAdminThroughOrganization().has_permission(\n self.request, self\n ):\n return queryset\n # use can get his related livesession\n return queryset.filter(user_id=self.request.user.id)", "def get_queryset(self):\n queryset = Project.objects.filter(contributor__user=self.request.user.pk)\n return queryset", "def user_plan(request, username):\n\n try:\n user = MiVotiUser.objects.get(username=username)\n except MiVotiUser.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if not user.gdrive_id_json_plan:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n # Obtener el plan\n dict_plan = gdrive_obtener_contenido_plan(user.gdrive_id_json_plan)\n return Response(dict_plan)\n elif request.method == 'POST':\n # Crear un nuevo Plan\n serializer = PlanEstudioUsuarioSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save(user)\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n # Eliminar el plan\n ruta_local = os.path.join('planes_json_cache', user.gdrive_id_json_plan)\n\n if os.path.exists(ruta_local):\n os.remove(ruta_local)\n\n archivo_plan_json = apps.get_app_config('planeador').g_drive.CreateFile(\n {'id': user.gdrive_id_json_plan})\n\n archivo_plan_json.Delete()\n\n user.gdrive_id_json_plan = None\n user.save()\n\n return Response(status=status.HTTP_204_NO_CONTENT)", "def volunteers(request):\n volunteers = Volunteer.objects.all()\n return render(request, 'volunteers.html',\n {\"volunteers\" : volunteers})\n\n return render(request, 'edit-volunteer.html',\n {\"form\" : form})", "def show_admin():\n return render_admin_page(\"admin.html\")", "def update_plan_choisen():\n # SOLO USO PARA AMBIENTE EN DESARROLLO\n for client in Client.objects.all():\n try:\n plan_chosen = get_query_set_plan()\n plan_active = plan_chosen.filter(queryplansclient__client=client.id, is_active=True,\n queryplansclient__is_chosen=True)\n if plan_active:\n plan = QueryPlansAcquiredSerializer(plan_active[0])\n chosen_plan(client.id, plan.data)\n print(\"success\")\n print(\"empty\")\n except Exception as e:\n print(\"error\"+str(e))", "def plans(self, plans):\n\n self._plans = plans", "def get_and_display_project():\n\n project = request.args.get('project')\n\n title, description, max_grade = hackbright.get_project_by_title(project)\n\n\n github_grade_list = hackbright.get_grades_by_title(project)\n\n return render_template(\"project_info.html\",\n title=title,\n description=description,\n max_grade=max_grade,\n github_grade_list=github_grade_list)", "def show_priveleges(self):\n print(\"This user:\")\n for privelege in self.priveleges:\n print(privelege)", "def delegation_show(request, pk):\n delegation = Delegation.objects.get(pk=pk)\n\n delegates = Delegate.objects.filter(delegation_id=pk).order_by(\"committee__name\")\n\n context = {\"delegation\": delegation, \"delegates\": delegates, \"delegation_show\": True}\n template = \"jurycore/delegation_show.html\"\n return render(request, template, context)", "def display_user(cls):\n return cls.user_list", "def show():\n return render_template(\n 'listUsers.html',\n title='List Users',\n message='These are the users in our system'\n )", "def show_users():\n return 'hehe'", "def changelist_view(self, request, extra_context=None):\n if request.user.has_perm('deflect.list_all'):\n self.list_filter = self._list_filter + ('creator__username',)\n self.list_display = 
self._list_display + ('creator',)\n else:\n self.list_filter = self._list_filter\n self.list_display = self._list_display\n return super(ShortURLAdmin, self).changelist_view(request, extra_context=extra_context)", "def show_users():\r\n users = User.query.order_by(User.last_name,User.first_name).all()\r\n return render_template('list.html', users=users)", "def partners(request):\n return render(request, 'ecosystem/partners.html', {'page': 'partners'})", "def admin():\n aaa.require(role='admin', fail_redirect='/sorry_page')\n return dict(\n current_user=aaa.current_user,\n users=aaa.list_users(),\n roles=aaa.list_roles()\n )", "def get(self, **kwargs):\n _plans = self._plans.query(**kwargs)\n\n if not _plans:\n raise PlanNotFoundError\n\n return _plans", "def workoutplan_detail(request, pk):\n template_data = {}\n plan = get_object_or_404(WorkoutPlan, pk=pk)\n workouts = Workout.objects.filter(workout_plan__id=pk).order_by('plan_week', 'plan_day')\n\n for workout in workouts:\n if workout.plan_user.filter(username=request.user.username).exists():\n template_data['following'] = True\n else:\n template_data['following'] = False\n\n if request.method == 'POST':\n form = StartingDateForm(request.POST)\n if form.is_valid():\n starting_date = form.cleaned_data.get('workout_day')\n for workout in workouts:\n if workout.plan_week == 1:\n if workout.plan_day == 1:\n workout.plan_user.add(request.user)\n workout.workout_day = starting_date\n workout.save()\n else:\n workout.plan_user.add(request.user)\n new_date = starting_date + timedelta(days = workout.plan_day - 1)\n workout.workout_day = new_date\n workout.save()\n else:\n new_day = 7 * (workout.plan_week - 1) + (workout.plan_day - 1)\n new_date = starting_date + timedelta(days = new_day)\n workout.plan_user.add(request.user)\n workout.workout_day = new_date\n workout.save()\n messages.success(request, f'This workout plan has been added to your calendar')\n return HttpResponseRedirect(reverse('workout-program-detail', kwargs={'pk': pk}))\n else:\n form = StartingDateForm()\n\n form = StartingDateForm()\n template_data['form'] = form\n template_data['workouts'] = workouts\n template_data['plan'] = plan\n\n return render(request, 'workouts/workoutplan_detail.html', template_data)", "def get_lists(self, request):\n target_user = User.objects.filter(email=request.POST['email'])\n if not target_user.exists():\n # In this case we don't want to return to the initial page\n return JsonResponse({\n 'msg': \"ERROR: The user doesn't exist\"\n })\n\n requests = Request.objects.get_active_by_user(target_user.first())\n borrowings = Borrowing.objects.get_active_by_user(target_user.first())\n html = render_to_string(\"include/hardware_admin_user.html\", {\n 'requests': requests,\n 'borrowings': borrowings\n })\n return JsonResponse({\n 'content': html\n })", "def display_departmentlist():\n\tdeptid = 0\n\tprint\n\tprint '[*] Fetching departments list'\n\n\t# call the api function\n\tsupportdepartments = whmcs.getsupportdepartments()\n\tif supportdepartments == None:\n\t\tprint '[x] WHMCS getsupportdepartments API function call failed.'\n\t\tprint '[!] exiting.'\n\t\t_exit(0)\n\n\t# reconnect if ssl or url error orccured\n\twhile supportdepartments == 'sslerror' or supportdepartments == 'urlerror':\n\t\tprint '[!] Re-establishing connection after 5 seconds'\n\t\ttry: time.sleep(5)\n\t\texcept KeyboardInterrupt: print '\\n[!] 
exiting.'; _exit()\n\t\tsupportdepartments = whmcs.getsupportdepartments()\n\n\tresult = supportdepartments.get('result')\n\ttotalresults = supportdepartments.get('totalresults')\n\tif result != 'success' or totalresults == 0:\n\t\tprint '[x] Unable to find any support departments on (%s).' % (parser.get('whmcs', 'server'))\n\t\tprint '[x] %s.' % supportdepartments.get('message')\n\t\t_exit()\n\n\t#############################\n\t## Display Department List ##\n\t#############################\n\t# Eg: {'departments': { 'department': [{'id': ,'name': ,'awaitingreply': ,'opentickets': ,}, {...}]}}\n\n\tdepartments = supportdepartments.get('departments').get('department')\n\trowformat = '| %-5s | %-20s | %-15s | %-15s |'\n\theader = ('ID', 'Department', 'Awaiting Reply', 'Open Tickets')\n\ttitle = rowformat % header\n\tprint '-' * len(title)\n\tprint title\n\tprint '-' * len(title)\n\tdeptlist = []\n\tfor department in departments:\n\t\tdeptid = department['id']\n\t\tdeptlist.append(deptid)\n\t\tdeptname=department['name']\n\t\tif len(deptname) > 20:\n\t\t\tdeptname = deptname[:20-4]+'...'\n\t\tprint rowformat % (deptid, deptname, department.get('awaitingreply'), department.get('opentickets'))\n\t\tprint '-' * len(title)\n\n\t# Display department ID selection prompt\n\twhile 1:\n\t\ttry:\n\t\t\tdeptid = raw_input('[+] Select Department ID: ')\n\t\texcept KeyboardInterrupt:\n\t\t\tprint '\\n[!] exiting.cleanly.'\n\t\t\texit()\n\n\t\tif type(deptid) != int and deptid not in deptlist:\n\t\t\tprint '[!] Invalid Department ID (%s).' % deptid\n\t\telse:\n\t\t\tbreak\n\treturn deptid", "def list(self):\n\n for name in self.projects:\n self.projects[name].show()\n print(\"\\n\")", "def user_has_access(self, user):\n if not user: return False\n query = db.Query(TaskListMember)\n query.filter('task_list =', self)\n query.filter('user =', user)\n return query.get()", "def test_admin_calendar_user_admin_list(self):\n response = self.client.get(\"/admin/auth/calendaruser/\")\n self.assertEqual(response.status_code, 200)", "def AdminDashboard(user=None):\n\n\tif user == None:\n\t\tuser= defaultUser\n\n\ttable = user.htmlTable()\n\n\treturn render_template('adminDash.html', table=table, user = user.name)", "def user_details(request, user_id):\n user = User.objects.get(pk=user_id)\n maps = VisitingCards.objects.filter(user=user).filter(~Q(designation__title='Individual'))\n orgs = [m.organization for m in maps]\n designations = [m.designation for m in maps]\n\n # Remove None objects\n from operator import is_not\n from functools import partial\n designations = filter(partial(is_not, None),designations)\n orgs = list(set(orgs))\n\n return render_to_response('organization/user_details.html',\n {\"orgs\": orgs, \"maps\": maps, \"designations\": designations, \"puser\": user},\n context_instance=RequestContext(request))", "def test_listing_supplies_user(self):\n request = self.factory.get(\n '/api/supplies')\n force_authenticate(request, user=self.testuser1)\n response = SupplyListView.as_view()(request)\n # normal user can browse the data\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def list_assignmentypes_running(request):\n prof = request.user.prof\n context = {'type_assignmentype': 'running', 'prof': prof}\n context['list_assignmentypes'] = Assignmentype.objects.\\\n filter(archived=False, prof=prof).order_by('deadline_submission')\n return render(request, 'gradapp/list_assignmentype.html',\n context)", "def test_get_all_rate_plans(self):\n pass", "def view_assignment_list():\n\n if 
len(Assignments.assignments_list) == 0:\n Ui.print_message(\"Assignment list is empty\")\n else:\n Ui.print_assignments_list(Assignments.assignments_list, \"Assignments List:\")", "def get_plan(self):\n sub = self.get_subscription()\n return sub.plan", "def admin(request):\r\n assert isinstance(request, HttpRequest)\r\n return render(\r\n request,\r\n 'app/admin/admin.html',\r\n context_instance=RequestContext(request,\r\n {\r\n 'title': 'Colmeia | Administrador',\r\n 'year': datetime.now().year,\r\n })\r\n )", "def index():\n view_dict = get_opentree_services_method_urls(request)\n view_dict['maintenance_info'] = get_maintenance_info(request)\n if auth.is_logged_in():\n # user is logged in, filter to their own collections by default?\n pass\n else:\n # anonymous visitor, show unfiltered list?\n pass\n\n return view_dict", "def get(self):\n\n usrs = get_mapviewers(24)\n for usr in usrs:\n logging.info(\"Mapviewer: \" + usr.loginuser);\n\n template_values = {\n 'mapviewers': usrs,\n }\n\n logging.info(\"Showusers visited.\")\n template = JINJA_ENVIRONMENT.get_template('showusers.html')\n self.response.write(template.render(template_values))", "def list_provisioned_product_plans_single_page(self, **kwargs):\n return slurp(\n 'list_provisioned_product_plans',\n self.list_provisioned_product_plans,\n 'ProvisionedProductPlans',\n **kwargs\n )", "def admin(request):\n if not request.user.is_staff:\n return render(request, 'manager/denied.html')\n return render(request, 'manager/index.html')", "def human_print_plans(plans: List[Any]):\n # if plans is empty or none print no plans message\n if not plans:\n print('No plans')\n return\n\n name_pad = 5\n service_pad = 8\n for plan in plans:\n if len(plan['name']) > name_pad:\n name_pad = len(plan['name'])\n services_str = BackupServicePlan.service_list_to_str(plan['services'])\n if len(services_str) > service_pad:\n service_pad = len(services_str)\n\n name_pad += 1\n service_pad += 1\n header = f'{\"Name\":<{name_pad}} | # Tasks | {\"Services\":<{service_pad}} | Default'\n print(header)\n print('-' * (len(header) + 5))\n for plan in plans:\n task_len = len(plan['tasks']) if 'tasks' in plan and plan['tasks'] else 0\n print(f'{plan[\"name\"]:<{name_pad}} | {task_len:<7} | '\n f'{BackupServicePlan.service_list_to_str(plan[\"services\"]):<{service_pad}} | '\n f'{(plan[\"default\"] if \"default\" in plan else False)!s}')", "def get_queryset(self):\n user = self.request.user\n if not (user.is_authenticated and user.check_permstring(\"builders\")):\n raise Http404(\"Not staff\")\n return super(IncompleteRosterListView, self).get_queryset()" ]
[ "0.7063593", "0.682502", "0.6374027", "0.6217655", "0.61145437", "0.610118", "0.6058615", "0.59781164", "0.5932294", "0.5915161", "0.59006524", "0.587544", "0.5792681", "0.57351124", "0.5670701", "0.5654031", "0.5596579", "0.5595037", "0.55796534", "0.55656326", "0.552241", "0.54659665", "0.54402494", "0.5438603", "0.5435412", "0.54005414", "0.5396313", "0.5357132", "0.53446543", "0.5324184", "0.5322619", "0.5320938", "0.531257", "0.531102", "0.5297453", "0.5296932", "0.5295956", "0.5295093", "0.5278356", "0.5248372", "0.52364653", "0.5203464", "0.51996243", "0.51929486", "0.5183525", "0.51679754", "0.5162576", "0.5155815", "0.515178", "0.51443964", "0.51267064", "0.5124073", "0.5116502", "0.5116166", "0.5101623", "0.50944096", "0.5082726", "0.5073755", "0.5070831", "0.5070692", "0.50588614", "0.5058105", "0.50539184", "0.50517607", "0.50457835", "0.5042673", "0.5034214", "0.50250053", "0.5024406", "0.5009973", "0.5008582", "0.500172", "0.49998155", "0.49880978", "0.49861458", "0.49797565", "0.49633974", "0.49632522", "0.4960426", "0.49545917", "0.49538642", "0.4951918", "0.4944552", "0.49389482", "0.49384874", "0.4938375", "0.49366635", "0.49360538", "0.4934486", "0.49337903", "0.49285936", "0.49262795", "0.49247938", "0.49234015", "0.49230492", "0.4912534", "0.4909368", "0.4905967", "0.48993087", "0.4898167" ]
document_score: 0.6848171
document_rank: 1

query: View a list of development plans for active user
def get_all_development_plans_for_user(request): current_employee = Employee.objects.get(user__pk=request.user.pk) user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all() if not current_employee: raise PermissionDenied("You don't have any employee assigned to you.", 401) if user_development_plans: data={} user_development_plans_list = [] for plan in user_development_plans: development_plan_object_list=[] dev_plan = {} dev_plan["id"] = plan.id dev_plan["deleted"] = plan.deleted if plan.type: dev_plan["type"] = plan.type.name dev_plan["finished_at"] = DevelopmentPlanToEmployeeRelation.objects\ .get(employee=current_employee, development_plan = plan).finished_at dev_plan["created_at"] = plan.created_at dev_plan["created_by"] = plan.created_by.username development_plan_object_list.append({"dev_plan_details":dev_plan}) manager_data = {} manager_data["manager_username"] = plan.manager_relation.user.username manager_data["id"] = plan.manager_relation.user.id development_plan_object_list.append({"manager_data":manager_data}) user_development_plans_list.append(development_plan_object_list) else: return JsonResponse(data={"details":"Employee with id={} doesn't have any Development Plan" .format(request.user.pk)}, status=404) data = {"user_development_plans:": user_development_plans_list} return JsonResponse(status=201, data=data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_active_development_plan_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n current_development_plan = DevelopmentPlan.objects.filter(\n employee_relation=current_employee,\n employee_relation__developmentplantoemployeerelation__finished_at__isnull=True).first() # is active !!!\n\n if not current_employee:\n raise PermissionDenied()\n\n if current_development_plan:\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = current_development_plan.id\n dev_plan[\"deleted\"] = current_development_plan.deleted\n if current_development_plan.type:\n dev_plan[\"type\"] = current_development_plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = current_development_plan)\\\n .finished_at\n\n dev_plan[\"created_at\"] = current_development_plan.created_at\n dev_plan[\"created_by\"] = current_development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = current_development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = current_development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = current_development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = current_development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(current_development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = current_development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n print all_questions\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id,\n employee=current_employee).first()\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for 
slider in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id,\n employee=current_employee).first()\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(current_development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n else:\n return JsonResponse(data={\"details\": \"The user with id={} doesn't have an active Development Plan\"\n .format(current_employee.user.id)}, status=404)", "def plans(self):\n title = self.context.Title()\n return self.portal_catalog(portal_type='Plan', Subject=title)", "def plan_list_get(request):\n return list_by_company_guid(request, PlanModel)", "def plan_get(request):\n company = auth_api_key(request)\n plan = get_and_check_plan(request, company)\n return plan", "def all_plans(request):\n\n plans = Plan.objects.all()\n\n context = {\n 'plans': plans,\n }\n\n return render(request, 'plans/plans.html', context)", "def show(self):\n self.parser.add_argument('plan_uuid',\n help=\"Plan uuid or name\")\n args = self.parser.parse_args()\n response = self.client.plans.find(name_or_id=args.plan_uuid)\n fields = ['uuid', 'name', 'description', 'uri']\n data = dict([(f, getattr(response, f, ''))\n for f in fields])\n cliutils.print_dict(data, wrap=72)", "def get_all_user_development_plans_for_manager(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if not current_employee.isEnsoUser() and current_employee.is_manager:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not int(employee_id) in [obj.id for obj in Employee.objects.filter(manager=current_employee).all()]:\n raise PermissionDenied(\"Employee with id={} is not assigned to you.\".format(employee_id), 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n 
dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return JsonResponse(status=201, data=data)", "def plans():", "def list(cls):\n return cls().requests.get('plan')", "def plans(self):\r\n return pl.Plans(self)", "def get_plans(user, title=None, category=None, priority=None, status=None,\n id=None, orderby=None):\n user = get_user(user)\n filters = create_filters(id, title, category,\n priority, status)\n selection = user.plans.filter(**filters)\n\n if orderby:\n selection = selection.order_by(orderby)\n\n if not len(selection):\n raise ObjectDoesNotFound('There is no plans with selected filters.')\n return selection", "def get_plan(self):\n\t\tresponse = self.client.get(self._endpoint + \"/plan\")\n\t\tplan = response.json['plans']\n\t\tplan = list(plan.items())[0][1]\n\t\treturn Plan(plan['plan_id'],data=plan)", "def development_plan_details(request, development_plan_id): #, employee_id ):\n # employee = Employee.objects.get(user__pk=request.user.pk)\n # employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n current_employee = Employee.objects.filter(user__pk=request.user.pk).first()\n all_employees = development_plan.employee_relation.all()\n\n try:\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = development_plan.id\n dev_plan[\"deleted\"] = development_plan.deleted\n if development_plan.type:\n dev_plan[\"type\"] = development_plan.type.name\n # dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects.get(development_plan = development_plan)\\\n # .finished_at\n\n dev_plan[\"created_at\"] = development_plan.created_at\n dev_plan[\"created_by\"] = development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n 
emp_data[\"status_questions\"] = emp.status_questions\n\n emp_data[\"dev_plan_finished_at\"] = DevelopmentPlanToEmployeeRelation\\\n .objects.get(employee=emp,\n development_plan = development_plan)\\\n .finished_at\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id).first() #employee=current_employee\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for slider in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id).first() #employee=current_employee\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n except DevelopmentPlan.DoesNotExist:\n return 
JsonResponse(data={\"details\":\"Development Plan with this id doesn't exist\"}, status=404)", "def plan_detail(request, plan_id):\n\n plan = get_object_or_404(Plan, pk=plan_id)\n\n context = {\n 'plan': plan,\n }\n\n return render(request, 'plans/plan_detail.html', context)", "def new_flight_plan(self):\n r = requests.post(self.base_url + f'/users/{self.username}/flight-plans')\n return r.text", "def dashboard(request):\n employee = request.user.employee_user.first()\n widgets = list()\n # development_plans = employee.getDevelopmentPlans()\n if employee.is_manager:\n widgets.append(dict(\n # template=\"mus/_widget_waiting_developmentplans.html\",\n data=employee.getMyEmployees(),\n # title=_('Expecting preparation guides from')\n ))\n widgets.append(dict(\n # template=\"mus/_widget_todo_developmentplans.html\",\n data=employee.getMyEmployees(),\n # title=_('Preparation guides to do')\n ))\n # widgets.append(dict(\n # template = \"mus/_widget_my_developmentplans.html\",\n # data = development_plans,\n # title = _('My development plans')\n # ))\n return JsonResponse(status=200,data={\n # 'widgets': model_to_dict(widgets),\n 'employee': model_to_dict(employee),\n # 'development_plans': development_plans\n })", "def get_plans(self):\n return stripe.Plan.all()", "def plans(self):\r\n return Plans(self)", "def plans():\n results = []\n if 'qry' in request.args:\n look_for = request.args['qry']\n if look_for[0] == '*':\n look_for = ''\n zipcode = request.args['zipcode']\n\n try:\n plan = request.args['plan']\n except KeyError:\n return None\n\n # If this is a medicaid or private plan\n where = tools.get_location(zipcode)\n if where:\n if plan in ('medicaid', 'private'):\n state = where.STATE\n results = PlanNames.by_state(state, look_for, plan=='medicaid')\n results = [r.plan_name for r in results]\n if state == 'OH':\n results.append('OH State Medicaid')\n elif plan == 'medicare':\n county_code = where.GEO.COUNTY_CODE\n ma_region = where.GEO.MA_REGION_CODE\n pdp_region = where.GEO.PDP_REGION_CODE\n results = Plans.find_in_county(county_code, ma_region, pdp_region, look_for)\n\n return jsonify(sorted(results))", "def show(ctx, project_id, backend):\n try:\n project = ctx.obj['projects_db'].get(project_id, backend)\n except IOError:\n raise Exception(\"Error: the projects database file doesn't exist. \"\n \"Please run `taxi update` to create it\")\n\n if project is None:\n ctx.obj['view'].err(\n \"Could not find project `%s`\" % (project_id)\n )\n else:\n ctx.obj['view'].project_with_activities(project)", "def projects_view(request):\n\n # The projects to be displayed. 
Only the ones in which the logged in user is involved\n projects = request.user.projets.all().order_by('name')\n return render(request, 'projects.html', locals())", "def test_users_listed(self):\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.plan)", "def get(self):\n return {\n \"plans\": PLANS,\n }", "def view_projects(request):\n current_user=request.user\n current_user_name=current_user.username\n projects=Project.objects.all()\n return render(request, 'view_projects.html',{'projects':projects, 'current_user_name':current_user})", "def __str__(self):\n return self.plan", "def show(ctx):\n skale = ctx.obj['skale']\n # from skale.utils.contracts_provision.main import add_test_permissions\n # add_test_permissions(skale)\n show_all_schains_names(skale)", "def view_project():\n\n project_title = request.args.get('title')\n\n description, max_grade = hackbright.get_project_info(project_title)\n\n student_grades = hackbright.list_students_by_completed_project(project_title)\n\n return render_template(\"project_info.html\",\n title=project_title,\n description=description,\n max_grade=max_grade,\n student_grades=student_grades)", "def user_project_view(cls, user, project):\r\n pass", "def get(cls, plan_id):\n return cls().requests.get(f\"plan/{plan_id}\")", "def report_development(request):\n q = Q(app_status__name__iequals='Current Version') # actual?\n q = q | Q(app_status__name__iequals='In Development') # projected?\n q = q | Q(app_status__name__iequals='In Suspense') # supense\n q = q | Q(app_status__name__iequals='Unassigned') # TBD?\n apps = Application.objects.filter(q).values('release_date', 'release', 'acronym', 'sr_number', 'owner_org', 'nasa_requester', 'release_change_description', 'app_status__name').order_by('release_date', 'acronym', 'release')\n return render_to_response('report/app_pipeline_abbrev.html',\n {'object_list': apps,\n 'search_suggestions': _search_suggestions(),\n },\n context_instance=RequestContext(request));", "def list_plans(self, json_output: bool = False):\n plans, errors = self.rest.list_backup_plans()\n _exit_if_errors(errors)\n if json_output:\n print(json.dumps(plans, indent=2))\n else:\n self.human_print_plans(plans)", "def get_goals_todo_info(self, cr, uid, context=None):\n all_goals_info = []\n plan_obj = self.pool.get('gamification.goal.plan')\n\n plan_ids = plan_obj.search(cr, uid, [('user_ids', 'in', uid), ('state', '=', 'inprogress')], context=context)\n for plan in plan_obj.browse(cr, uid, plan_ids, context=context):\n # serialize goals info to be able to use it in javascript\n serialized_goals_info = {\n 'id': plan.id,\n 'name': plan.name,\n 'visibility_mode': plan.visibility_mode,\n }\n user = self.browse(cr, uid, uid, context=context)\n serialized_goals_info['currency'] = user.company_id.currency_id.id\n\n if plan.visibility_mode == 'board':\n # board report should be grouped by planline for all users\n goals_info = plan_obj.get_board_goal_info(cr, uid, plan, subset_goal_ids=False, context=context)\n\n if len(goals_info) == 0:\n # plan with no valid planlines\n continue\n\n serialized_goals_info['planlines'] = []\n for planline_board in goals_info:\n vals = {'type_name': planline_board['goal_type'].name,\n 'type_description': planline_board['goal_type'].description,\n 'type_condition': planline_board['goal_type'].condition,\n 'type_computation_mode': planline_board['goal_type'].computation_mode,\n 'type_monetary': 
planline_board['goal_type'].monetary,\n 'type_suffix': planline_board['goal_type'].suffix,\n 'type_action': True if planline_board['goal_type'].action_id else False,\n 'type_display': planline_board['goal_type'].display_mode,\n 'target_goal': planline_board['target_goal'],\n 'goals': []}\n for goal in planline_board['board_goals']:\n # Keep only the Top 3 and the current user\n if goal[0] > 2 and goal[1].user_id.id != uid:\n continue\n\n vals['goals'].append({\n 'rank': goal[0] + 1,\n 'id': goal[1].id,\n 'user_id': goal[1].user_id.id,\n 'user_name': goal[1].user_id.name,\n 'state': goal[1].state,\n 'completeness': goal[1].completeness,\n 'current': goal[1].current,\n 'target_goal': goal[1].target_goal,\n })\n if uid == goal[1].user_id.id:\n vals['own_goal_id'] = goal[1].id\n serialized_goals_info['planlines'].append(vals)\n\n else:\n # individual report are simply a list of goal\n goals_info = plan_obj.get_indivual_goal_info(cr, uid, uid, plan, subset_goal_ids=False, context=context)\n\n if not goals_info:\n continue\n\n serialized_goals_info['goals'] = []\n for goal in goals_info:\n serialized_goals_info['goals'].append({\n 'id': goal.id,\n 'type_name': goal.type_id.name,\n 'type_description': goal.type_description,\n 'type_condition': goal.type_id.condition,\n 'type_monetary': goal.type_id.monetary,\n 'type_suffix': goal.type_id.suffix,\n 'type_action': True if goal.type_id.action_id else False,\n 'type_display': goal.type_id.display_mode,\n 'state': goal.state,\n 'completeness': goal.completeness,\n 'computation_mode': goal.computation_mode,\n 'current': goal.current,\n 'target_goal': goal.target_goal,\n })\n\n all_goals_info.append(serialized_goals_info)\n return all_goals_info", "def __str__(self):\n return self.plan.title", "def report_development(request):\n apps = Application.objects.filter(app_status__name__icontains='Development').order_by('acronym', 'release')\n return render_to_response('application/search_results.html',\n {'object_list': apps,\n 'search_suggestions': _search_suggestions(),\n },\n context_instance=RequestContext(request));", "def get(self, **kwargs):\n _plans = self._plans.query(**kwargs)\n\n if not _plans:\n raise PlanNotFoundError\n\n return _plans", "def get_and_display_project():\n\n project = request.args.get('project')\n\n title, description, max_grade = hackbright.get_project_by_title(project)\n\n\n github_grade_list = hackbright.get_grades_by_title(project)\n\n return render_template(\"project_info.html\",\n title=title,\n description=description,\n max_grade=max_grade,\n github_grade_list=github_grade_list)", "def plans(request):\n results = Product.objects.filter(category__icontains='P')\n stars = Product.objects.annotate(\n avg_review=Avg('productreview__rating'),\n )\n context = {\n 'products': results,\n 'stars': stars\n }\n if not results:\n messages.error(request, \"No plans as of yet, that will change soon!\")\n return redirect(reverse('products'))\n else:\n return render(request, \"products.html\", context)", "def user_project_view(cls, user, project):\n pass", "def list(self):\n\n for name in self.projects:\n self.projects[name].show()\n print(\"\\n\")", "def get_and_check_plan(request, company):\n model = PlanModel(request.session)\n guid = request.matchdict['plan_guid']\n plan = model.get(guid)\n if plan is None:\n raise HTTPNotFound('No such plan {}'.format(guid))\n if plan.company_guid != company.guid:\n raise HTTPForbidden('You have no permission to access plan {}'\n .format(guid))\n return plan", "def 
developer_dahsboard(request):\r\n\tif not request.user.profile.is_developer:\r\n\t\tgames = Game.objects.all()\r\n\t\treturn redirect('/', {'games': games, 'MEDIA_URL': settings.MEDIA_URL})\r\n\tgames = Game.objects.filter(developer = request.user.profile)\r\n\treturn render(request, \"dashboard.html\", {'MEDIA_URL': settings.MEDIA_URL, 'games': games})", "def test_get_current(self, rf, projects):\n # get queryset\n request = rf.get(\"/projects/my\")\n view = MyProjectListView()\n view.setup(request)\n view.dispatch(request)\n view.get_queryset()\n qs = view.get_current()\n\n # slavic working group grant ended so it is \"past\"\n assert projects[\"derrida\"] in qs\n assert projects[\"pliny\"] in qs\n assert projects[\"ocampo\"] in qs\n assert projects[\"slavic\"] not in qs", "def getPlan(self):\n return StripePlan(self.base.get(\"plan\", []))", "def list_departments():\n \t check_admin()\n\n #check all the departments in the database and assign them to a variable.departments \n \t departments = Department.query.all()\n\n \t return render_template('admin/departments/departments.html',departments = departments,title = \"Departments\")", "def get(self):\n return GetListOfSavingPlan(current_user.id)", "def usersview_admin():\n\n # User objects list which includes list of all users which can be broken down into editors and sponsors\n # get all users\n user_objects=db.session.query(User.id,User.email,User.user_type,User.user_status,User.name,User.organization).\\\n order_by(User.id)\n\n # get a count of the user objects\n user_count = user_objects.count()\n\n # blank list to append to\n user_list=[]\n\n # loop through user objects\n for counter in range(0,user_count):\n user_list.append(user_objects[counter])\n\n # show list of document names\n users = user_list\n\n \"\"\"Logged-in User Dashboard.\"\"\"\n return render_template(\n 'usersview_admin.jinja2',\n users=users\n )", "def index(request): \n \n all_projects = models.Project.objects.all()\n projects = get_objects_for_user(request.user, 'view_project', all_projects)\n \n fbads_settings = FacebookAdsSettings.objects.first()\n return render_to_response('index.html',{\n 'projects': projects, \n 'fbads_settings': fbads_settings},\n context_instance=RequestContext(request))", "def admin_dash():\n if session['user_admin'] == False:\n abort(403)\n\n yesterday = datetime.utcnow() - timedelta(days=1)\n last_week = datetime.utcnow() - timedelta(days=7)\n # Retrieve all Users\n sqa_sess = sqa_session()\n total_users = sqa_sess.query(User).count()\n new_users_yesterday = sqa_sess.query(User).filter(User.Create_Date > yesterday).count()\n new_users_lastweek = sqa_sess.query(User).filter(User.Create_Date > last_week).count()\n\n active_users_yesterday = sqa_sess.query(User).filter(User.Last_Login_Date > yesterday).count()\n active_users_lastweek = sqa_sess.query(User).filter(User.Last_Login_Date > last_week).count()\n\n total_flights = sqa_sess.query(FlightPlan).count()\n new_flights_yesterday = sqa_sess.query(FlightPlan).filter(FlightPlan.Import_Date >= yesterday).count()\n new_flights_lastweek = sqa_sess.query(FlightPlan).filter(FlightPlan.Import_Date >= last_week).count()\n \n\n return render_template('admin/dashboard.html', total_users=total_users, new_users_yesterday=new_users_yesterday, new_users_lastweek=new_users_lastweek,\n active_users_lastweek=active_users_lastweek, active_users_yesterday=active_users_yesterday,\n total_flights=total_flights, new_flights_lastweek=new_flights_lastweek, new_flights_yesterday=new_flights_yesterday)", "def 
show(self):\n return self._project.show()", "def test_get_all_rate_plans(self):\n pass", "def show_activities(self): \n database = Database('data/database.db')\n activities = database.read_activities()\n view = Template(filename=\"view/template.html\", lookup=lookup)\n \n \n return view.render(\n rows = [[item.number, item.name] for item in activities],\n pageTitle = \"Activités\",\n tableTitle = \"Liste de toutes les activités\",\n ths = [\"Numéro\", \"Nom\"]\n )", "def list_assignmentypes_running(request):\n prof = request.user.prof\n context = {'type_assignmentype': 'running', 'prof': prof}\n context['list_assignmentypes'] = Assignmentype.objects.\\\n filter(archived=False, prof=prof).order_by('deadline_submission')\n return render(request, 'gradapp/list_assignmentype.html',\n context)", "def get_plan(self):\n sub = self.get_subscription()\n return sub.plan", "def index():\n view_dict = get_opentree_services_method_urls(request)\n view_dict['maintenance_info'] = get_maintenance_info(request)\n if auth.is_logged_in():\n # user is logged in, filter to their own collections by default?\n pass\n else:\n # anonymous visitor, show unfiltered list?\n pass\n\n return view_dict", "def projects(request):\n return Project.objects.prefetch_related('task_set').filter(user=request.user)", "def list_provisioned_product_plans_single_page(self, **kwargs):\n return slurp(\n 'list_provisioned_product_plans',\n self.list_provisioned_product_plans,\n 'ProvisionedProductPlans',\n **kwargs\n )", "def plan_list_post(request):\n company = auth_api_key(request)\n form = validate_form(PlanCreateForm, request)\n \n plan_type = form.data['plan_type']\n amount = form.data['amount']\n frequency = form.data['frequency']\n interval = form.data['interval']\n if interval is None:\n interval = 1\n company_guid = company.guid\n\n # TODO: make sure user cannot create a post to a deleted company\n\n model = PlanModel(request.session)\n type_map = dict(\n charge=model.TYPE_CHARGE,\n payout=model.TYPE_PAYOUT,\n )\n plan_type = type_map[plan_type]\n freq_map = dict(\n daily=model.FREQ_DAILY,\n weekly=model.FREQ_WEEKLY,\n monthly=model.FREQ_MONTHLY,\n yearly=model.FREQ_YEARLY,\n )\n frequency = freq_map[frequency]\n\n with db_transaction.manager:\n guid = model.create(\n company_guid=company_guid, \n plan_type=plan_type,\n amount=amount, \n frequency=frequency, \n interval=interval, \n )\n plan = model.get(guid)\n return plan", "def view_budgets(self) -> None:\n Menu.prompt_view_budgets()\n for budget in self.user.budget_manager:\n print(f\"{budget}\\n\")", "def list_applications(request):\n projs = Project.objects.filter(Q(applications__Student=request.user) | Q(distributions__Student=request.user)).distinct()\n projlist = []\n for proj in projs:\n try:\n a = proj.applications.get(Student=request.user)\n except Application.DoesNotExist:\n a = None\n try:\n d = proj.distributions.get(Student=request.user)\n except Distribution.DoesNotExist:\n d = None\n x = {'project': proj, 'application': a, 'distribution': d}\n projlist.append(x)\n\n return render(request, \"students/list_applications.html\", context={\n 'projlist': projlist\n })", "def my_dashboard_print(request):\n #Get the associated contact for our user\n user_con = request.user.contact\n qs_proj_assoc, qs_task_assoc = get_tiered_upcoming(user_con)\n\n #Get the projects associated with the user\n user_proj_table = table_assoc.ProjectAssocTable_Printable(qs_proj_assoc)\n #Get the tasks associated with the user\n user_task_table = 
table_assoc.TaskAssocTable_Printable(qs_task_assoc)\n\n # Render the HTML template index.html with the data in the context variable\n return render(\n request,\n 'my_dashboard_printable.html',\n context={\n 'user_con':user_con,\n 'user_proj_table':user_proj_table,\n 'user_task_table':user_task_table,\n },\n )", "def show_project():\n\n title = request.args.get('title')\n\n title, description, grade = hackbright.get_project_by_title(title)\n\n grade_list = hackbright.get_grades_by_title(title)\n\n html = render_template(\"project.html\", title=title,\n description=description, grade=grade,\n grade_list=grade_list)\n\n return html", "def update_plan_choisen():\n # SOLO USO PARA AMBIENTE EN DESARROLLO\n for client in Client.objects.all():\n try:\n plan_chosen = get_query_set_plan()\n plan_active = plan_chosen.filter(queryplansclient__client=client.id, is_active=True,\n queryplansclient__is_chosen=True)\n if plan_active:\n plan = QueryPlansAcquiredSerializer(plan_active[0])\n chosen_plan(client.id, plan.data)\n print(\"success\")\n print(\"empty\")\n except Exception as e:\n print(\"error\"+str(e))", "def plan(self):\n return read_small_file(self.homeDirectory + \"/.plan\")", "def model_plan_feature(cfg, model, developer_gen):\n model.ticket = cfg[\"ticket_id_template\"] % (choose_in(1, cfg[\"repo_age_in_days\"]),)\n model.planned = choose_in(1, cfg[\"max_commits_per_branch\"])\n model.developer = next(developer_gen)\n return model", "def index(request):\n user_data_list = UserData.objects.all().order_by('environment')\n context = {'user_data_list': user_data_list}\n\n return render(request, 'index.html', context)", "def dashboard():\n # TODO: Optionally, old proposals should be shown in a read-only mode.\n talks = Talk.query.current.filter(Talk.user == current_user)\n return render_template(\n 'profile/dashboard.html', talks=talks)", "def desks():\n desks = Desk.query.all()\n return render_template('desks.html', desks=desks)", "def human_print_plan(plan: object):\n print(f'Name: {plan[\"name\"]}')\n print(f'Description: {plan[\"description\"] if \"description\" in plan else \"N/A\"}')\n print(f'Services: {BackupServicePlan.service_list_to_str(plan[\"services\"])}')\n print(f'Default: {(plan[\"default\"] if \"deafult\" in plan else False)!s}')\n\n # If the are no tasks return\n if not plan[\"tasks\"]:\n return\n\n print()\n print('Tasks:')\n task_name_pad = 5\n schedule_pad = 10\n for task in plan['tasks']:\n if len(task['name']) > task_name_pad:\n task_name_pad = len(task['name'])\n\n task['schedule_str'] = BackupServicePlan.format_schedule(task['schedule'])\n if len(task['schedule_str']) > schedule_pad:\n schedule_pad = len(task['schedule_str'])\n\n task_name_pad += 1\n schedule_pad += 1\n\n header = f'{\"Name\":<{task_name_pad}} | {\"Schedule\":<{schedule_pad}} | Options'\n print(header)\n print('-' * (len(header) + 5))\n\n for task in plan['tasks']:\n options = BackupServicePlan.format_options(task)\n print(f'{task[\"name\"]:<{task_name_pad}} | {task[\"schedule_str\"]:<{schedule_pad}} | {options}')", "def show():\n info(str(Project))", "def test_listing_supplies_admin(self):\n request = self.factory.get(\n '/api/supplies')\n force_authenticate(request, user=self.testadmin)\n response = SupplyListView.as_view()(request)\n # admin can browse the data\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def plans(self, plans):\n\n self._plans = plans", "def return_admin_list(request):\n del request\n return return_user_list(Administrador)", "def home(request):\n projects_newest = 
Project.approved_projects().all().select_related(\"screenshot\").order_by('-id')[:10]\n projects_newest = [project for project in projects_newest]\n return render(request, 'home.html', {\n \"form\": AuthenticationForm(),\n 'projects_popular': projects_newest,\n 'projects_newest': projects_newest\n })", "def departments(request):\n if 'selected_package' in request.session:\n del request.session['selected_package']\n assert isinstance(request, HttpRequest)\n status, result = api.show_departments()\n return render(\n request,\n 'app/departments.html',\n {\n 'title': 'แผนกและแพ็คเกจ',\n 'departments': result,\n 'logged_user': request.session.get('user')\n }\n )", "def showORGusers(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n ORG_ID = kwargs['ORG_ID']\n strCSPProdURL = kwargs['strCSPProdURL']\n jsonResponse = get_csp_users_json(strCSPProdURL, ORG_ID, sessiontoken)\n if jsonResponse == None:\n print(\"API Error\")\n sys.exit(1)\n\n users = jsonResponse['results']\n table = PrettyTable(['First Name', 'Last Name', 'User Name'])\n for i in users:\n table.add_row([i['user']['firstName'],i['user']['lastName'],i['user']['username']])\n print (table.get_string(sortby=\"Last Name\"))", "def _get_standalone_queryset(self, queryset):\n # (not used yet) To be iso LTI, admin and instructor can retrieve all video's livesession\n if permissions.IsParamsVideoAdminThroughOrganization().has_permission(\n self.request, self\n ):\n return queryset\n # use can get his related livesession\n return queryset.filter(user_id=self.request.user.id)", "def projects(request):\n projects = (\n Project.objects.visible()\n .visible_for(request.user)\n .prefetch_related(\"latest_translation__user\")\n .order_by(\"name\")\n )\n\n if not projects:\n return render(request, \"no_projects.html\", {\"title\": \"Projects\"})\n\n return render(\n request,\n \"projects/projects.html\",\n {\"projects\": projects, \"top_instances\": projects.get_top_instances()},\n )", "def select_approved_projects(self):\r\n print \"Selecting approved projects... 
\"\r\n global ANNUAL_BUDGET\r\n \r\n projects_citizens_sorted = sorted(self.projects_for_vote, key=lambda project:project.units, reverse=True)\r\n projects_reps_sorted = sorted(self.projects_for_vote, key=lambda project:project.p_units, reverse=True)\r\n budget_sum = 0\r\n \r\n for p in projects_citizens_sorted:\r\n budget_sum += p.budget\r\n if budget_sum <= ANNUAL_BUDGET/2:\r\n self.projects_approved.append(p)\r\n\r\n budget_sum = 0\r\n for p in projects_reps_sorted:\r\n if p not in self.projects_approved:\r\n budget_sum += p.budget\r\n if budget_sum <= ANNUAL_BUDGET/2:\r\n self.projects_approved.append(p)\r\n\r\n\r\n \r\n# raw_input(\"select_approved_projects - antes\")\r\n for p in projects_citizens_sorted:\r\n print p\r\n print \"\\nReps\\n\"\r\n for p in projects_reps_sorted:\r\n print p\r\n print \"\\nApproved\\n\"\r\n for p in self.projects_approved:\r\n print p\r\n\r\n raw_input(\"select_approved_projects - depois\")", "def view_approved():\n global approved\n global appr_ind\n appr = approved.get_all_values()\n headings = appr[0]\n first_appl = appr[appr_ind]\n for head, app in zip(headings, first_appl):\n head = head.ljust(15, ' ')\n print(f'{head} {app}')\n keep_viewing = True\n while keep_viewing:\n view_next = input('\\nPress V to view next, Q to quit, M for main '\n 'menu.\\n')\n if view_next.lower() == 'q':\n logout()\n elif view_next.lower() == 'v':\n appr_ind += 1\n if appr_ind < len(appr):\n print('Next approved application: \\n')\n view_approved()\n else:\n print('\\nNo more approved applications to view \\n')\n keep_viewing = False\n next_action()\n elif view_next.lower() == 'm':\n keep_viewing = False\n hr_main()\n break\n else:\n is_invalid()", "def KLP_Users_list(request):\n\n # get logged in user\n\n user = request.user\n if user.id:\n\n # check logged in user permissions, to get user list\n\n KLP_user_Perm(request.user, 'Users', None)\n\n # get all active(1) users list other than staff and super user order by username\n\n user_list = User.objects.filter(is_staff=0,\n is_superuser=0).order_by('username')\n\n # render show users form with users list\n\n return render_to_response('viewtemplates/show_users_form.html',\n {\n 'user_list': user_list,\n 'user': user,\n 'title': 'KLP Users',\n 'legend': 'Karnataka Learning Partnership',\n 'entry': 'Add',\n }, context_instance=RequestContext(request))\n else:\n\n # if user is not logged in redirect to login page\n\n return HttpResponseRedirect('/login/')", "def plan(self):\n\n plan = f\"\"\"\n Input parameters: {self.params}\n Product: {self.product}\n\n Source code:\n {self.source_code}\n \"\"\"\n\n print(plan)", "def describe_provisioned_product_plan_single_page(self, **kwargs):\n return slurp(\n 'describe_provisioned_product_plan',\n self.describe_provisioned_product_plan,\n 'ProvisionedProductPlanDetails',\n **kwargs\n )", "def plan(self):\n raise NotImplementedError('You must implement the plan() method '\n 'yourself!')", "def changelist_view(self, request, extra_context=None):\n if request.user.user_type == User.ADMIN_CEA:\n self.list_display = ('user', 'cea', 'booking', 'request_status')\n elif request.user.user_type == User.ADMIN_CRC:\n self.list_display = ('user', 'crc', 'booking', 'request_status')\n elif request.user.user_type == User.EXPRESS_USER:\n self.list_display = ('user', 'payment_type', 'request_status', 'credit_status', 'booking')\n else:\n self.list_display = ('user', 'booking','cea', 'crc', 'transit', 'payment_type', 'request_status',)\n return super(RequestAdmin, self).changelist_view(request, 
extra_context)", "def human_print_plans(plans: List[Any]):\n # if plans is empty or none print no plans message\n if not plans:\n print('No plans')\n return\n\n name_pad = 5\n service_pad = 8\n for plan in plans:\n if len(plan['name']) > name_pad:\n name_pad = len(plan['name'])\n services_str = BackupServicePlan.service_list_to_str(plan['services'])\n if len(services_str) > service_pad:\n service_pad = len(services_str)\n\n name_pad += 1\n service_pad += 1\n header = f'{\"Name\":<{name_pad}} | # Tasks | {\"Services\":<{service_pad}} | Default'\n print(header)\n print('-' * (len(header) + 5))\n for plan in plans:\n task_len = len(plan['tasks']) if 'tasks' in plan and plan['tasks'] else 0\n print(f'{plan[\"name\"]:<{name_pad}} | {task_len:<7} | '\n f'{BackupServicePlan.service_list_to_str(plan[\"services\"]):<{service_pad}} | '\n f'{(plan[\"default\"] if \"default\" in plan else False)!s}')", "def plan(self, plan_code):\r\n return pl.Plan(self, plan_code)", "def view():\n login_dict = _open_cnfg()\n login_name, login_url, login_api, login_hid = ['Login name'], ['URL'], ['API key'], ['History ID']\n for lgn in login_dict['logins']:\n login_name.append(lgn)\n login_url.append(login_dict['logins'][lgn]['url'])\n login_api.append(login_dict['logins'][lgn]['api_key'])\n login_hid.append(login_dict['logins'][lgn]['hid'])\n click.echo(\"You are currently using active login: \" + click.style(login_dict['active_login'], bold=True))\n utils._tabulate([login_name, login_url, login_api, login_hid])", "def get_queryset(self):\n queryset = Project.objects.filter(contributor__user=self.request.user.pk)\n return queryset", "def view(self, parent, **kargs):\n design = Service('Design')\n return design.view_list(parent, self, **kargs)", "def display_departmentlist():\n\tdeptid = 0\n\tprint\n\tprint '[*] Fetching departments list'\n\n\t# call the api function\n\tsupportdepartments = whmcs.getsupportdepartments()\n\tif supportdepartments == None:\n\t\tprint '[x] WHMCS getsupportdepartments API function call failed.'\n\t\tprint '[!] exiting.'\n\t\t_exit(0)\n\n\t# reconnect if ssl or url error orccured\n\twhile supportdepartments == 'sslerror' or supportdepartments == 'urlerror':\n\t\tprint '[!] Re-establishing connection after 5 seconds'\n\t\ttry: time.sleep(5)\n\t\texcept KeyboardInterrupt: print '\\n[!] exiting.'; _exit()\n\t\tsupportdepartments = whmcs.getsupportdepartments()\n\n\tresult = supportdepartments.get('result')\n\ttotalresults = supportdepartments.get('totalresults')\n\tif result != 'success' or totalresults == 0:\n\t\tprint '[x] Unable to find any support departments on (%s).' % (parser.get('whmcs', 'server'))\n\t\tprint '[x] %s.' 
% supportdepartments.get('message')\n\t\t_exit()\n\n\t#############################\n\t## Display Department List ##\n\t#############################\n\t# Eg: {'departments': { 'department': [{'id': ,'name': ,'awaitingreply': ,'opentickets': ,}, {...}]}}\n\n\tdepartments = supportdepartments.get('departments').get('department')\n\trowformat = '| %-5s | %-20s | %-15s | %-15s |'\n\theader = ('ID', 'Department', 'Awaiting Reply', 'Open Tickets')\n\ttitle = rowformat % header\n\tprint '-' * len(title)\n\tprint title\n\tprint '-' * len(title)\n\tdeptlist = []\n\tfor department in departments:\n\t\tdeptid = department['id']\n\t\tdeptlist.append(deptid)\n\t\tdeptname=department['name']\n\t\tif len(deptname) > 20:\n\t\t\tdeptname = deptname[:20-4]+'...'\n\t\tprint rowformat % (deptid, deptname, department.get('awaitingreply'), department.get('opentickets'))\n\t\tprint '-' * len(title)\n\n\t# Display department ID selection prompt\n\twhile 1:\n\t\ttry:\n\t\t\tdeptid = raw_input('[+] Select Department ID: ')\n\t\texcept KeyboardInterrupt:\n\t\t\tprint '\\n[!] exiting.cleanly.'\n\t\t\texit()\n\n\t\tif type(deptid) != int and deptid not in deptlist:\n\t\t\tprint '[!] Invalid Department ID (%s).' % deptid\n\t\telse:\n\t\t\tbreak\n\treturn deptid", "def show_applicants():\n data_list = queries2.applicants()[0]\n table_titles = queries2.applicants()[1]\n title = \"Applicants\"\n return render_template('pages.html', data_list=data_list, title=title, table_titles=table_titles)", "def devlist_handler(userdata, *args):\n\t\tfor (dev, connected) in database.devlist(userdata[\"cursor\"]):\n\t\t\tif dev == \"devmaster\":\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tif connected:\n\t\t\t\tprint(shlex.quote(\"+\" + dev), end=\" \")\n\t\t\telse:\n\t\t\t\tprint(shlex.quote(\"-\" + dev), end=\" \")\n\t\t\n\t\tprint()", "def provisioned_plans(self):\n if \"provisionedPlans\" in self._prop_dict:\n return ProvisionedPlansCollectionPage(self._prop_dict[\"provisionedPlans\"])\n else:\n return None", "def provisioned_plans(self):\n if \"provisionedPlans\" in self._prop_dict:\n return ProvisionedPlansCollectionPage(self._prop_dict[\"provisionedPlans\"])\n else:\n return None", "def provisioned_plans(self):\n if \"provisionedPlans\" in self._prop_dict:\n return ProvisionedPlansCollectionPage(self._prop_dict[\"provisionedPlans\"])\n else:\n return None", "def user_plan(request, username):\n\n try:\n user = MiVotiUser.objects.get(username=username)\n except MiVotiUser.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if not user.gdrive_id_json_plan:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n # Obtener el plan\n dict_plan = gdrive_obtener_contenido_plan(user.gdrive_id_json_plan)\n return Response(dict_plan)\n elif request.method == 'POST':\n # Crear un nuevo Plan\n serializer = PlanEstudioUsuarioSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save(user)\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n # Eliminar el plan\n ruta_local = os.path.join('planes_json_cache', user.gdrive_id_json_plan)\n\n if os.path.exists(ruta_local):\n os.remove(ruta_local)\n\n archivo_plan_json = apps.get_app_config('planeador').g_drive.CreateFile(\n {'id': user.gdrive_id_json_plan})\n\n archivo_plan_json.Delete()\n\n user.gdrive_id_json_plan = None\n user.save()\n\n return Response(status=status.HTTP_204_NO_CONTENT)", "def check_internal_api_for_subscription(namespace_user):\n 
plans = []\n if namespace_user.organization:\n query = organization_skus.get_org_subscriptions(namespace_user.id)\n org_subscriptions = list(query.dicts()) if query is not None else []\n for subscription in org_subscriptions:\n subscription_id = subscription[\"subscription_id\"]\n sku = marketplace_subscriptions.get_subscription_sku(subscription_id)\n plans.append(get_plan_using_rh_sku(sku))\n pass\n else:\n user_account_number = marketplace_users.get_account_number(namespace_user)\n if user_account_number:\n plans = marketplace_subscriptions.get_list_of_subscriptions(\n user_account_number, filter_out_org_bindings=True, convert_to_stripe_plans=True\n )\n return plans", "def personal(request):\n \n visits = Visit.objects.filter(user=request.user)\n \n \n return render(request, \"personal.html\", {'visits':visits})", "def listerp(request):\n if 'member_id' not in request.session:\n return redirect(\"/login/\")\n try:\n Programme.objects.order_by(\"id\")\n Prog_lister = Programme.objects.all()\n return render(request, 'esihapp/listp.html', locals())\n except KeyError:\n return render(request, 'esihapp/listp.html', locals())", "def open_projects(request):\n return Project.objects.prefetch_related('task_set').filter(user=request.user, open=True)" ]
[ "0.69843084", "0.6867608", "0.6754407", "0.65818614", "0.6510022", "0.64818734", "0.62669057", "0.6181612", "0.6179575", "0.6045006", "0.6030298", "0.60151047", "0.60096705", "0.59797555", "0.5899104", "0.5885478", "0.58685434", "0.5831174", "0.5770537", "0.57329553", "0.5725271", "0.5691866", "0.56607217", "0.5653614", "0.55828154", "0.5575905", "0.5567765", "0.5548165", "0.55380464", "0.5536073", "0.55331254", "0.55188435", "0.55187595", "0.5454833", "0.54547864", "0.54441875", "0.54233414", "0.5417927", "0.541509", "0.5410832", "0.5374278", "0.53627867", "0.5357163", "0.5340075", "0.53334177", "0.53308064", "0.5330589", "0.5320902", "0.53017884", "0.5285477", "0.5273689", "0.5268762", "0.5265785", "0.52584136", "0.525716", "0.524758", "0.5245533", "0.5231802", "0.5223526", "0.5222911", "0.52192086", "0.5213532", "0.521097", "0.5208144", "0.52043205", "0.518716", "0.51740336", "0.51683", "0.5162014", "0.51584345", "0.51572156", "0.51491255", "0.5144768", "0.51413447", "0.51401687", "0.5125306", "0.51237464", "0.5123154", "0.5116206", "0.51135343", "0.5104505", "0.51003814", "0.5095931", "0.5091153", "0.5089379", "0.5085056", "0.50848484", "0.5069092", "0.50617623", "0.50587946", "0.5051753", "0.50509477", "0.5041138", "0.5041138", "0.5041138", "0.50356954", "0.50351256", "0.5029286", "0.5028902", "0.5026502" ]
0.7084471
0
View active development plan for active user
def get_active_development_plan_for_user(request): current_employee = Employee.objects.get(user__pk=request.user.pk) current_development_plan = DevelopmentPlan.objects.filter( employee_relation=current_employee, employee_relation__developmentplantoemployeerelation__finished_at__isnull=True).first() # is active !!! if not current_employee: raise PermissionDenied() if current_development_plan: data={} development_plan_object_list=[] dev_plan={} dev_plan["id"] = current_development_plan.id dev_plan["deleted"] = current_development_plan.deleted if current_development_plan.type: dev_plan["type"] = current_development_plan.type.name dev_plan["finished_at"] = DevelopmentPlanToEmployeeRelation.objects\ .get(employee=current_employee, development_plan = current_development_plan)\ .finished_at dev_plan["created_at"] = current_development_plan.created_at dev_plan["created_by"] = current_development_plan.created_by.username development_plan_object_list.append({"dev_plan_details":dev_plan}) # manager_relation manager_data={} manager_data["manager_username"] = current_development_plan.manager_relation.user.username manager_data["manager_first_name"] = current_development_plan.manager_relation.user.first_name manager_data["manager_last_name"] = current_development_plan.manager_relation.user.last_name development_plan_object_list.append({"manager_data":manager_data}) # employee_relation employee_data={} all_employees = current_development_plan.employee_relation.all() if all_employees: emp_list=[] for emp in all_employees: emp_data={} emp_data["id"] = emp.user.id emp_data["username"] = emp.user.username emp_data["first_name"] = emp.user.first_name emp_data["last_name"] = emp.user.last_name emp_data["status_questions"] = emp.status_questions employee_role = EmployeeRole.objects.filter(employee=emp).all() name_role_list = [] for obj in employee_role: name_role_list.append(obj.role.name) emp_data["roles"] = name_role_list emp_list.append(emp_data) employee_data={"all_employees":emp_list} else: return JsonResponse(data={"details":"Any employee has Development Plan with id={}" .format(current_development_plan.id)}, status=404) development_plan_object_list.append({"employee_data":employee_data}) # competence_parts all_competence_parts = current_development_plan.competence_parts.all() competence_list = [] questions_list = [] sliders_list = [] if all_competence_parts: for comp_part in all_competence_parts: comp_part_data={} competence_d={"competence_parts": []} comp_part_data["id"] = comp_part.id comp_part_data["title"] = comp_part.title comp_part_data["description"] = comp_part.description comp_part_data["competence_status"] = comp_part.competence_status all_questions = comp_part.question_set.all() print all_questions if all_questions: for question in all_questions: question_data = {} question_data["question_id"] = question.id question_data["title"] = question.title question_data["competence_part"] = question.competence_part.id answer = Answer.objects.filter(question__id = question.id, employee=current_employee).first() if answer: question_data["answer_id"] = answer.id question_data["answer"] = answer.title questions_list.append(question_data) comp_part_data["questions"] = questions_list all_sliders = comp_part.slider_set.all() if all_sliders: for slider in all_sliders: slider_data = {} slider_data["slider_id"] = slider.id slider_data["scale"] = slider.scale slider_data["competence_part"] = slider.competence_part.id answer = Answer.objects.filter(slider__id = slider.id, employee=current_employee).first() if 
answer: slider_data["answer_id"] = answer.id slider_data["answer"] = answer.slider.scale sliders_list.append(slider_data) comp_part_data["sliders"] = sliders_list comp_part_data["created_at"] = comp_part.created_at comp_part_data["created_by"] = comp_part.created_by.username comp_part_data["updated_at"] = comp_part.updated_at comp_part_data["updated_by"] = comp_part.updated_by.username competence_keys_list = ['id', 'title', 'description', 'language_code', 'status'] if not competence_list: get_competence_data(competence_keys_list, comp_part.competence, competence_d, comp_part_data, competence_list) else: competence_found = False for competence_dict in competence_list: if competence_dict['id'] == comp_part.competence.id: competence_dict['competence_parts'].append(comp_part_data) competence_found = True break if not competence_found: get_competence_data(competence_keys_list, comp_part.competence, competence_d, comp_part_data, competence_list) development_plan_object_list.append({"competences":competence_list}) else: return JsonResponse(data={"details":"Development Plan with id={} doesn't have any Competence Part yet" .format(current_development_plan.id)}, status=404) data = {"dev_plan:": development_plan_object_list} return JsonResponse(status=201, data=data) else: return JsonResponse(data={"details": "The user with id={} doesn't have an active Development Plan" .format(current_employee.user.id)}, status=404)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plan_get(request):\n company = auth_api_key(request)\n plan = get_and_check_plan(request, company)\n return plan", "def plans(self):\n title = self.context.Title()\n return self.portal_catalog(portal_type='Plan', Subject=title)", "def get_all_development_plans_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return JsonResponse(status=201, data=data)", "def plan_list_get(request):\n return list_by_company_guid(request, PlanModel)", "def show(self):\n self.parser.add_argument('plan_uuid',\n help=\"Plan uuid or name\")\n args = self.parser.parse_args()\n response = self.client.plans.find(name_or_id=args.plan_uuid)\n fields = ['uuid', 'name', 'description', 'uri']\n data = dict([(f, getattr(response, f, ''))\n for f in fields])\n cliutils.print_dict(data, wrap=72)", "def all_plans(request):\n\n plans = Plan.objects.all()\n\n context = {\n 'plans': plans,\n }\n\n return render(request, 'plans/plans.html', context)", "def plans():", "def plan_detail(request, plan_id):\n\n plan = get_object_or_404(Plan, pk=plan_id)\n\n context = {\n 'plan': plan,\n }\n\n return render(request, 'plans/plan_detail.html', context)", "def show(ctx, project_id, backend):\n try:\n project = ctx.obj['projects_db'].get(project_id, backend)\n except IOError:\n raise Exception(\"Error: the projects database file doesn't exist. 
\"\n \"Please run `taxi update` to create it\")\n\n if project is None:\n ctx.obj['view'].err(\n \"Could not find project `%s`\" % (project_id)\n )\n else:\n ctx.obj['view'].project_with_activities(project)", "def show():\n info(str(Project))", "def plan(self):\n return read_small_file(self.homeDirectory + \"/.plan\")", "def view_projects(request):\n current_user=request.user\n current_user_name=current_user.username\n projects=Project.objects.all()\n return render(request, 'view_projects.html',{'projects':projects, 'current_user_name':current_user})", "def new_flight_plan(self):\n r = requests.post(self.base_url + f'/users/{self.username}/flight-plans')\n return r.text", "def user_project_view(cls, user, project):\r\n pass", "def __str__(self):\n return self.plan.title", "def __str__(self):\n return self.plan", "def get_plan(self):\n\t\tresponse = self.client.get(self._endpoint + \"/plan\")\n\t\tplan = response.json['plans']\n\t\tplan = list(plan.items())[0][1]\n\t\treturn Plan(plan['plan_id'],data=plan)", "def plan(self):\n\n plan = f\"\"\"\n Input parameters: {self.params}\n Product: {self.product}\n\n Source code:\n {self.source_code}\n \"\"\"\n\n print(plan)", "def view_project():\n\n project_title = request.args.get('title')\n\n description, max_grade = hackbright.get_project_info(project_title)\n\n student_grades = hackbright.list_students_by_completed_project(project_title)\n\n return render_template(\"project_info.html\",\n title=project_title,\n description=description,\n max_grade=max_grade,\n student_grades=student_grades)", "def get_plan(self):\n sub = self.get_subscription()\n return sub.plan", "def get_and_check_plan(request, company):\n model = PlanModel(request.session)\n guid = request.matchdict['plan_guid']\n plan = model.get(guid)\n if plan is None:\n raise HTTPNotFound('No such plan {}'.format(guid))\n if plan.company_guid != company.guid:\n raise HTTPForbidden('You have no permission to access plan {}'\n .format(guid))\n return plan", "def show(self):\n return self._project.show()", "def get_all_user_development_plans_for_manager(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if not current_employee.isEnsoUser() and current_employee.is_manager:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not int(employee_id) in [obj.id for obj in Employee.objects.filter(manager=current_employee).all()]:\n raise PermissionDenied(\"Employee with id={} is not assigned to you.\".format(employee_id), 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = 
plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return JsonResponse(status=201, data=data)", "def user_project_view(cls, user, project):\n pass", "def view():\n login_dict = _open_cnfg()\n login_name, login_url, login_api, login_hid = ['Login name'], ['URL'], ['API key'], ['History ID']\n for lgn in login_dict['logins']:\n login_name.append(lgn)\n login_url.append(login_dict['logins'][lgn]['url'])\n login_api.append(login_dict['logins'][lgn]['api_key'])\n login_hid.append(login_dict['logins'][lgn]['hid'])\n click.echo(\"You are currently using active login: \" + click.style(login_dict['active_login'], bold=True))\n utils._tabulate([login_name, login_url, login_api, login_hid])", "def my_dashboard_print(request):\n #Get the associated contact for our user\n user_con = request.user.contact\n qs_proj_assoc, qs_task_assoc = get_tiered_upcoming(user_con)\n\n #Get the projects associated with the user\n user_proj_table = table_assoc.ProjectAssocTable_Printable(qs_proj_assoc)\n #Get the tasks associated with the user\n user_task_table = table_assoc.TaskAssocTable_Printable(qs_task_assoc)\n\n # Render the HTML template index.html with the data in the context variable\n return render(\n request,\n 'my_dashboard_printable.html',\n context={\n 'user_con':user_con,\n 'user_proj_table':user_proj_table,\n 'user_task_table':user_task_table,\n },\n )", "def currently_in_development_page(request, model='', header='', form_data=None):\n html = render_to_string(\"currently_in_development.html\")\n return html", "def list(cls):\n return cls().requests.get('plan')", "def get_and_display_project():\n\n project = request.args.get('project')\n\n title, description, max_grade = hackbright.get_project_by_title(project)\n\n\n github_grade_list = hackbright.get_grades_by_title(project)\n\n return render_template(\"project_info.html\",\n title=title,\n description=description,\n max_grade=max_grade,\n github_grade_list=github_grade_list)", "def development_plan_details(request, development_plan_id): #, employee_id ):\n # employee = Employee.objects.get(user__pk=request.user.pk)\n # employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n current_employee = Employee.objects.filter(user__pk=request.user.pk).first()\n all_employees = development_plan.employee_relation.all()\n\n try:\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = development_plan.id\n dev_plan[\"deleted\"] = development_plan.deleted\n if development_plan.type:\n dev_plan[\"type\"] = development_plan.type.name\n # dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects.get(development_plan = development_plan)\\\n # .finished_at\n\n dev_plan[\"created_at\"] = development_plan.created_at\n dev_plan[\"created_by\"] = development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = 
development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n emp_data[\"dev_plan_finished_at\"] = DevelopmentPlanToEmployeeRelation\\\n .objects.get(employee=emp,\n development_plan = development_plan)\\\n .finished_at\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id).first() #employee=current_employee\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for slider in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id).first() #employee=current_employee\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n 
competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n except DevelopmentPlan.DoesNotExist:\n return JsonResponse(data={\"details\":\"Development Plan with this id doesn't exist\"}, status=404)", "def plan(self):\n raise NotImplementedError('You must implement the plan() method '\n 'yourself!')", "def projects_view(request):\n\n # The projects to be displayed. Only the ones in which the logged in user is involved\n projects = request.user.projets.all().order_by('name')\n return render(request, 'projects.html', locals())", "def plans(self):\r\n return pl.Plans(self)", "def dashboard():\n # TODO: Optionally, old proposals should be shown in a read-only mode.\n talks = Talk.query.current.filter(Talk.user == current_user)\n return render_template(\n 'profile/dashboard.html', talks=talks)", "def getPlan(self):\n return StripePlan(self.base.get(\"plan\", []))", "def pricing_plan(self) -> str:\n return pulumi.get(self, \"pricing_plan\")", "def dashboard(request):\n employee = request.user.employee_user.first()\n widgets = list()\n # development_plans = employee.getDevelopmentPlans()\n if employee.is_manager:\n widgets.append(dict(\n # template=\"mus/_widget_waiting_developmentplans.html\",\n data=employee.getMyEmployees(),\n # title=_('Expecting preparation guides from')\n ))\n widgets.append(dict(\n # template=\"mus/_widget_todo_developmentplans.html\",\n data=employee.getMyEmployees(),\n # title=_('Preparation guides to do')\n ))\n # widgets.append(dict(\n # template = \"mus/_widget_my_developmentplans.html\",\n # data = development_plans,\n # title = _('My development plans')\n # ))\n return JsonResponse(status=200,data={\n # 'widgets': model_to_dict(widgets),\n 'employee': model_to_dict(employee),\n # 'development_plans': development_plans\n })", "def profile(request):\n # Load last 5 orders as preview\n orders = Order._default_manager.filter(user=request.user)[0:5]\n return render(request, 'oscar/customer/profile.html', locals())", "def view_user(self):\n\n logged_in = authenticated_userid(self.request)\n return {\n 'project': '',\n 'username': self.context.username,\n 'logged_in': logged_in,\n }", "def show(ctx):\n skale = ctx.obj['skale']\n # from skale.utils.contracts_provision.main import add_test_permissions\n # add_test_permissions(skale)\n show_all_schains_names(skale)", "def display_project_info(project_name):\n\n # project = request.args.get('project')\n\n title, description, max_grade = hackbright.get_project_by_title(project_name)\n\n grades = hackbright.get_grades_by_title(project_name)\n\n return render_template(\"project_info.html\",\n title=title,\n description=description,\n grade=max_grade,\n grades=grades)", "def test_get_current(self, rf, projects):\n # get queryset\n request = rf.get(\"/projects/my\")\n view = MyProjectListView()\n view.setup(request)\n view.dispatch(request)\n view.get_queryset()\n qs = view.get_current()\n\n # slavic working group grant ended so it is \"past\"\n assert projects[\"derrida\"] in qs\n assert projects[\"pliny\"] in qs\n assert projects[\"ocampo\"] in qs\n assert projects[\"slavic\"] 
not in qs", "def report_development(request):\n q = Q(app_status__name__iequals='Current Version') # actual?\n q = q | Q(app_status__name__iequals='In Development') # projected?\n q = q | Q(app_status__name__iequals='In Suspense') # supense\n q = q | Q(app_status__name__iequals='Unassigned') # TBD?\n apps = Application.objects.filter(q).values('release_date', 'release', 'acronym', 'sr_number', 'owner_org', 'nasa_requester', 'release_change_description', 'app_status__name').order_by('release_date', 'acronym', 'release')\n return render_to_response('report/app_pipeline_abbrev.html',\n {'object_list': apps,\n 'search_suggestions': _search_suggestions(),\n },\n context_instance=RequestContext(request));", "def dashboard():\n # Get current user\n user = current_user\n # Get tip of the day\n tip = gdb.gettipofday()\n # Get current user Leaderboard Status\n leaderboard, current_user_info = gdb.getleaderboard(current_user.userID)\n weektopgainers, monthtopgainers = gdb.gettopgainers()\n # Render template\n return render_template('dashboard.html', user=user,\n leaderboard=leaderboard,\n userbalance=current_user.balance, tip=tip,\n current_user_info=current_user_info)", "def human_print_plan(plan: object):\n print(f'Name: {plan[\"name\"]}')\n print(f'Description: {plan[\"description\"] if \"description\" in plan else \"N/A\"}')\n print(f'Services: {BackupServicePlan.service_list_to_str(plan[\"services\"])}')\n print(f'Default: {(plan[\"default\"] if \"deafult\" in plan else False)!s}')\n\n # If the are no tasks return\n if not plan[\"tasks\"]:\n return\n\n print()\n print('Tasks:')\n task_name_pad = 5\n schedule_pad = 10\n for task in plan['tasks']:\n if len(task['name']) > task_name_pad:\n task_name_pad = len(task['name'])\n\n task['schedule_str'] = BackupServicePlan.format_schedule(task['schedule'])\n if len(task['schedule_str']) > schedule_pad:\n schedule_pad = len(task['schedule_str'])\n\n task_name_pad += 1\n schedule_pad += 1\n\n header = f'{\"Name\":<{task_name_pad}} | {\"Schedule\":<{schedule_pad}} | Options'\n print(header)\n print('-' * (len(header) + 5))\n\n for task in plan['tasks']:\n options = BackupServicePlan.format_options(task)\n print(f'{task[\"name\"]:<{task_name_pad}} | {task[\"schedule_str\"]:<{schedule_pad}} | {options}')", "def plan(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"plan\")", "def model_plan_feature(cfg, model, developer_gen):\n model.ticket = cfg[\"ticket_id_template\"] % (choose_in(1, cfg[\"repo_age_in_days\"]),)\n model.planned = choose_in(1, cfg[\"max_commits_per_branch\"])\n model.developer = next(developer_gen)\n return model", "def personal(request):\n \n visits = Visit.objects.filter(user=request.user)\n \n \n return render(request, \"personal.html\", {'visits':visits})", "def OnSolutionCallback(self):\n self.total_plans += 1\n print('Feasible Project Plan #{c}:'.format(c=self.total_plans))\n for idx in range(0, len(self.p_)):\n if self.Value(self.p_vars_[idx]):\n print(' - Project ID: {p} (Cost={c}, Value={v})'.format(\n p=(idx + 1), c=self.p_[idx][4], v=self.p_[idx][3]))\n print(' - Total Cost : {c}'.format(c=self.Value(self.total_cost_)))\n print(' - Total Value : {v}'.format(v=self.Value(self.total_value_)))", "def plans(self):\r\n return Plans(self)", "def get(cls, plan_id):\n return cls().requests.get(f\"plan/{plan_id}\")", "def get_plans(self):\n return stripe.Plan.all()", "def plans(request):\n results = Product.objects.filter(category__icontains='P')\n stars = Product.objects.annotate(\n 
avg_review=Avg('productreview__rating'),\n )\n context = {\n 'products': results,\n 'stars': stars\n }\n if not results:\n messages.error(request, \"No plans as of yet, that will change soon!\")\n return redirect(reverse('products'))\n else:\n return render(request, \"products.html\", context)", "def view_approved():\n global approved\n global appr_ind\n appr = approved.get_all_values()\n headings = appr[0]\n first_appl = appr[appr_ind]\n for head, app in zip(headings, first_appl):\n head = head.ljust(15, ' ')\n print(f'{head} {app}')\n keep_viewing = True\n while keep_viewing:\n view_next = input('\\nPress V to view next, Q to quit, M for main '\n 'menu.\\n')\n if view_next.lower() == 'q':\n logout()\n elif view_next.lower() == 'v':\n appr_ind += 1\n if appr_ind < len(appr):\n print('Next approved application: \\n')\n view_approved()\n else:\n print('\\nNo more approved applications to view \\n')\n keep_viewing = False\n next_action()\n elif view_next.lower() == 'm':\n keep_viewing = False\n hr_main()\n break\n else:\n is_invalid()", "def display_accounts_details():\n return Records.display_records()", "def get(self):\n return {\n \"plans\": PLANS,\n }", "def report_development(request):\n apps = Application.objects.filter(app_status__name__icontains='Development').order_by('acronym', 'release')\n return render_to_response('application/search_results.html',\n {'object_list': apps,\n 'search_suggestions': _search_suggestions(),\n },\n context_instance=RequestContext(request));", "def account_dashboard(request):\n\n user_id = request.user.id\n orders = Order.objects.filter(user_id=user_id, billing_status=True)\n\n return render(request, 'account/user/dashboard.html', {'orders': orders})", "def show_user():\n\n return render_template('user/show_by_user.html', title='Show Profile', user = current_user)", "def show_project():\n\n title = request.args.get('title')\n\n title, description, grade = hackbright.get_project_by_title(title)\n\n grade_list = hackbright.get_grades_by_title(title)\n\n html = render_template(\"project.html\", title=title,\n description=description, grade=grade,\n grade_list=grade_list)\n\n return html", "def admin_dash():\n if session['user_admin'] == False:\n abort(403)\n\n yesterday = datetime.utcnow() - timedelta(days=1)\n last_week = datetime.utcnow() - timedelta(days=7)\n # Retrieve all Users\n sqa_sess = sqa_session()\n total_users = sqa_sess.query(User).count()\n new_users_yesterday = sqa_sess.query(User).filter(User.Create_Date > yesterday).count()\n new_users_lastweek = sqa_sess.query(User).filter(User.Create_Date > last_week).count()\n\n active_users_yesterday = sqa_sess.query(User).filter(User.Last_Login_Date > yesterday).count()\n active_users_lastweek = sqa_sess.query(User).filter(User.Last_Login_Date > last_week).count()\n\n total_flights = sqa_sess.query(FlightPlan).count()\n new_flights_yesterday = sqa_sess.query(FlightPlan).filter(FlightPlan.Import_Date >= yesterday).count()\n new_flights_lastweek = sqa_sess.query(FlightPlan).filter(FlightPlan.Import_Date >= last_week).count()\n \n\n return render_template('admin/dashboard.html', total_users=total_users, new_users_yesterday=new_users_yesterday, new_users_lastweek=new_users_lastweek,\n active_users_lastweek=active_users_lastweek, active_users_yesterday=active_users_yesterday,\n total_flights=total_flights, new_flights_lastweek=new_flights_lastweek, new_flights_yesterday=new_flights_yesterday)", "def deploy_plan(plan_name):\n pass", "def home(request):\n projects_newest = 
Project.approved_projects().all().select_related(\"screenshot\").order_by('-id')[:10]\n projects_newest = [project for project in projects_newest]\n return render(request, 'home.html', {\n \"form\": AuthenticationForm(),\n 'projects_popular': projects_newest,\n 'projects_newest': projects_newest\n })", "def test_users_listed(self):\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.plan)", "def show_priveleges(self):\n print(\"This user:\")\n for privelege in self.priveleges:\n print(privelege)", "def display_accounts_details():\n return Credentials.display_credentials()", "def plan(self, plan_code):\r\n return pl.Plan(self, plan_code)", "def show_current_vdc(self):\n if self._check_for_7k():\n self.get_current_vdc()\n print self.current_vdc", "def admin_update_preview():\n return user_management_handler(\"show_admin\", \"\", False)", "def projects(request):\n return Project.objects.prefetch_related('task_set').filter(user=request.user)", "def plan(self):\n\n self.logger.info(\"*** start planning......\")\n\n request_list = self.dbh.get_requests()\n\n if len(request_list) > 0:\n if not self._handle_requests(request_list):\n self.logger.error(\"while planning\")\n return False\n else:\n self.logger.error(\"while reading plan\")\n return False\n\n return True", "def scheduled_plan(self):\n return self._scheduled_plan", "def developer_dahsboard(request):\r\n\tif not request.user.profile.is_developer:\r\n\t\tgames = Game.objects.all()\r\n\t\treturn redirect('/', {'games': games, 'MEDIA_URL': settings.MEDIA_URL})\r\n\tgames = Game.objects.filter(developer = request.user.profile)\r\n\treturn render(request, \"dashboard.html\", {'MEDIA_URL': settings.MEDIA_URL, 'games': games})", "def index(request): \n \n all_projects = models.Project.objects.all()\n projects = get_objects_for_user(request.user, 'view_project', all_projects)\n \n fbads_settings = FacebookAdsSettings.objects.first()\n return render_to_response('index.html',{\n 'projects': projects, \n 'fbads_settings': fbads_settings},\n context_instance=RequestContext(request))", "def my_dashboard(request):\n #Get the associated contact for our user\n user_con = request.user.contact\n qs_proj_assoc, qs_task_assoc = get_tiered_upcoming(user_con)\n\n #Get the projects associated with the user\n user_proj_table = table_proj.ProjectAssocAjaxTable(qs_proj_assoc)\n #Get the tasks associated with the user\n user_task_table = table_task.TaskAssocAjaxTable(qs_task_assoc)\n\n # Render the HTML template index.html with the data in the context variable\n return render(\n request,\n 'my_dashboard.html',\n context={\n 'user_con':user_con,\n 'user_proj_table':user_proj_table,\n 'user_task_table':user_task_table,\n 'project_source' : 'data-dashboard-project-upcoming',\n 'task_source' : 'data-dashboard-task-upcoming',\n 'input_id' : user_con.pk,\n 'print_url':reverse_lazy('my-dashboard-print'),\n },\n )", "def get_goals_todo_info(self, cr, uid, context=None):\n all_goals_info = []\n plan_obj = self.pool.get('gamification.goal.plan')\n\n plan_ids = plan_obj.search(cr, uid, [('user_ids', 'in', uid), ('state', '=', 'inprogress')], context=context)\n for plan in plan_obj.browse(cr, uid, plan_ids, context=context):\n # serialize goals info to be able to use it in javascript\n serialized_goals_info = {\n 'id': plan.id,\n 'name': plan.name,\n 'visibility_mode': plan.visibility_mode,\n }\n user = self.browse(cr, uid, uid, context=context)\n 
serialized_goals_info['currency'] = user.company_id.currency_id.id\n\n if plan.visibility_mode == 'board':\n # board report should be grouped by planline for all users\n goals_info = plan_obj.get_board_goal_info(cr, uid, plan, subset_goal_ids=False, context=context)\n\n if len(goals_info) == 0:\n # plan with no valid planlines\n continue\n\n serialized_goals_info['planlines'] = []\n for planline_board in goals_info:\n vals = {'type_name': planline_board['goal_type'].name,\n 'type_description': planline_board['goal_type'].description,\n 'type_condition': planline_board['goal_type'].condition,\n 'type_computation_mode': planline_board['goal_type'].computation_mode,\n 'type_monetary': planline_board['goal_type'].monetary,\n 'type_suffix': planline_board['goal_type'].suffix,\n 'type_action': True if planline_board['goal_type'].action_id else False,\n 'type_display': planline_board['goal_type'].display_mode,\n 'target_goal': planline_board['target_goal'],\n 'goals': []}\n for goal in planline_board['board_goals']:\n # Keep only the Top 3 and the current user\n if goal[0] > 2 and goal[1].user_id.id != uid:\n continue\n\n vals['goals'].append({\n 'rank': goal[0] + 1,\n 'id': goal[1].id,\n 'user_id': goal[1].user_id.id,\n 'user_name': goal[1].user_id.name,\n 'state': goal[1].state,\n 'completeness': goal[1].completeness,\n 'current': goal[1].current,\n 'target_goal': goal[1].target_goal,\n })\n if uid == goal[1].user_id.id:\n vals['own_goal_id'] = goal[1].id\n serialized_goals_info['planlines'].append(vals)\n\n else:\n # individual report are simply a list of goal\n goals_info = plan_obj.get_indivual_goal_info(cr, uid, uid, plan, subset_goal_ids=False, context=context)\n\n if not goals_info:\n continue\n\n serialized_goals_info['goals'] = []\n for goal in goals_info:\n serialized_goals_info['goals'].append({\n 'id': goal.id,\n 'type_name': goal.type_id.name,\n 'type_description': goal.type_description,\n 'type_condition': goal.type_id.condition,\n 'type_monetary': goal.type_id.monetary,\n 'type_suffix': goal.type_id.suffix,\n 'type_action': True if goal.type_id.action_id else False,\n 'type_display': goal.type_id.display_mode,\n 'state': goal.state,\n 'completeness': goal.completeness,\n 'computation_mode': goal.computation_mode,\n 'current': goal.current,\n 'target_goal': goal.target_goal,\n })\n\n all_goals_info.append(serialized_goals_info)\n return all_goals_info", "def show_user_info():\n \n vprint( 'Effective User :', os.geteuid())\n vprint( 'Effective Group :', os.getegid())\n vprint( 'Actual User :', os.getuid(), 'Login user:', os.getlogin())\n vprint( 'Actual Group :', os.getgid())\n vprint( 'Actual Groups :', os.getgroups())\n return", "def display_user():\n return User.display_user()", "def display_user():\n return User.display_user()", "def view_system():\n\n pass", "def get_plans(user, title=None, category=None, priority=None, status=None,\n id=None, orderby=None):\n user = get_user(user)\n filters = create_filters(id, title, category,\n priority, status)\n selection = user.plans.filter(**filters)\n\n if orderby:\n selection = selection.order_by(orderby)\n\n if not len(selection):\n raise ObjectDoesNotFound('There is no plans with selected filters.')\n return selection", "def displayPlan(self, plan):\n display_trajectory = moveit_msgs.msg.DisplayTrajectory()\n display_trajectory.trajectory_start = self.robot.get_current_state()\n display_trajectory.trajectory.append(plan)\n self.publisher_display_trajectory.publish(display_trajectory)", "def display():\n active = (\n 
app.config[\"PROJECTORS\"]\n if \"projectors\" not in request.args\n else request.args[\"projectors\"].split(\",\")\n )\n return render_template(\n \"lumen.html\",\n projectors=app.config[\"PROJECTORS\"],\n active=active,\n error=request.args.get(\"error\"),\n replace_url=url_for(\"display\", projectors=\",\".join(active)),\n )", "def test_switch_to_free_no_expiry(self):\n u = User.objects.get(username=\"test1\")\n u.userplan.expire = date.today() + timedelta(days=14)\n self.assertIsNotNone(u.userplan.expire)\n\n plan = Plan.objects.get(name=\"Free\")\n self.assertTrue(plan.is_free())\n self.assertNotEqual(u.userplan.plan, plan)\n\n # Switch to Free Plan\n u.userplan.extend_account(plan, None)\n self.assertEqual(u.userplan.plan, plan)\n self.assertIsNone(u.userplan.expire)\n self.assertEqual(u.userplan.active, True)", "def get_display_plan_limit(self, source):\n commitment = getattr(self.get_subscription(), 'commitment', {})\n return self.get_plan().get_display_price_data(source, commitment)[1]", "def human_print_plans(plans: List[Any]):\n # if plans is empty or none print no plans message\n if not plans:\n print('No plans')\n return\n\n name_pad = 5\n service_pad = 8\n for plan in plans:\n if len(plan['name']) > name_pad:\n name_pad = len(plan['name'])\n services_str = BackupServicePlan.service_list_to_str(plan['services'])\n if len(services_str) > service_pad:\n service_pad = len(services_str)\n\n name_pad += 1\n service_pad += 1\n header = f'{\"Name\":<{name_pad}} | # Tasks | {\"Services\":<{service_pad}} | Default'\n print(header)\n print('-' * (len(header) + 5))\n for plan in plans:\n task_len = len(plan['tasks']) if 'tasks' in plan and plan['tasks'] else 0\n print(f'{plan[\"name\"]:<{name_pad}} | {task_len:<7} | '\n f'{BackupServicePlan.service_list_to_str(plan[\"services\"]):<{service_pad}} | '\n f'{(plan[\"default\"] if \"default\" in plan else False)!s}')", "def get(self):\n return {\n 'candidate': 'Welcome to the DevOps test'\n }", "def show(self):\n if self.columns is None:\n return\n\n now = time.time()\n if self.columns != [\"default\"]:\n self.ctxt.sort_fields = None\n else:\n self.ctxt.sort_fields = []\n\n df = self._invoke_sqobj(self.sqobj.get,\n hostname=self.hostname, columns=self.columns,\n namespace=self.namespace,\n query_str=self.query_str,\n )\n\n self.ctxt.exec_time = \"{:5.4f}s\".format(time.time() - now)\n\n if not self.format or (self.format == 'text'):\n self.format = 'devconfig'\n return self._gen_output(df)", "def describe_provisioned_product_plan_single_page(self, **kwargs):\n return slurp(\n 'describe_provisioned_product_plan',\n self.describe_provisioned_product_plan,\n 'ProvisionedProductPlanDetails',\n **kwargs\n )", "def plante_info(id):\n plante = get_plante(id)\n return render_template(\n \"plante-info.html\",\n plante = plante,\n title = plante.get_name(),\n parterre = get_parterre(plante.get_parterre()))", "def dashboard(request):\r\n profile = get_object_or_404(Profile, user=request.user)\r\n wallet = Wallet.objects.get(user=request.user)\r\n history = History.objects.get(pk=1)\r\n referrals = Referral.objects.filter(referee=request.user).count()\r\n invoices = Invoice.objects.filter(issuer=request.user).count()\r\n return render(request, 'coin/dashboard.html', {'profile': profile, \r\n 'wallet': wallet, 'history': history, 'referrals': referrals, \r\n 'invoices': invoices})", "def plans():\n results = []\n if 'qry' in request.args:\n look_for = request.args['qry']\n if look_for[0] == '*':\n look_for = ''\n zipcode = request.args['zipcode']\n\n 
try:\n plan = request.args['plan']\n except KeyError:\n return None\n\n # If this is a medicaid or private plan\n where = tools.get_location(zipcode)\n if where:\n if plan in ('medicaid', 'private'):\n state = where.STATE\n results = PlanNames.by_state(state, look_for, plan=='medicaid')\n results = [r.plan_name for r in results]\n if state == 'OH':\n results.append('OH State Medicaid')\n elif plan == 'medicare':\n county_code = where.GEO.COUNTY_CODE\n ma_region = where.GEO.MA_REGION_CODE\n pdp_region = where.GEO.PDP_REGION_CODE\n results = Plans.find_in_county(county_code, ma_region, pdp_region, look_for)\n\n return jsonify(sorted(results))", "def project_overview(project_name):\n if not db_find_project(project_name):\n abort(404)\n\n _project = Project.objects(project_name=project_name).first()\n # _forks = ProjectFork.objects(project_name=project_name, file_list__ne=[], total_changed_line_number__ne=0)\n _forks = ProjectFork.objects(project_name=project_name, total_changed_line_number__ne=0)\n\n # TODO _all_tags could be opted by AJAX\n _all_tags = {}\n if current_user.is_authenticated:\n _project_tags = ForkTag.objects(project_name=project_name, username=current_user.username)\n for tag in _project_tags:\n _all_tags[tag.fork_full_name] = tag.tags\n\n if current_user.is_authenticated:\n print('View: ', current_user.username, project_name)\n\n return render_template('project_overview.html', project=_project, forks=_forks, all_tags=_all_tags)", "def __init__(self, plan):\n self.plan = plan", "def func(self):\n player = self.caller\n session_list = [\n ob\n for ob in SESSIONS.get_sessions()\n if ob.account and ob.account.show_online(player)\n ]\n session_list = sorted(session_list, key=lambda o: o.account.key.lower())\n sparse = \"sparse\" in self.switches\n watch_list = player.db.watching or []\n if self.cmdstring == \"doing\":\n show_session_data = False\n else:\n show_session_data = player.check_permstring(\n \"Immortals\"\n ) or player.check_permstring(\"Wizards\")\n total_players = len(set(ob.account for ob in session_list))\n number_displayed = 0\n already_counted = []\n public_members = []\n if \"org\" in self.switches:\n from world.dominion.models import Organization\n\n try:\n org = Organization.objects.get(name__iexact=self.args)\n if org.secret:\n raise Organization.DoesNotExist\n except Organization.DoesNotExist:\n self.msg(\"Organization not found.\")\n return\n public_members = [\n ob.player.player\n for ob in org.members.filter(deguilded=False, secret=False)\n ]\n if show_session_data:\n table = prettytable.PrettyTable(\n [\"{wPlayer Name\", \"{wOn for\", \"{wIdle\", \"{wRoom\", \"{wClient\", \"{wHost\"]\n )\n for session in session_list:\n pc = session.get_account()\n if pc in already_counted:\n continue\n if not session.logged_in:\n already_counted.append(pc)\n continue\n delta_cmd = pc.idle_time\n if \"active\" in self.switches and delta_cmd > 1200:\n already_counted.append(pc)\n continue\n if \"org\" in self.switches and pc not in public_members:\n continue\n delta_conn = time.time() - session.conn_time\n plr_pobject = session.get_puppet()\n plr_pobject = plr_pobject or pc\n base = str(session.get_account())\n pname = self.format_pname(session.get_account())\n char = pc.char_ob\n if \"watch\" in self.switches and char not in watch_list:\n already_counted.append(pc)\n continue\n if not char or not char.item_data.fealty:\n fealty = \"---\"\n else:\n fealty = char.item_data.fealty\n if not self.check_filters(pname, base, fealty):\n already_counted.append(pc)\n continue\n 
pname = crop(pname, width=18)\n if (\n session.protocol_key == \"websocket\"\n or \"ajax\" in session.protocol_key\n ):\n client_name = \"Webclient\"\n else:\n # Get a sane client name to display.\n client_name = session.protocol_flags.get(\"CLIENTNAME\")\n if not client_name:\n client_name = session.protocol_flags.get(\"TERM\")\n if client_name and client_name.upper().endswith(\"-256COLOR\"):\n client_name = client_name[:-9]\n\n if client_name is None:\n client_name = \"Unknown\"\n\n client_name = client_name.capitalize()\n\n table.add_row(\n [\n pname,\n time_format(delta_conn)[:6],\n time_format(delta_cmd, 1),\n hasattr(plr_pobject, \"location\")\n and plr_pobject.location\n and plr_pobject.location.dbref\n or \"None\",\n client_name[:9],\n isinstance(session.address, tuple)\n and session.address[0]\n or session.address,\n ]\n )\n already_counted.append(pc)\n number_displayed += 1\n else:\n if not sparse:\n table = prettytable.PrettyTable([\"{wPlayer name\", \"{wFealty\", \"{wIdle\"])\n else:\n table = prettytable.PrettyTable([\"{wPlayer name\", \"{wIdle\"])\n\n for session in session_list:\n pc = session.get_account()\n if pc in already_counted:\n continue\n if not session.logged_in:\n already_counted.append(pc)\n continue\n if \"org\" in self.switches and pc not in public_members:\n continue\n delta_cmd = pc.idle_time\n if \"active\" in self.switches and delta_cmd > 1200:\n already_counted.append(pc)\n continue\n if not pc.db.hide_from_watch:\n base = str(pc)\n pname = self.format_pname(pc, lname=True, sparse=sparse)\n char = pc.char_ob\n if \"watch\" in self.switches and char not in watch_list:\n already_counted.append(pc)\n continue\n if not char or not char.item_data.fealty:\n fealty = \"---\"\n else:\n fealty = str(char.item_data.fealty)\n if not self.check_filters(pname, base, fealty):\n already_counted.append(pc)\n continue\n idlestr = self.get_idlestr(delta_cmd)\n if sparse:\n width = 30\n else:\n width = 55\n pname = crop(pname, width=width)\n if not sparse:\n table.add_row([pname, fealty, idlestr])\n else:\n table.add_row([pname, idlestr])\n already_counted.append(pc)\n number_displayed += 1\n else:\n already_counted.append(pc)\n is_one = number_displayed == 1\n if number_displayed == total_players:\n string = \"{wPlayers:{n\\n%s\\n%s unique account%s logged in.\" % (\n table,\n \"One\" if is_one else number_displayed,\n \"\" if is_one else \"s\",\n )\n else:\n string = (\n \"{wPlayers:{n\\n%s\\nShowing %s out of %s unique account%s logged in.\"\n % (\n table,\n \"1\" if is_one else number_displayed,\n total_players,\n \"\" if total_players == 1 else \"s\",\n )\n )\n self.msg(string)", "def view_vpanel():\n id = request.args.get('id')\n try:\n version = request.form[\"versions\"]\n except KeyError:\n version = None\n if id:\n status = check_virtualpanel_status(s, id)\n if not status:\n message = \"This panel has changes which cannot be viewed here as they have not been made live yet, if you have permission you can view these by editing the panel\"\n else:\n message = None\n panel_details = get_vpanel_details_by_id(s, id)\n for i in panel_details:\n if not version:\n version = panel_details.current_version\n panel_name = panel_details.name\n project_id = panel_details.project_id\n panel = get_regions_by_vpanelid(s, id, version)\n result = []\n rows = list(panel)\n if len(rows) != 0:\n bed = ''\n for i in rows:\n row = dict(zip(i.keys(), i))\n result.append(row)\n panel_name = i.panel_name\n current_version = i.current_version\n else:\n message = \"This Panel has no regions yet 
& may also have changes that have not been made live yet\"\n bed = 'disabled'\n current_version = version\n print(type(version))\n current_version = round(current_version, 1)\n version = round(float(version), 1)\n\n if check_user_has_permission(s, current_user.id, project_id):\n edit = ''\n else:\n edit = 'disabled'\n\n form = ViewPanel()\n v_list = get_prev_versions_vp(s, id)\n choices = []\n for i in v_list:\n choices.append((i, i))\n\n if (current_version, current_version) not in choices:\n choices.append((current_version, current_version))\n\n form.versions.choices = choices\n form.versions.default = current_version\n form.process()\n\n table = []\n\n for i in result:\n line = []\n line.append(i['chrom'])\n line.append(str(i['region_start']))\n line.append(str(i['region_end']))\n line.append(i['gene_name'])\n line.append(i['name'].replace(',', ' '))\n table.append(line)\n\n return render_template('panel_view.html', table=json.dumps(table), panel=table, panel_name=panel_name,\n edit=edit, bed=bed,\n version=version, panel_id=id, message=message, url=url_for('panels.view_vpanel'),\n scope='Virtual', form=form)\n\n else:\n return redirect(url_for('panels.view_virtual_panels'))", "def user_profile(request, id):\n user = User.objects.get(id=id)\n\n return render(request, \"core/profile.html\",{\n \"user\": user,\n \"range\": range(user.stars),\n \"bids_placed\": BuyProduct.objects.filter(\n customer = user\n )\n })", "def index(request):\n params = get_user_profile_params(request)\n\n competition = Competition.get_active()\n params['top_competition_id'] = competition.id\n params['minify_js'] = settings.MINIFY_JS\n\n params['first_page_text'] = ''\n config = Config.objects.all()\n if config.count() > 0:\n params['first_page_text'] = config[0].first_page_text\n\n #order email test\n #order = Order.objects.get(pk=25)\n #send_order_email(order.email, order, order.items.all)\n\n return render(request, 'base.html', params)", "def get_project_info():\n\n title = request.args.get('project')\n\n project_info_list = hackbright.get_project_by_title(title)\n\n html = render_template(\"project_info.html\",\n project_info_list=project_info_list)\n return html", "def report_current(request):\n apps = Application.objects.filter(app_status__name__icontains='Current').order_by('acronym', 'release')\n return render_to_response('application/search_results.html',\n {'object_list': apps,\n 'search_suggestions': _search_suggestions(),\n },\n context_instance=RequestContext(request));" ]
[ "0.6693725", "0.66212505", "0.6312085", "0.61751384", "0.61356807", "0.60612977", "0.60564137", "0.59662765", "0.5901057", "0.58823967", "0.5879139", "0.585081", "0.58050555", "0.57663673", "0.5763864", "0.57417154", "0.5737406", "0.57106596", "0.57047117", "0.56790864", "0.5674888", "0.5672883", "0.56625134", "0.5657511", "0.56480134", "0.5645293", "0.5624678", "0.5597521", "0.5568976", "0.5560515", "0.54867136", "0.54730785", "0.5451478", "0.5442808", "0.5421241", "0.5421131", "0.5410934", "0.5407507", "0.5391863", "0.53910774", "0.53898185", "0.5375304", "0.5365534", "0.53654414", "0.5365083", "0.5355323", "0.5354536", "0.5318895", "0.53143406", "0.53133065", "0.5299016", "0.52844906", "0.5281651", "0.52688533", "0.5259895", "0.5243496", "0.52366954", "0.5212486", "0.5210409", "0.5202962", "0.51919746", "0.51840883", "0.5183308", "0.51784235", "0.51682216", "0.51661956", "0.5164222", "0.51602453", "0.5158468", "0.5150285", "0.5146491", "0.51336056", "0.51207954", "0.51088154", "0.5108799", "0.5101977", "0.5098589", "0.5095089", "0.5095089", "0.50884116", "0.5060449", "0.5058777", "0.5038171", "0.503651", "0.50317734", "0.5023715", "0.5022149", "0.5021972", "0.5016923", "0.5011096", "0.50063336", "0.50030273", "0.49959388", "0.49839658", "0.49839512", "0.49752888", "0.49724478", "0.49670556", "0.49651164", "0.4962079" ]
0.6910045
0
Get or Update goal by id
def self_goal_by_id(request, goal_id): current_user = request.user fields_map = { 'goal_answers': lambda g: [ { 'id': answ.id, 'title': answ.title, "created_by": answ.created_by.username, "created_at": answ.created_at, "file": answ.file.url } for answ in g.goal_answers.all() ] } fields = ['title', 'goal_answers', 'id', 'is_achieved'] goal = Goal.objects.get(pk=goal_id) if request.method == 'POST': if goal.created_by != current_user: raise PermissionDenied("You can edit only your own goals") f = GoalForm(data=request.json_body) if not f.is_valid(): return JsonResponse(data={"detail": json.loads(f.errors.as_json())}, status=400) goal = f.save(current_user, goal) return JsonResponse( data={f: fields_map[f](goal) if f in fields_map else getattr(goal, f) for f in fields}, status=200 )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def goal(self, goal_id):\r\n return goals.Goal(self, goal_id)", "def goal(self, goal_id):\r\n return Goal(self, goal_id)", "def getById(self, id_goals):\n lparam = [id_goals]\n rep = AbstractDAO._read(self, R_READBYID, lparam)\n return self.__fetch_to_object(rep, True)", "def updateOne(id):\n print(inspect.stack()[1][3])\n # read data from the API call\n req_data = request.get_json()\n\n query = select([Followup]).where(Followup.columns.id == id)\n ResultProxy = connection.execute(query)\n ResultSet = ResultProxy.fetchone()\n if(not ResultSet):\n return {'error': 'Unable to Find the given client'}\n\n # Update the URL\n json_data = {}\n\n for req in req_data:\n if (req in Followup.c.keys()):\n json_data[req] = req_data[req]\n\n query = (\n update(Followup).\n where(Followup.columns.id == id).\n values(json_data)\n )\n ResultProxy = connection.execute(query)\n if(not ResultProxy):\n return {'error': 'Unable to Update the given client'}\n return {'status': \"Update Succesful\"}", "def put(self, id):\n activity = Activity().get(id)\n if not activity:\n abort(404, \"Activity not found\")\n\n return activity._update(request.json)", "def find_goal(self, concl, goal_id):\n prf = self.prf\n try:\n for n in goal_id:\n for item in prf.items[:n]:\n if item.th is not None and item.th.can_prove(concl):\n return item.id\n prf = prf.items[n].subproof\n except (AttributeError, IndexError):\n raise TacticException()", "def update_goal(self):\n pass", "def put(self, problem_id):\n args = self.request.arguments\n x = args.pop('latitude')\n y = args.pop('longitude')\n args['location'] = create_location(x, y)\n self.sess.query(Problem).filter_by(id=int(problem_id)). \\\n update(args)\n\n self.sess.commit()\n\n activity = ProblemsActivity(\n problem_id=int(problem_id),\n user_id=self.get_current_user(),\n datetime=get_datetime(),\n activity_type=\"UPDATED\"\n )\n self.sess.add(activity)\n self.sess.commit()", "def put(self,id):\r\n data = request.json\r\n return update(id=id,data=data)", "def put(self, id):\n adm = Administration()\n print(api.payload)\n p = Person.from_dict(api.payload)\n if p is not None:\n p.set_id(id)\n adm.save_person(p)\n return p, 200\n\n else:\n return '', 500", "def put(self, id):\n adm = Administration()\n print(api.payload)\n lp = LearnProfile.from_dict(api.payload)\n if lp is not None:\n lp.set_id(id)\n adm.save_learnprofile(lp)\n return lp, 200\n\n else:\n return '', 500", "def put(self, id):\r\n try:\r\n self.valid_args()\r\n existing = db.session.query(self.__class__).get(id)\r\n if existing is None:\r\n raise NotFound\r\n getattr(require, self.__class__.__name__.lower()).update(existing)\r\n data = json.loads(request.data)\r\n # may be missing the id as we allow partial updates\r\n data['id'] = id\r\n # Clean HATEOAS args\r\n data = self.hateoas.remove_links(data)\r\n inst = self.__class__(**data)\r\n db.session.merge(inst)\r\n db.session.commit()\r\n self._refresh_cache(inst)\r\n return Response(json.dumps(inst.dictize()), 200,\r\n mimetype='application/json')\r\n except IntegrityError:\r\n db.session.rollback()\r\n raise\r\n except Exception as e:\r\n return error.format_exception(\r\n e,\r\n target=self.__class__.__name__.lower(),\r\n action='PUT')", "def update_ship(id):\n data = request.get_json()\n print(data)\n for ship in db['ships']:\n if ship['id'] == id:\n if data['name']:\n ship['name'] == data['name']\n if data['age']:\n ship['age'] == data['age']\n return ship, status.HTTP_202_ACCEPTED\n return {}, status.HTTP_404_NOT_FOUND", "def put(self, id ):\n adm = 
Administration()\n print(api.payload)\n p = Profile.from_dict(api.payload)\n if p is not None:\n p.set_id(id)\n adm.save_profile(p)\n return p, 200\n else:\n return '', 500", "def update_item(id: str, obj: endpoint_model):\n # should this error if exists?\n if obj.id:\n if obj.id != id:\n raise HTTPException(status_code=400, detail=\"id in body does not match id in path\")\n else:\n obj.id = id\n new_obj = db.save(obj)\n return new_obj", "def put(self, id):\n data = request.json\n update_scenario(id, data)\n return None, 204", "def put(self, id=None):\n\n if not id:\n return {'msg':'Missing achievement id.'}, 400\n\n if not all(\n [request.form.get('roll_no'),\n request.form.get('name'),\n request.form.get('batch'),\n request.form.get('programme'),\n request.form.get('category'),]):\n \n return {'msg':'Field(s) missing.'}, 400\n\n try:\n ach = AcademicAchievement.query.get(id)\n\n if not ach:\n return {'msg':'Academic achievement not found'}, 404\n\n ach.roll_no = request.form.get('roll_no'),\n ach.name = request.form.get('name'),\n ach.batch = checkBatch(request.form.get('batch')),\n ach.programme = request.form.get('programme'),\n ach.category = request.form.get('category'),\n\n ach.save()\n data = ach.toDict()\n\n return {'data' : data}, 200\n\n except (ValueError, mongoalchemy.exceptions.BadValueException) as e:\n print(e)\n return {'msg':'Invalid form data.'}, 400\n\n except Exception as e:\n print(e)\n return {'msg':'Could not modify academic achievement.'}, 500", "def put(self, _id):\n payload = self.request.json\n # TODO: validate the json before updating the db\n self.app.db.jobs.update({'_id': int(_id)}, {'$set': {'status': payload.get('status'), 'activity': payload.get('activity')}})", "def put(self,id):\n adm = Administration()\n s = Suggestion.from_dict(api.payload)\n if s is not None:\n s.set_id(id)\n adm.save_suggestion(s)\n return s, 200\n\n else:\n return '', 500", "def put(self, id):\n adm = Administration()\n lg = LearnGroup.from_dict(api.payload)\n if lg is not None:\n\n lg.set_id(id)\n adm.save_learngroup(lg)\n return lg, 200\n\n else:\n return '', 500", "def put(self, id):\n return userDao.update(id, api.payload)", "def test_update_goal(self):\n pass", "def put(self, id):\n return None, 204", "def edit_a_parcel(destination, id):\n query = \"\"\"UPDATE parcels SET destination = %s WHERE id = %s\"\"\"\n tuple =(destination , id)\n db.insert(query, tuple)", "def put(self, id):\n return update_msg(request.json, id)", "def set_goal(self, new_goal, updating=False):\n GOAL_QUERY = \"\"\"UPDATE Goal SET description = %s WHERE id = %s AND curriculum_name = %s\"\"\" if updating \\\n else \"\"\"INSERT INTO Goal (id, curriculum_name, description) VALUES (%s, %s, %s)\"\"\"\n\n if not updating:\n self.db_cursor.execute(\n GOAL_QUERY,\n (new_goal.id, new_goal.curriculum_name, new_goal.description))\n else:\n self.db_cursor.execute(\n GOAL_QUERY,\n (new_goal.description, new_goal.id, new_goal.curriculum_name))\n self.db_connection.commit()", "def patch(self, id=None):\n if id:\n boat2Depart = test4ValidEntity(id)\n if boat2Depart == None:\n self.response.set_status(404)\n else:\n requestBody = json.loads(self.request.body)\n query = Slip.query(Slip.number == requestBody['number'])\n result = query.fetch(limit = 1)\n for match in result:\n if match.current_boat == boat2Depart.id and match.number == requestBody['number']:\n boat2Depart.at_sea = True\n boat2Depart.put()\n match.current_boat = None\n match.arrival_date = None\n match.departure_date = requestBody['departure_date']\n 
match.departed_boat = boat2Depart.id\n match.put()\n slip_dict = match.to_dict()\n del slip_dict['departure_history']\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(slip_dict))\n else:\n self.response.set_status(400)", "def put(self, id):\n data = request.json\n update_entry(id, data)\n return None, 204", "def get(self, _id):", "def put(self, id):\n req = api.payload\n try:\n result = update_task(\n get_db(),\n id,\n req[\"task\"],\n date.fromisoformat(req[\"due_by\"]),\n Status[req[\"status\"]],\n )\n return task_to_dict(result), 201\n except ValueError:\n api.abort(422, \"Invalid Status\")", "def update(task_id, task):\n # Get the task requested from the db into session\n update_task = TaskList.query.filter(TaskList.task_id == task_id).one_or_none()\n\n # Did we find the task?\n if update_task is not None: \n\n # turn the passed in task into a db object\n schema = TaskListSchema()\n update = schema.load(task, session=db.session).data\n print(update)\n\n # Set the id to the task we want to update\n update.task_id = update_task.task_id\n\n # merge the new object into the old and commit it to the db\n db.session.merge(update)\n db.session.commit()\n\n # return updated task in the response\n data = schema.dump(update_task).data\n\n return data, 200\n # otherwise, nope, that's an error\n else:\n abort(\n 404, \"Task {task_id} not found\".format(task_id=task_id),\n )", "def test_editing_goal(self):\n\n form_data = {\"goal-body\": \"Goal body edit.\"}\n goal = edit_patient_goal(1, form_data)\n\n self.assertEqual(\"Goal body edit.\", goal.goal_body)", "def patch(self, id):\n try:\n task = update_status(get_db(), id, Status[api.payload[\"status\"]])\n if not task:\n api.abort(404, \"Invalid Task\")\n return task_to_dict(task)\n except ValueError:\n api.abort(422, \"Invalid Status\")", "def update_goal_info(self):\n self._goal_info_cache = self._get_goal_info()", "def put(self, id=None):\n if id:\n slip = test4ValidEntity(id)\n if slip == None:\n self.response.set_status(404)\n else:\n slip_data = json.loads(self.request.body)\n if 'number' in slip_data:\n \"\"\" Test for requested Slip number already in use. \"\"\"\n query = Slip.query()\n results = query.fetch(limit = MAX_SLIPS)\n for match in results:\n if slip_data['number'] == match.number:\n slip.number = getSlipNum()\n else:\n slip.number = slip_data['number']\n if 'current_boat' in slip_data:\n if slip.current_boat == None:\n slip.current_boat = slip_data['current_boat']\n else:\n \"\"\" Query for the Boat and change at_sea to False. 
\"\"\"\n query = Boat.query(Boat.id == slip_data['current_boat'])\n result = query.fetch(limit = 1)\n if 'at_sea' in result:\n result.at_sea = False\n slip.current_boat = slip_data['current_boat']\n else:\n slip.current_boat = None\n if 'arrival_date' in slip_data:\n slip.arrival_date = slip_data['arrival_date']\n else:\n slip.arrival_date = None\n if 'departed_boat' in slip_data:\n slip.departed_boat = slip_data['departed_boat']\n else:\n slip.departed_boat = None\n if 'departure_date' in slip_data:\n slip.departure_date = slip_data['departure_date']\n else:\n slip.departure_date = None\n slip.put()\n slip_dict = slip.to_dict()\n del slip_dict['departure_history']\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(slip_dict))", "def get(self, _id):\n if _id is None:\n return jsonify([user.serialize() for user in Goal.query.all()])\n else:\n return jsonify(Goal.query.filter_by(id=_id).all())", "def put(self, id):\n return add_comment(request.json, id)", "def patch(self, id=None):\n if id:\n slip = test4ValidEntity(id)\n if slip == None:\n self.response.set_status(404)\n else:\n slip_data = json.loads(self.request.body)\n if 'number' in slip_data:\n \"\"\" Test for Slip number already taken. \"\"\"\n query = Slip.query()\n results = query.fetch(limit = MAX_SLIPS)\n if slip.number in results:\n slip.number = getSlipNum()\n else:\n slip.number = slip_data['number']\n if 'current_boat' in slip_data:\n if slip.current_boat == None:\n slip.current_boat = slip_data['current_boat']\n else:\n \"\"\" Query for the Boat and change at_sea to False. \"\"\"\n query = Boat.query(Boat.id == slip_data['current_boat'])\n result = query.fetch(limit = 1)\n if 'at_sea' in result:\n result.at_sea = False\n slip.current_boat = slip_data['current_boat']\n if 'arrival_date' in slip_data:\n slip.arrival_date = slip_data['arrival_date']\n if 'departed_boat' in slip_data:\n slip.departed_boat = slip_data['departed_boat']\n if 'departure_date' in slip_data:\n slip.departure_date = slip_data['departure_date']\n slip.put()\n slip_dict = slip.to_dict()\n del slip_dict['departure_history']\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(slip_dict))", "def update(self, id, id_col='name'):\n instance = self.get_one_instance(id_col, id)\n\n if type(instance) != self.Component:\n set_session_var('errors', str(instance))\n return None\n\n errors, data = self.format_and_control(request.form, obj=instance)\n\n if len(errors) > 0:\n set_session_var('errors', dict(errors))\n return None\n\n data = get_only_updated_values(instance, data)\n\n if len(data) == 0:\n return None\n\n res = update_in_db(instance, data)\n\n if res != 'updated':\n set_session_var('errors', str(res))\n return None\n else:\n set_session_var('success', res)\n\n if self.module_fn is not None:\n self.module_fn(instance, data)\n\n return instance", "def update_task_by_id(task_id):\n try:\n updated_task = get_task_from_request_form(request)\n tasks = mongo.db.tasks\n\n result = tasks.update_one(\n {\"_id\": ObjectId(task_id)},\n {\n \"$set\": {\n \"title\": updated_task['title'],\n \"reference\": updated_task['reference'],\n \"description\": updated_task['description'],\n \"status\": updated_task['status'],\n \"visible\": updated_task['visible']\n }\n })\n return json_util.dumps(get_task_by_id(task_id))\n except:\n abort(400)", "def get_goal(self, new_goal):\n\n GOAL = \"\"\"SELECT COUNT(*) FROM Section WHERE id = %s\"\"\"\n\n ret = None\n try:\n self.db_cursor.execute(\n 
\"\"\"SELECT description FROM Goal WHERE id = %s AND curriculum_name = %s\"\"\",\n (new_goal.id, new_goal.curriculum_name,))\n c = self.db_cursor.fetchall()\n ret = Goal()\n if c:\n ret.description = c[0][0]\n ret.id = new_goal.id\n ret.curriculum_name = new_goal.curriculum_name\n else:\n ret = None\n\n except:\n logging.warning(\"DBAdapter: Error- cannot retrieve goal: \" + str(new_goal.id))\n\n return ret", "def goals_for(game_id, player=None, goal_name=None):\n test_game = game.Game(parse_game.parse_game( \\\n codecs.open('testing/testdata/'+game_id, encoding='utf-8').read()))\n found_goals = goals.check_goals(test_game)\n\n if player:\n found_goals = [g for g in found_goals if g['player'] == player]\n\n if goal_name:\n found_goals = [g for g in found_goals if g['goal_name'] == goal_name]\n\n return found_goals", "def edit_task(id):\n\n if not id:\n raise InvalidAPIUsage(\"id is required\")\n\n content = get_content_or_400(request)\n\n collection = get_db_collection()\n\n task = get_task_or_404(collection, id)\n\n collection.update_one({\"_id\": task[\"_id\"]}, {\"$set\": {\"content\": content}})\n\n response = jsonify()\n response.status_code = 200\n return response", "def set_goal(self, goal):\n self._pid_lock.acquire() # Acquire Lock\n self._goal = goal\n self._pid_lock.release() # Release Lock", "def status_update(request, id=None):\n #obj = Todo.objects.all()\n user = request.user if request.user.is_authenticated else None\n Todo.objects.filter(id=id).update(mark_done=True, answered_by= user)\n return redirect('lists:alllist')", "def set_course_goal(self, goal_id, course_name):\n self.db_cursor.execute(\n \"\"\"INSERT INTO CourseGoals (course_name, goal_id) VALUES (%s, %s)\"\"\",\n (course_name, goal_id))\n self.db_connection.commit()", "def test_editing_patient_goals(self):\n\n data = {\"goal-body\": \"Edited goal body.\"}\n result = self.client.post(\"/goal/1/edit.json\", data=data)\n\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Edited goal\", result.data)", "def update_task(project_id,task_id):\n data = request.get_json()\n project = Project.query.filter_by(id=project_id).first()\n if not project:\n return {\n 'success': False,\n 'message': f\"No project with the specified id {project_id} found.\",\n }\n\n else:\n permission = has_project_permission(project, g.user)\n old_task = Task.query.filter_by(id=task_id)\n if not old_task:\n abort(404, f'There is no task with ID of {task_id}.')\n\n if old_task:\n db_session.delete(old_task)\n db_session.commit()\n name = data['name']\n project_id = data['project_id']\n description = data['description']\n completion_status = data['completion_status']\n created_date = data['created_date']\n deadline_date = data['deadline_date']\n new_task = Task(\n name=name, description=description, completion_status=completion_status,\n created_date = created_date, deadline_date = deadline_date, project_id=project_id, created_by=g.user)\n db_session.add(new_task)\n db_session.commit()\n return {\n 'success': True,\n 'result': task_schema.dump(new_task),\n 'message': \"Successfully Updated the Task.\",\n }", "def update(self, id, obj):\n url = self._format_url(self.url + \"/{id}\", {\"id\": id})\n\n return self._make_request('put', url, data={self.singular: obj})", "def put(self, id):\n self.not_supported()", "def update_project(id):\n if request.method == \"POST\":\n result = update_project_to_db(\n id,\n request.form[\"title\"],\n request.form[\"link\"],\n request.form[\"description\"]\n )\n flash(result)\n return 
redirect(url_for(\"portfolio\"))\n else:\n project = get_project(id)\n return render_template(\"edit_project.html\", **project)", "def update(self) -> requests.request:\n # Check if id is set\n if self.args.id is None:\n raise Exception('Provide id of asset you want to update')\n\n # Check URL validity\n if self.args.url is not None and self.check_url_invalidity():\n raise Exception('Provided URL is not valid')\n\n # Send PUT request\n return requests.put(\n self.REQUEST_URL + str(self.args.id),\n {'title': self.args.title, 'label': self.args.label, 'url': self.args.url}\n )", "def get(self, story_id):", "def change_location(self, id):\n for p in parcels:\n if p[\"id\"] == id:\n if not request.json[\"location\"]:\n return {\"Error\": \"You must add a location\"}, 400\n else:\n location = request.json[\"location\"]\n p[\"location\"] = location\n return p, 201\n else:\n return \"Parcel not found\", 404", "def put(self, op_id: str) -> Response:\n data = request.get_json()\n\n authorized: bool = Users.objects.get(id=get_jwt_identity()).roles.organization or \\\n Users.objects.get(id=get_jwt_identity()).roles.admin\n\n if authorized:\n try:\n res = Opportunity.objects.get(id=op_id).update(**data)\n except ValidationError as e:\n return bad_request(e.message)\n return jsonify(res)\n else:\n return forbidden()", "def put(id: int):\r\n parser = reqparse.RequestParser()\r\n parser.add_argument(\"title\", type=str)\r\n args = parser.parse_args()\r\n if args:\r\n filename = Path(__file__).parent / \"recipe-data.csv\"\r\n files = import_file.Files()\r\n recipe_load = files.import_from_csv(filename)\r\n recipes = Recipes(recipe_load)\r\n a_recipe = recipes.update_recipe(id, args)\r\n files.export_to_csv(recipes, filename)\r\n return jsonify(a_recipe)\r\n else:\r\n return abort(404)", "def get(cls, id):\n\n return cls.query.get(id)", "def get(cls, id):\n\n return cls.query.get(id)", "def set_goal(self, **kwargs):\n return self.env.set_goal(**kwargs)", "def put(self, id):\n return Contacts().update_one(id, request.json)", "def lookup(job_id: str) -> JobState:\n job = JobState(job_id)\n job.update()\n return job", "def patch(id):\n\n if not request.json or not 'name' in request.json:\n return make_response(jsonify({\n \"status\": 400,\n \"error\": \"Party name is required\"\n }), 400)\n\n data = request.get_json(force=True)\n if isinstance(data['name'], int):\n return make_response(jsonify({\n \"status\": 400,\n \"error\": \"Name should be of type strings\"\n }), 400)\n\n if Party.get_party_by_name(data[\"name\"]):\n return make_response(jsonify({\n \"status\": 409,\n \"error\": \"Party name already taken\"\n }), 409)\n if Validate.validate_empty_string(data_inputed=data[\"name\"]):\n return make_response(jsonify({\n \"status\": 400,\n \"error\": \"Party name cannot be empty\"\n }), 400)\n update_data = request.get_json(force=True)\n party_to_edit = Party.get_party_by_id(id=id)[0]\n party_to_edit = Party.update_party(update_data=update_data,id=id)\n return make_response(jsonify({\n \"status\": 201,\n \"data\": party_to_edit\n }), 201)", "def update_user(id):\n pass", "def update_item(self, id: str, user: User, **kwargs) -> None:", "def get_by_id(self, id: int):\n\n\t\traise NotImplemented", "async def _edit_game_given(self, game_id, given):\n\n await self.bot.db.execute(\n \"\"\"\n UPDATE giveaways_game\n SET given = :given\n WHERE game_id = :game_id\n \"\"\",\n {\n 'game_id': game_id,\n 'given': int(given),\n }\n )\n\n await self.bot.db.commit()", "def put(self, request, pk):\n return 
self.update(request, pk)", "def update_finding_id(finding, new_id, updated_at=None):\n finding[\"Id\"] = new_id\n if updated_at:\n finding[\"UpdatedAt\"] = updated_at\n return finding", "def update_problem(problem_id, problem):\n Firebase = firebase.FirebaseApplication('https://team1robotsim.firebaseio.com/', None)\n result = Firebase.get('/problems', 'id_' + str(problem_id))\n \n try:\n if result is not None:\n \tproblem = Problem.from_dict(connexion.request.get_json())\n \treturn jsonify(Firebase.put('/problems', 'id_' + str(problem_id), problem))\n except ValueError:\n return jsonify(Error(404, \"Problem not found\")), status.HTTP_404_NOT_FOUND", "def get(self, id):\n return {'id': id}", "def test_deleting_goal(self):\n\n delete_goal(1)\n self.assertIsNone(Goal.query.get(1))", "def update(self, id, params):\n response = self._connection.session.put(self.url + \"/%s\" % id, json=params)\n return self._raise_or_return_json(response)", "def get(self, cls, id):\n pass", "def update(cls, plan_id, **kwargs):\n return cls().requests.put(f\"plan/{plan_id}\", data=kwargs,)", "def get_object(id):", "def get(self,id):\r\n person = get_one(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "async def update_one(self, where, data):\n\n pass", "def save(self)->None:\n item = database.cursor.fetchone()\n if item:\n self.id = item['id']\n database.connection.commit()", "def update(self, id: str, **kwargs: dict):\n kwargs = self._preprocess(**kwargs)\n j = self._jsonify(kwargs)\n\n if isinstance(id, uuid.UUID):\n id = str(id)\n\n with rconnect() as conn:\n query = self.q.get(id).update(j, return_changes=True)\n rv = query.run(conn)\n if len(rv['changes']):\n return self.__model__(rv['changes'][0]['new_val'])\n else:\n return self.get(id)", "def goal(self, goal):\n\n self._goal = goal", "def patch(self, id=None):\n if id:\n boat = test4ValidEntity(id)\n if boat == None:\n self.response.set_status(404)\n else:\n boat_data = json.loads(self.request.body)\n if 'name' in boat_data:\n boat.name = boat_data['name']\n if 'type' in boat_data:\n boat.type = boat_data['type']\n if 'length' in boat_data:\n boat.length = boat_data['length']\n boat.put()\n boat_dict = boat.to_dict()\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(boat_dict))", "def _update():\n\tquery = myTaskSession.query(WorkToolkitDB.db.Task)\n\n\tIDStr = myOpt.id\n\tIDs = re.split('\\s*,\\s*', IDStr)\n\n\tif len(IDs) == 0:\n\t\tprint('ERR: no add task input')\n\t\treturn 1\n\n\t#set default finsih_status if not given\n\tif not myOpt.f:\n\t\tmyOpt.f = 1\n\n\tfor ID in IDs:\n\t\tquery.filter(WorkToolkitDB.db.Task.id == ID).update({WorkToolkitDB.db.Task.finish_status: myOpt.f})\n\n\t\tif myOpt.vt:\n\t\t\tquery.filter(WorkToolkitDB.db.Task.id == ID).update({WorkToolkitDB.db.Task.version_time: myOpt.vt})\n\n\t#commit\n\tmyTaskSession.commit()\n\n\t\"\"\"\n\t#ERR: not given itsm id for update \n\tif not myOpt.id:\n\t\tprint('Error: no itsm id given for update finish_status to 1')\n\t\treturn 1\n\t#set default finsih_status if not given\n\tif not myOpt.f:\n\t\tmyOpt.f = 1\n\n\t\n\tquery.filter(WorkToolkitDB.db.Task.id == myOpt.id).update({'finish_status': myOpt.f})\n\tmyTaskSession.commit()\n\n\t\n\tdata = query.filter(WorkToolkitDB.db.Task.id == myOpt.id).all()\n\tfor record in data:\n\t\t\t#record_arr = record.to_array()\n\t\t\tpt.add_row(record.to_array())\n\n\tprint(pt)\n\t\"\"\"\n\n\treturn 0", "def update_project(project_id):\n\n project = mongo.db.projects\n 
project.find_one_and_update({'_id': ObjectId(project_id) },\n {'$set':\n {'title': request.form.get('title'),\n 'status': request.form.get('status'),\n 'deadline': datetime.strptime(request.form.get('deadline'), '%d/%m/%Y'),\n 'note': request.form.get('note'),\n 'brief': request.form.get('brief')}})\n return redirect(url_for('projects'))", "def updateOne(self,ident):\n \tLOGGER.info(\"lazily updating {}\".format(ident))\n \tself.idToUpdate=ident\n \tself.newState=''\n \tself.save()", "def rewrite_goal(self, id, th_name, *, backward=False):\n self.apply_tactic(id, tactic.rewrite(), args=th_name)", "def update_game(game_id):\n\n game = filter(lambda t: t[\"id\"] == game_id, games)\n if len(game) == 0:\n abort(404)\n if not request.json:\n abort(400)\n if \"teams\" in request.json and type(request.json[\"teams\"]) != unicode:\n abort(400)\n if \"score\" in request.json and type(request.json[\"score\"]) is not unicode:\n abort(400)\n if \"city\" in request.json and type(request.json[\"city\"]) is not unicode:\n abort(400)\n if \"date\" in request.json and type(request.json[\"date\"]) is not unicode:\n abort(400)\n game[0][\"teams\"] = request.json.get(\"teams\", game[0][\"teams\"])\n game[0][\"score\"] = request.json.get(\"score\", game[0][\"score\"])\n game[0][\"city\"] = request.json.get(\"city\", game[0][\"city\"])\n game[0][\"date\"] = request.json.get(\"date\", game[0][\"date\"])\n return jsonify({\"game\": game[0]})", "def put(self, id=None):\n if id:\n boat = test4ValidEntity(id)\n if boat == None:\n self.response.set_status(404)\n else:\n boat_data = json.loads(self.request.body)\n if 'name' in boat_data:\n boat.name = boat_data['name']\n else:\n boat.name = None\n if 'type' in boat_data:\n boat.type = boat_data['type']\n else:\n boat.type = None\n if 'length' in boat_data:\n boat.length = boat_data['length']\n else:\n boat.length = None\n boat.put()\n boat_dict = boat.to_dict()\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(boat_dict))", "def set_goal(self, goal: GoalType) -> None:\n self.goal = goal", "def update_drink(jwt, drink_id):\n try:\n drink = Drink.query.filter(Drink.id == drink_id).one_or_none()\n\n if drink is None:\n abort(404)\n\n body = request.get_json()\n req_title = body.get('title', drink.title)\n req_recipe = json.dumps(body.get('recipe', drink.recipe))\n\n drink.title = req_title\n drink.recipe = req_recipe\n drink.update()\n\n return jsonify({\n 'success': True,\n 'drinks': [drink.long()]\n }), 200\n\n except Exception as e:\n abort(422)", "def put(self, copy_id):\n body = request.get_json()\n copy = db.session.query(models.Copy).filter_by(id=copy_id).first()\n if copy is None:\n return 'copy is not found', 404\n if invalid_user(copy.user):\n return 'Unauthorized User', 401\n copy.status = body.get('status')\n db.session.add(copy)\n db.session.commit()\n return copy.serialize(), 200", "def get(self, id):\n return self.__model__.query.get(id)", "def get_by_id(cls, id):\n return cls.query().get(id)", "def update(self, id):\n loan = self._model.query.get(id)\n loan.original_due_date = loan.due_date\n loan.due_date = loan.due_date + 1 * TimeUnits.MONTH_IN_SEC\n\n db.session.add(loan)\n\n try:\n db.session.commit()\n except Exception as exc:\n print(f'Something went wrong: {exc}')\n db.session.rollback()", "def update(self, **payload):\n update_story_url =\"https://www.pivotaltracker.com/services/v5/projects/{}/stories/{}\".format(self.project_id, self.story_id)\n return _perform_pivotal_put(update_story_url, payload)", 
"def delete(self, id=None):\n\n if not id:\n return {'msg':'Missing achievement id.'}, 400\n\n try:\n ach = AcademicAchievement.query.get(id)\n\n if not ach:\n return {'msg':'Academic achievement not found'}, 404\n\n ach.remove()\n return {'msg':'Academic achievement deleted.'}, 200\n\n except Exception as e:\n print(e)\n return {'msg':'Could not delete academic achievement.'}, 500", "def get_test_goal(context, **kw):\n obj_cls = objects.Goal\n db_data = db_utils.get_test_goal(**kw)\n obj_data = _load_related_objects(context, obj_cls, db_data)\n\n return _load_test_obj(context, obj_cls, obj_data, **kw)", "def update(id):\n if request.method == \"POST\":\n result = update_post(\n id,\n request.form[\"title\"],\n request.form[\"body\"]\n )\n flash(result)\n return redirect(url_for(\"show\"))\n else:\n post = get_post(id)\n return render_template(\"edit.html\", **post)", "def update_one(\n self, *args, session: Optional[ClientSession] = None\n ) -> UpdateOne:\n return self.update(*args, session=session)", "def update_step(self, step_id, data):\n try:\n self._session.query(StepEntity).\\\n filter(StepEntity.id == step_id).\\\n update(data)\n except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n return True", "def update_game(username, password, game_id, lat, lng):\n\n payload = {'lat': lat, 'lng': lng}\n url = \"{}{}/game/{}\".format(hostname, rest_prefix, game_id)\n r = requests.put(url, auth=(username, password), data=payload)\n response = r.json()\n\n print response" ]
[ "0.68321764", "0.6705152", "0.6541664", "0.6067706", "0.6032877", "0.6005164", "0.59921056", "0.5976328", "0.59698373", "0.5885591", "0.58422995", "0.58416253", "0.5819017", "0.5806884", "0.58029574", "0.5794082", "0.57710695", "0.57252264", "0.57099134", "0.56292725", "0.56237155", "0.56168914", "0.55942357", "0.55751616", "0.5572422", "0.55689883", "0.5552304", "0.55462456", "0.54960907", "0.5491878", "0.54603195", "0.5459008", "0.5456237", "0.54510385", "0.5445018", "0.5438216", "0.540073", "0.5377425", "0.5376836", "0.5373087", "0.53533983", "0.53468746", "0.53149575", "0.52961755", "0.5285731", "0.52805436", "0.52753377", "0.5256109", "0.52520907", "0.52472275", "0.52081037", "0.5177014", "0.5154345", "0.5137273", "0.513693", "0.5128101", "0.51260155", "0.51260155", "0.5123415", "0.5118326", "0.51132965", "0.5108906", "0.5098434", "0.5095777", "0.5082631", "0.5081826", "0.50808275", "0.5077567", "0.50576794", "0.50521845", "0.50417584", "0.5034441", "0.5024445", "0.5021112", "0.5015474", "0.5014513", "0.50006145", "0.4996027", "0.49948606", "0.4992461", "0.49916953", "0.4985199", "0.49848753", "0.4984073", "0.49778852", "0.49760804", "0.49752533", "0.49746466", "0.49685928", "0.4965567", "0.49647057", "0.49635598", "0.49623087", "0.4961685", "0.49552116", "0.4954481", "0.49538597", "0.4945663", "0.49441546", "0.49387833" ]
0.67704624
1
Get or Update goal's answer by id
def set_get_answer(request, answer_id):
    current_user = request.user

    fields = ["created_by", "title", "created_at", 'id', 'file']
    fields_map = {
        "created_by": lambda a: a.created_by.username,
        "file": lambda a: a.file.url if a.file else ''
    }

    answ = GoalAnswer.objects.get(pk=answer_id)

    if request.method == 'POST':
        f = GoalAnswerForm(data=request.json_body)

        if not f.is_valid():
            return JsonResponse(data={"detail": json.loads(f.errors.as_json())}, status=400)

        answ = f.save(current_user, answ)

        return JsonResponse(
            data={f: fields_map[f](answ) if f in fields_map else getattr(answ, f) for f in fields}, status=400
        )
    else:
        return JsonResponse(
            data={f: fields_map[f](answ) if f in fields_map else getattr(answ, f) for f in fields}, status=400
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def self_goal_by_id(request, goal_id):\n current_user = request.user\n\n fields_map = {\n 'goal_answers': lambda g: [\n {\n 'id': answ.id,\n 'title': answ.title,\n \"created_by\": answ.created_by.username,\n \"created_at\": answ.created_at,\n \"file\": answ.file.url\n } for answ in g.goal_answers.all()\n ]\n }\n\n fields = ['title', 'goal_answers', 'id', 'is_achieved']\n\n goal = Goal.objects.get(pk=goal_id)\n\n if request.method == 'POST':\n if goal.created_by != current_user:\n raise PermissionDenied(\"You can edit only your own goals\")\n\n f = GoalForm(data=request.json_body)\n\n if not f.is_valid():\n return JsonResponse(data={\"detail\": json.loads(f.errors.as_json())}, status=400)\n\n goal = f.save(current_user, goal)\n\n return JsonResponse(\n data={f: fields_map[f](goal) if f in fields_map else getattr(goal, f) for f in fields}, status=200\n )", "def get_answer(self, answer_id):\n return self.answers[answer_id]", "def put(self, question_id, answers_id):\n \n data = request.json\n \n # validate(data, RESPONSE_SCHEMA)\n\n response = Answers.response_to_answer(question_id, answers_id, data)\n\n return response", "def put(self,id):\n adm = Administration()\n s = Suggestion.from_dict(api.payload)\n if s is not None:\n s.set_id(id)\n adm.save_suggestion(s)\n return s, 200\n\n else:\n return '', 500", "def update_question(self, question: str, question_type: int, answer: [str],\n manually_grading: bool, points: float,\n test_id: int, question_id: int) -> Optional[int]:\n try:\n\n new_question = self.session.query(Questions) \\\n .filter(Questions.id == question_id) \\\n .update({'question': question, 'question_type': question_type, 'answer': answer,\n 'manually_grading': manually_grading,\n 'points': points, 'test_id': test_id, })\n self.session.flush()\n return new_question.id\n except Exception as excpt:\n self.session.rollback()\n print(f'Couldn\\'t add question: {excpt}')\n return None", "def find_goal(self, concl, goal_id):\n prf = self.prf\n try:\n for n in goal_id:\n for item in prf.items[:n]:\n if item.th is not None and item.th.can_prove(concl):\n return item.id\n prf = prf.items[n].subproof\n except (AttributeError, IndexError):\n raise TacticException()", "def goal(self, goal_id):\r\n return goals.Goal(self, goal_id)", "def getById(self, id_goals):\n lparam = [id_goals]\n rep = AbstractDAO._read(self, R_READBYID, lparam)\n return self.__fetch_to_object(rep, True)", "def __getitem__(self, answer_id):\n return self._answer_dependencies[answer_id]", "def update(self, answer):\n position = self.find(answer)\n\n if position is None:\n raise ValueError(\"Answer instance does not exist in store\")\n else:\n self.answers[position]['value'] = answer.value", "def update_goal(self):\n pass", "def bot_answer(update, context):\n question = update.message.text\n answer = go_bot(question)\n print(question, answer)\n print(stats)\n print()\n update.message.reply_text(answer)", "def test_update_answer(self):\n user_token, _, question_id, answer_id = self.add_answer()\n\n headers = self.get_request_header(user_token)\n data = json.dumps(self.update_answer)\n url = f'/questions/{question_id}/answers/{answer_id}'\n\n response = self.test_client.put(url, headers=headers, data=data)\n\n self.assertEqual(response.status_code, 200)", "def post_answer(payload):\n print(\"payload: \\n\", payload)\n question_id = payload.get(\"id\")\n response = client.query(\n KeyConditionExpression=Key('id').eq(question_id)\n )\n \n item = response['Items'][0]\n print(type(item))\n print(\"item: \\n\", item)\n for 
answer in payload[\"answers\"]:\n votes = item[\"answers\"].get(answer)\n item[\"answers\"].update({answer: votes + 1})\n\n print(\"updated item: \\n\", item)\n client.put_item(Item=item)", "def test_update_answer(self):\n self.app.post(\"/api/v2/answers/1/answer\", headers=self.headers,\n data=json.dumps(self.answer)) \n response = self.app.patch(\n \"/api/v2/answers/1/answer\", headers=self.headers, data=json.dumps(self.answer))\n result = json.loads(response.data)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(result['status'], 200)", "def status_update(request, id=None):\n #obj = Todo.objects.all()\n user = request.user if request.user.is_authenticated else None\n Todo.objects.filter(id=id).update(mark_done=True, answered_by= user)\n return redirect('lists:alllist')", "def add_or_update(self, answer):\n if self.exists(answer):\n self.update(answer)\n else:\n self.add(answer)", "def updateOne(id):\n print(inspect.stack()[1][3])\n # read data from the API call\n req_data = request.get_json()\n\n query = select([Followup]).where(Followup.columns.id == id)\n ResultProxy = connection.execute(query)\n ResultSet = ResultProxy.fetchone()\n if(not ResultSet):\n return {'error': 'Unable to Find the given client'}\n\n # Update the URL\n json_data = {}\n\n for req in req_data:\n if (req in Followup.c.keys()):\n json_data[req] = req_data[req]\n\n query = (\n update(Followup).\n where(Followup.columns.id == id).\n values(json_data)\n )\n ResultProxy = connection.execute(query)\n if(not ResultProxy):\n return {'error': 'Unable to Update the given client'}\n return {'status': \"Update Succesful\"}", "def test_update_goal(self):\n pass", "def goal(self, goal_id):\r\n return Goal(self, goal_id)", "def put(self, id=None):\n\n if not id:\n return {'msg':'Missing achievement id.'}, 400\n\n if not all(\n [request.form.get('roll_no'),\n request.form.get('name'),\n request.form.get('batch'),\n request.form.get('programme'),\n request.form.get('category'),]):\n \n return {'msg':'Field(s) missing.'}, 400\n\n try:\n ach = AcademicAchievement.query.get(id)\n\n if not ach:\n return {'msg':'Academic achievement not found'}, 404\n\n ach.roll_no = request.form.get('roll_no'),\n ach.name = request.form.get('name'),\n ach.batch = checkBatch(request.form.get('batch')),\n ach.programme = request.form.get('programme'),\n ach.category = request.form.get('category'),\n\n ach.save()\n data = ach.toDict()\n\n return {'data' : data}, 200\n\n except (ValueError, mongoalchemy.exceptions.BadValueException) as e:\n print(e)\n return {'msg':'Invalid form data.'}, 400\n\n except Exception as e:\n print(e)\n return {'msg':'Could not modify academic achievement.'}, 500", "def accept_answer(request, answer_id):\n raise NotImplementedError", "def test_edit_answer(self):\n user = self.create_user()\n user_id = user[0] # answer author user id\n question_id = int(self.create_question()[0])\n # token should be encoded with the id of the answer author\n auth_token = user[1]\n new_answer = self.post_data(question_id, auth_token=auth_token).json\n answer_id = int(new_answer['answer_id'])\n headers = {\"Authorization\":\"Bearer {}\".format(auth_token)}\n path = \"/api/v2/questions/{}/answers/{}\".format(question_id,\n answer_id)\n data = {\"text\":\"edited answer\"}\n result = self.client.put(path,\n headers=headers,\n data=json.dumps(data),\n content_type='application/json')\n self.assertEqual(result.status_code, 200)\n self.assertEqual(result.json['value'], data['text'])", "def solution(p, id_):\n out = list()\n 
run_prog(read_input(), [id_], out)\n print(f\"Solution to part {p}: {out[-1]}\")", "def put(self, id):\n adm = Administration()\n print(api.payload)\n lp = LearnProfile.from_dict(api.payload)\n if lp is not None:\n lp.set_id(id)\n adm.save_learnprofile(lp)\n return lp, 200\n\n else:\n return '', 500", "def get_answer(answer_id, api_site_parameter, body = False, comments = False, pagesize = 1):\n path = \"answers/%d\" % answer_id\n \n query_filter = ')(Y_v2R5Tz'\n \n if body:\n query_filter = '-m84pZ4-YWK'\n if comments:\n query_filter = ')(Ybxr-pC9'\n if body and comments:\n query_filter = 'D9kY06hX'\n \n results = __fetch_results(path, api_site_parameter, filter = query_filter, pagesize = pagesize)\n return results", "def update_points_and_correct_words(game_id: Union[UUID, str], correct_word: str) -> Game:\n game = Game.objects.select_for_update().get(id=game_id)\n\n if correct_word:\n points = game.points\n game.points = points + len(correct_word)\n\n correct_words_str = game.correct_words\n if correct_words_str:\n correct_words = correct_words_str.split(',')\n else:\n correct_words=[]\n\n correct_words.append(correct_word)\n correct_words_str = ','.join(correct_words)\n game.correct_words = correct_words_str\n\n game.save()\n\n return game", "def get_response(self, id):\n if not id:\n return None\n for response in self._responses:\n if response._id == id:\n return response\n pass\n new_res = self._add_response(id)\n return new_res", "def update_question(token, question_id):\n try:\n question = Question.query.filter_by(id=question_id).first()\n if not question:\n abort(STATUS_NOT_FOUND)\n\n question_data = request.get_json()\n update_question_in_db(question, question_data)\n return jsonify({\n 'success': True,\n 'question': question.format()\n })\n except Exception as exp:\n abort(exp.code)", "def put(self,id):\r\n data = request.json\r\n return update(id=id,data=data)", "def put(self, id):\n return update_msg(request.json, id)", "def get_vote(self, id: int) -> dict:", "def fetch_specific_question(self, question_id):\n\n question = self.sql.fetch_details_by_criteria(\n \"question_id\", question_id, \"questions\")\n\n response, status = \"\", 200\n\n if not question:\n\n return self.makeresp(\"Question not found\", 404)\n\n user = self.sql.get_username_by_id(int(question[0][2]))\n\n response = self.makeresp({\n\n \"user\": user[0],\n \"meetup\": question[0][1],\n \"title\": question[0][3],\n \"body\": question[0][4],\n \"createdOn\": question[0][6],\n \"votes\": question[0][5]\n }, status)\n\n return response", "def put(self, problem_id):\n args = self.request.arguments\n x = args.pop('latitude')\n y = args.pop('longitude')\n args['location'] = create_location(x, y)\n self.sess.query(Problem).filter_by(id=int(problem_id)). 
\\\n update(args)\n\n self.sess.commit()\n\n activity = ProblemsActivity(\n problem_id=int(problem_id),\n user_id=self.get_current_user(),\n datetime=get_datetime(),\n activity_type=\"UPDATED\"\n )\n self.sess.add(activity)\n self.sess.commit()", "def _re_post(id):\r\n per_page = current_app.config['FLASKY_ANSWERS_PER_PAGE']\r\n answer_id = id\r\n answer = Answer.query.get_or_404(answer_id)\r\n post = answer.post\r\n pagination = Answer.query.order_by(Answer.timestamp.asc()).filter_by(post_id = post.id).all()\r\n answers = [answer.id for answer in pagination]\r\n\r\n page = answers.index(id)/per_page + 1\r\n id = post.id\r\n return redirect(url_for('.post',page=page,id=id,_anchor='answer-'+str(answer_id)))", "def put(self, id):\n adm = Administration()\n lg = LearnGroup.from_dict(api.payload)\n if lg is not None:\n\n lg.set_id(id)\n adm.save_learngroup(lg)\n return lg, 200\n\n else:\n return '', 500", "def update_task_by_id(task_id):\n try:\n updated_task = get_task_from_request_form(request)\n tasks = mongo.db.tasks\n\n result = tasks.update_one(\n {\"_id\": ObjectId(task_id)},\n {\n \"$set\": {\n \"title\": updated_task['title'],\n \"reference\": updated_task['reference'],\n \"description\": updated_task['description'],\n \"status\": updated_task['status'],\n \"visible\": updated_task['visible']\n }\n })\n return json_util.dumps(get_task_by_id(task_id))\n except:\n abort(400)", "def edit_task(id):\n\n if not id:\n raise InvalidAPIUsage(\"id is required\")\n\n content = get_content_or_400(request)\n\n collection = get_db_collection()\n\n task = get_task_or_404(collection, id)\n\n collection.update_one({\"_id\": task[\"_id\"]}, {\"$set\": {\"content\": content}})\n\n response = jsonify()\n response.status_code = 200\n return response", "def post(self, question_id):\n\n data = request.json\n validate(data, ANSWER_SCHEMA)\n\n if data['answer'] is not None and data['answer'].strip() == \"\":\n return ({\n \"message\": \"Please provide an answer\"}), 400\n\n\n new_answer = Answers(question_id, data['answer'])\n response = new_answer.post_answer()\n\n return response", "def check(self, answer_id: str) -> dict:\n result: bool = self.__check_answer_id(answer_id)\n if result:\n self.session_facade.handle_correct_answer()\n else:\n self.session_facade.handle_incorrect_answer()\n\n return {'success': result}", "def get_by_id(self):\n\n con, response = psycopg2.connect(**self.config), None\n cur = con.cursor(cursor_factory=RealDictCursor)\n\n try:\n\n query = \"SELECT * FROM questions WHERE id = %s;\"\n cur.execute(query, [self.question_id])\n con.commit()\n response = cur.fetchone()\n\n if response:\n\n query2 = \"SELECT * FROM answers WHERE question_id=%s\"\n cur.execute(query2, [self.question_id])\n\n queryset_list = cur.fetchall()\n response[\"answers\"] = queryset_list\n\n return response\n\n except Exception as e:\n print(e)\n\n con.close()\n api.abort(404, \"Question {} doesn't exist\".format(self.question_id))", "def find(self, answer):\n self._validate(answer)\n\n for index, existing in enumerate(self.answers):\n if answer.matches_dict(existing):\n return index\n\n return None", "def get(self, answer):\n position = self.find(answer)\n\n if position is None:\n raise ValueError(\"Answer instance does not exist in store\")\n else:\n return self.answers[position]['value']", "def answer(self):\n try:\n return Answer.objects.filter(question=self).all()[0]\n except Answer.DoesNotExist, IndexError:\n return None", "async def _edit_game_given(self, game_id, given):\n\n await self.bot.db.execute(\n 
\"\"\"\n UPDATE giveaways_game\n SET given = :given\n WHERE game_id = :game_id\n \"\"\",\n {\n 'game_id': game_id,\n 'given': int(given),\n }\n )\n\n await self.bot.db.commit()", "def patch(self, publication_id, question_id):\n validate_publication(publication_id)\n question = PublicationQuestion.query.filter(\n PublicationQuestion.id == question_id,\n PublicationQuestion.publication_id == publication_id,\n ).first()\n if question is None:\n return {\"message\": \"No question was found by that id\"}, 404\n question.reply = api.payload[\"reply\"]\n question.replied_at = dt.now()\n db.session.merge(question)\n db.session.commit()\n return api.marshal(question, publication_question_model), 200", "def get_question(self, id):\n question = ForetoldQuestion(id, self)\n question.refresh_question()\n return question", "def answer_question(request, control_question_pk, step):\n\n control_question_obj = ControlQuestion.objects.get(pk=control_question_pk)\n\n control_question_obj.answered_on = timezone.now()\n control_question_obj.answered_by = request.user\n control_question_obj.answer_correct = True\n\n # Save changes\n control_question_obj.save()\n\n return http.HttpResponseRedirect(request.META.get(\n 'HTTP_REFERER', '/') + '#setup_step_' + step)", "def process_answer(ans):\n\n #TODO: check whether need type coversion?\n ans['parentid'] = int(ans['parentid'])\n ## I remain comments here, maybe can do some sentiment analysis to evaluate score of answer\n return ans", "def put(self, id):\n return None, 204", "def get_task_answer(answer):\n self.task_answer = answer", "def test_update_answer_invalid_answser_id(self):\n user_token, _, question_id, _ = self.add_answer()\n\n headers = self.get_request_header(user_token)\n data = json.dumps(self.update_answer)\n url = f'/questions/{question_id}/answers/0'\n\n response = self.test_client.put(url, headers=headers, data=data)\n\n self.assertEqual(response.status_code, 400)", "def test_editing_goal(self):\n\n form_data = {\"goal-body\": \"Goal body edit.\"}\n goal = edit_patient_goal(1, form_data)\n\n self.assertEqual(\"Goal body edit.\", goal.goal_body)", "def question_switch(request, id):\n question = get_object_or_404(Question, pk=id)\n\n # qproposal - endorse part\n categories = Category.objects.filter(name='proposed')\n proposed_cat = categories[0] if categories else None\n if question.category == proposed_cat:\n player = question.proposed_by.get_profile()\n staff_user = request.user\n amount = 0\n for tag in question.tags.all():\n if tag.name == 'qotd':\n amount = QOTD_GOLD\n elif tag.name == 'challenge':\n amount = CHALLENGE_GOLD\n elif tag.name == 'quest':\n amount = QUEST_GOLD\n\n # Question is endorsed\n if not question.endorsed_by:\n question.endorsed_by = staff_user\n question.save()\n scoring.score(player, None, 'bonus-gold', external_id=staff_user.id,\n gold=amount)\n\n # Endorsement is removed from question\n else:\n question.endorsed_by = None\n question.save()\n amount *= -1\n scoring.score(player, None, 'bonus-gold', external_id=staff_user.id,\n gold=amount)\n\n # regular activation of question\n else:\n question.active = not question.active\n question.save()\n\n go_back = request.META.get('HTTP_REFERER', None)\n if not go_back:\n go_back = reverse('wouso.interface.cpanel.views.qpool_home')\n\n return HttpResponseRedirect(go_back)", "def _Answer(questionId, choice):\n\n self.assertEqual(questionId, self.vmQuestion.runtime.question.id,\n '_Answer got questionId == \"%s\"' % questionId +\n '; expected \"%s\"'\n % 
self.vmQuestion.runtime.question.id)\n self.assertEqual(choice, self.rawInputStub.answer,\n '_Answer got choice == \"%s\"' % choice +\n '; expected \"%s\"' % self.rawInputStub.answer)", "def put(self, id):\n adm = Administration()\n print(api.payload)\n p = Person.from_dict(api.payload)\n if p is not None:\n p.set_id(id)\n adm.save_person(p)\n return p, 200\n\n else:\n return '', 500", "def test_update_answer_invalid_question_id(self):\n user_token, _, _, answer_id = self.add_answer()\n\n headers = self.get_request_header(user_token)\n data = json.dumps(self.update_answer)\n url = f'/questions/0/answers/{answer_id}'\n\n response = self.test_client.put(url, headers=headers, data=data)\n\n self.assertEqual(response.status_code, 400)", "def put(self, id):\n return add_comment(request.json, id)", "def solve(self, bot, update, args):\n request = ' '.join(args)\n result = ask(request)\n if result is None:\n result = \"I don't know, Morty.\"\n bot.send_message(chat_id=update.message.chat_id, text=result)", "def ballot_get_contest_by_id(contest_id):\r\n return make_request({\"method\": \"ballot_get_contest_by_id\",\r\n \"params\": [contest_id],\r\n \"jsonrpc\": \"2.0\",\r\n \"id\": 0, })", "def downvote_question(question_id):\n \n question = question_object.get_question(question_id)\n \n if question:\n downvote_question = question\n downvote_question['votes'] = downvote_question['votes'] - 1\n return jsonify({\"status\": 200, \"data\": downvote_question}), 200\n\n\n return jsonify({\"status\": 404, \"error\": \"Question not found\"}), 404", "def get_question_of_answer(answer):\n dynamodb = boto3.resource(\"dynamodb\", region_name=\"eu-central-1\")\n question_table = dynamodb.Table(\"Questions\")\n\n question_id = answer.get(\"QuestionId\")\n # query topic_id of the question\n try:\n response = question_table.get_item(Key={\"QuestionId\": question_id})\n question = response[\"Item\"]\n except:\n print(\"No question found, returning None..\")\n return None\n return question", "def update_vote(self):\n if not self.answer_id:\n return False\n try:\n con = psycopg2.connect(**self.config)\n cur = con.cursor(cursor_factory=RealDictCursor)\n query = \"UPDATE votes SET vote=%s WHERE answer_id=%s AND user_id=%s\"\n cur.execute(query, (self.vote_value, self.answer_id, self.user_id))\n con.commit()\n except Exception as e:\n print(e)\n con.close()\n return False\n return True", "def check_answer(self, chat_id, answer):\n\n language = self.user.get_lang()\n if answer == language:\n self.user.increase_score()\n\n message_text = \"Correct!\"\n self.bot.sendMessage(chat_id, message_text)\n\n self.user.set_lang_path(\"\") # resets current_lang_path in database\n self.send_track(chat_id)\n else:\n message_text = \"You answer is incorrect. 
Try again.\"\n self.bot.sendMessage(chat_id, message_text)", "def update(self)->None:\n database.cursor.execute(\"UPDATE rsvps SET response = %s WHERE meetup = %s AND user_id = %s\", (\n self.response,\n self.meetup,\n self.user\n ))\n database.connectio", "def put(self, id):\n data = request.json\n update_entry(id, data)\n return None, 204", "def vote(request, question_id):\n\n user = request.user\n if not user.is_authenticated:\n return redirect('login')\n\n question = get_object_or_404(Question, pk=question_id)\n try:\n selected_choice = question.choice_set.get(pk=request.POST['choice'])\n except (KeyError, Choice.DoesNotExist):\n context = {\n 'question' : question,\n 'error_message' : \"You didn't select a choice.\"\n }\n return render(request, 'polls/detail.html',context)\n else:\n selected_choice.votes += 1\n selected_choice.save()\n return HttpResponseRedirect(reverse('results',args=(question.id,)))", "def update_problem(problem_id, problem):\n Firebase = firebase.FirebaseApplication('https://team1robotsim.firebaseio.com/', None)\n result = Firebase.get('/problems', 'id_' + str(problem_id))\n \n try:\n if result is not None:\n \tproblem = Problem.from_dict(connexion.request.get_json())\n \treturn jsonify(Firebase.put('/problems', 'id_' + str(problem_id), problem))\n except ValueError:\n return jsonify(Error(404, \"Problem not found\")), status.HTTP_404_NOT_FOUND", "def put(self, id):\n data = request.json\n update_scenario(id, data)\n return None, 204", "def quiz(update: Update, context: CallbackContext) -> None:\n questions = [\"1\", \"2\", \"4\", \"20\"]\n message = update.effective_message.reply_poll(\n \"How many eggs do you need for a cake?\", questions, type=Poll.QUIZ, correct_option_id=2\n )\n # Save some info about the poll the bot_data for later use in receive_quiz_answer\n payload = {\n message.poll.id: {\"chat_id\": update.effective_chat.id, \"message_id\": message.message_id}\n }\n context.bot_data.update(payload)", "def delete_answer(request, answer_id):\n raise NotImplementedError", "def get_answers_by_answer_id(self, answer_id):\n return self._answers_by_id.get(answer_id)", "def put(self, id):\n req = api.payload\n try:\n result = update_task(\n get_db(),\n id,\n req[\"task\"],\n date.fromisoformat(req[\"due_by\"]),\n Status[req[\"status\"]],\n )\n return task_to_dict(result), 201\n except ValueError:\n api.abort(422, \"Invalid Status\")", "def update_item(id: str, obj: endpoint_model):\n # should this error if exists?\n if obj.id:\n if obj.id != id:\n raise HTTPException(status_code=400, detail=\"id in body does not match id in path\")\n else:\n obj.id = id\n new_obj = db.save(obj)\n return new_obj", "def checkAnswer2(questionID):\n questionGuess = request.args.get('questionGuess', 'FORM ERROR')\n print(\"{0} {1}\".format(questionID, questionGuess))\n return prepJSON(cs411_answers.checkAnswer(questionID, questionGuess))", "def test_editing_patient_goals(self):\n\n data = {\"goal-body\": \"Edited goal body.\"}\n result = self.client.post(\"/goal/1/edit.json\", data=data)\n\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Edited goal\", result.data)", "def handle_id_answer(self, response_text):\n id_answer = None\n try:\n json_response = json.loads(response_text)\n id_answer = json_response[\"request\"]\n except json.decoder.JSONDecodeError:\n elements_splitted = response_text.split(\"|\")\n if elements_splitted and len(elements_splitted) >= 2:\n id_answer = elements_splitted[1]\n return id_answer", "def checkAnswer(questionID):\n questionGuess = 
request.args.get('questionGuess', 'FORM ERROR')\n print(\"{0} {1}\".format(questionID, questionGuess))\n return prepJSON(cs411_answers.checkAnswer2(questionID, questionGuess))", "def get_question(self, id):\n\t\tif id < len(self.questions) and id >= 0:\n\t\t\treturn self.questions[id]\n\t\telse:\n\t\t\treturn None", "def get_user_answer_to_poll():\n try:\n poll_id = request.args.get('poll_id')\n user_name = request.args.get('user_name')\n answer = service.get_user_answer(user_name, poll_id)\n if answer == '':\n return make_response(answer, 204)\n return make_response(str(answer), 200)\n\n except Exception as e:\n print(e)\n return make_response(\"Failed while fetching the answer\", 500)", "def update(id):\n if request.method == \"POST\":\n result = update_post(\n id,\n request.form[\"title\"],\n request.form[\"body\"]\n )\n flash(result)\n return redirect(url_for(\"show\"))\n else:\n post = get_post(id)\n return render_template(\"edit.html\", **post)", "def patch(self, id):\n try:\n task = update_status(get_db(), id, Status[api.payload[\"status\"]])\n if not task:\n api.abort(404, \"Invalid Task\")\n return task_to_dict(task)\n except ValueError:\n api.abort(422, \"Invalid Status\")", "def set_answer(data):\n # get data\n user_id = data[\"user_id\"]\n user = store.get_user_by_id(user_id)\n answer_id = data[\"answer_id\"]\n answer = store.get_answer_by_id(answer_id)\n quiz = store.get_quiz_by_user_id(user_id)\n\n # check if it is time to go to the next question, if needed\n quiz.next_question()\n\n # get the question\n question_id = quiz.get_current_question_id()\n question = store.get_question_by_id(question_id)\n\n # check if enough data to answer the question\n if user_id and answer:\n\n # get the users answers for this question (user is still scoped to quiz, so user == quiz)\n user_answers = store.get_user_answers_by_user_and_question_id(\n user_id, answer.question_id)\n\n # if correct and no previous answer found and the question is still active\n if not len(user_answers) and answer.question_id == question_id:\n\n # create a new answer\n new_user_answer = UserAnswer(\n answer.question_id, answer_id, user_id)\n\n # store new answer and increment the store\n store.set_user_answer(new_user_answer)\n if answer.is_correct:\n user.score += question.score\n question = store.get_question_by_id(answer.question_id)", "def upvote_question(question_id):\n\n question = question_object.get_question(question_id)\n \n if question:\n upvote_question = question\n upvote_question['votes'] = upvote_question['votes'] + 1\n return jsonify({\"status\": 200, \"data\": upvote_question}), 200\n\n return jsonify({\"status\": 404, \"error\": \"Question not found\"}), 404", "def downvote_question(self, question_id):\n locations, vote_id = [\"question_id\", \"user_id\", \"meetup_id\", \"vote\"], ''\n\n question = self.sql.fetch_details_by_criteria(\n \"question_id\", question_id, \"questions\")\n\n votes = self.sql.fetch_details_by_criteria(\n \"question_id\", question_id, \"votes\")\n\n if not question:\n return self.makeresp(\"Question not found\", 404)\n\n isempty = DataValidators(\n self.question_details).check_values_not_empty()\n\n if isinstance(isempty, str):\n return self.makeresp(isempty, 400)\n\n try:\n user = self.sql.fetch_details_by_criteria(\n \"user_id\", self.question_details[\"user\"], \"users\")\n\n except KeyError as keyerr:\n return self.makeresp(\"{} is a required key\".format(keyerr), 400)\n\n if not user:\n return self.makeresp(\"User does not exist. 
Please register first\", 404)\n\n voted_users = [\n user for user in votes if self.question_details[\"user\"] in user]\n\n if voted_users:\n\n vote_id = voted_users[0][0]\n\n if [vote[3] for vote in voted_users if \"down\" in vote]:\n\n return self.makeresp(\"You have already downvoted this question\", 403)\n\n else:\n\n self.sql.update_votes(vote_id, \"down\")\n\n elif not voted_users:\n\n details = {\n \"question\": question_id,\n \"user\": self.question_details[\"user\"],\n \"vote\": \"down\",\n \"meetup\": question[0][1]\n }\n\n vote_id = SqlHelper(details).save_to_database(locations, \"votes\")\n\n data = self.sql.vote_question(question_id, \"down\")\n\n return self.makequestionresponse(data, vote_id)", "def goals_for(game_id, player=None, goal_name=None):\n test_game = game.Game(parse_game.parse_game( \\\n codecs.open('testing/testdata/'+game_id, encoding='utf-8').read()))\n found_goals = goals.check_goals(test_game)\n\n if player:\n found_goals = [g for g in found_goals if g['player'] == player]\n\n if goal_name:\n found_goals = [g for g in found_goals if g['goal_name'] == goal_name]\n\n return found_goals", "def edit_comment(self, id, body, **args):\n args.update(id=id, body=body)\n return self.fetch(\"/comment\", post_args=args)", "def put(self, id):\n activity = Activity().get(id)\n if not activity:\n abort(404, \"Activity not found\")\n\n return activity._update(request.json)", "def get_problem(id):\n return query(WEB_EXAMPLE_BASE + f\"/classical/problem/{id}\")", "def update_goal_info(self):\n self._goal_info_cache = self._get_goal_info()", "def get_single_question(self, id):\n query = (\"SELECT * FROM tbl_questions WHERE question_id = %s;\")\n inputs = id\n user_requests = get_query(query, inputs)\n return user_requests", "def insert_answer_from_telegram():\n try:\n chat_id = request.args.get('chat_id')\n answer = request.args.get('answer')\n poll_id = request.args.get('poll_id')\n user_name = user_service.get_user_name_by_chat_id(chat_id)\n if user_service.check_user(user_name):\n service.insert_answer(answer, poll_id, user_name)\n else:\n return make_response(\"Could not find user_name\", 501)\n\n except Exception as e:\n print(e)\n return make_response(\"Could not insert answer\", 500)\n return make_response(\n \"Insert answer\\n\" +\n \"\\nPoll id:\" + str(poll_id) +\n \"\\nUser name:\" + str(user_name) +\n \"\\nAnswer:\" + str(answer), 200)", "def get_res_by_id(self,qid):\n return self._run[qid]", "def put(self, id ):\n adm = Administration()\n print(api.payload)\n p = Profile.from_dict(api.payload)\n if p is not None:\n p.set_id(id)\n adm.save_profile(p)\n return p, 200\n else:\n return '', 500", "def puzzle_view(request, puzzle_id):\n puzzle = get_object_or_404(Puzzle, puzzle_id__iexact=puzzle_id)\n team = puzzle.hunt.team_from_user(request.user)\n\n if(team is not None):\n request.ratelimit_key = team.team_name\n\n is_ratelimited(request, fn=puzzle_view, key='user', rate='2/10s', method='POST',\n increment=True)\n if(not puzzle.hunt.is_public):\n is_ratelimited(request, fn=puzzle_view, key=get_ratelimit_key, rate='5/m', method='POST',\n increment=True)\n\n if(getattr(request, 'limited', False)):\n logger.info(\"User %s rate-limited for puzzle %s\" % (str(request.user), puzzle_id))\n return HttpResponseForbidden()\n\n # Dealing with answer submissions, proper procedure is to create a submission\n # object and then rely on Submission.respond for automatic responses.\n if request.method == 'POST':\n if(team is None):\n if(puzzle.hunt.is_public):\n team = 
puzzle.hunt.dummy_team\n else:\n # If the hunt isn't public and you aren't signed in, please stop...\n return HttpResponse('fail')\n\n form = AnswerForm(request.POST)\n form.helper.form_action = reverse('huntserver:puzzle', kwargs={'puzzle_id': puzzle_id})\n\n if form.is_valid():\n user_answer = form.cleaned_data['answer']\n s = Submission.objects.create(submission_text=user_answer, team=team,\n puzzle=puzzle, submission_time=timezone.now())\n s.respond()\n else:\n s = None\n\n # Deal with answers for public hunts\n if(puzzle.hunt.is_public):\n if(s is None):\n response = \"Invalid Submission\"\n is_correct = None\n else:\n response = s.response_text\n is_correct = s.is_correct\n\n context = {'form': form, 'puzzle': puzzle, 'PROTECTED_URL': settings.PROTECTED_URL,\n 'response': response, 'is_correct': is_correct}\n return render(request, 'puzzle.html', context)\n\n if(s is None):\n return HttpResponseBadRequest(form.errors.as_json())\n\n # Render response to HTML for live hunts\n submission_list = [render_to_string('puzzle_sub_row.html', {'submission': s})]\n\n try:\n last_date = Submission.objects.latest('modified_date').modified_date.strftime(DT_FORMAT)\n except Submission.DoesNotExist:\n last_date = timezone.now().strftime(DT_FORMAT)\n\n # Send back rendered response for display\n context = {'submission_list': submission_list, 'last_date': last_date}\n return HttpResponse(json.dumps(context))\n\n # Will return HTML rows for all submissions the user does not yet have\n elif request.is_ajax():\n if(team is None):\n return HttpResponseNotFound('access denied')\n\n # Find which objects the user hasn't seen yet and render them to HTML\n last_date = datetime.strptime(request.GET.get(\"last_date\"), DT_FORMAT)\n last_date = last_date.replace(tzinfo=tz.gettz('UTC'))\n submissions = Submission.objects.filter(modified_date__gt=last_date)\n submissions = submissions.filter(team=team, puzzle=puzzle)\n submission_list = [render_to_string('puzzle_sub_row.html', {'submission': submission})\n for submission in submissions]\n\n try:\n last_date = Submission.objects.latest('modified_date').modified_date.strftime(DT_FORMAT)\n except Submission.DoesNotExist:\n last_date = timezone.now().strftime(DT_FORMAT)\n\n context = {'submission_list': submission_list, 'last_date': last_date}\n return HttpResponse(json.dumps(context))\n\n else:\n # Only allowed access if the hunt is public or if unlocked by team\n if(not puzzle.hunt.is_public):\n if(not request.user.is_authenticated):\n return redirect('%s?next=%s' % (reverse_lazy(settings.LOGIN_URL), request.path))\n\n if (not request.user.is_staff):\n if(team is None or puzzle not in team.unlocked.all()):\n return render(request, 'access_error.html', {'reason': \"puzzle\"})\n\n # The logic above is negated to weed out edge cases, so here is a summary:\n # If we've made it here, the hunt is public OR the user is staff OR\n # the user 1) is signed in, 2) not staff, 3) is on a team, and 4) has access\n if(team is not None):\n submissions = puzzle.submission_set.filter(team=team).order_by('pk')\n disable_form = puzzle in team.solved.all()\n else:\n submissions = None\n disable_form = False\n form = AnswerForm(disable_form=disable_form)\n form.helper.form_action = reverse('huntserver:puzzle', kwargs={'puzzle_id': puzzle_id})\n try:\n last_date = Submission.objects.latest('modified_date').modified_date.strftime(DT_FORMAT)\n except Submission.DoesNotExist:\n last_date = timezone.now().strftime(DT_FORMAT)\n context = {'form': form, 'submission_list': submissions, 'puzzle': 
puzzle,\n 'PROTECTED_URL': settings.PROTECTED_URL, 'last_date': last_date, 'team': team}\n return render(request, 'puzzle.html', context)", "def get_proof_item(self, id):\n return self.prf.find_item(id)", "def check_answer(update: Update, context: CallbackContext):\n cleaned_text = update.message.text.strip().lower()\n cleaned_soln = context.chat_data['solution'].strip().lower()\n if cleaned_text == cleaned_soln:\n\n # Cancel current question\n chat_id = update.message.chat_id\n data = {'chat_id': chat_id,'context': context}\n chat_jobs = context.job_queue.get_jobs_by_name(str(chat_id))\n for job in chat_jobs:\n job.schedule_removal()\n\n # Update chat with answer\n name = update.message.from_user.first_name\n soln = context.chat_data['solution']\n update.message.reply_text(f'Correct! {name} got the right answer! It was {soln}.')\n\n # Prevent answer trigger\n context.chat_data['solution'] = ''\n\n # Update scores\n user_id = update.message.from_user.id\n if user_id not in context.chat_data['user']:\n context.chat_data['user'][user_id] = dict()\n context.chat_data['user'][user_id]['name'] = name\n context.chat_data['user'][user_id]['points'] = context.chat_data['user'][user_id].get('points', 0) + 1\n\n # Schedule run_quiz if there are more questions\n if context.chat_data['number'] < context.chat_data['total']:\n context.job_queue.run_once(run_quiz, 3, context=data, name=str(chat_id))# Delay time to next question, question answered\n\n # Schedule end_quiz if there are no more questions\n else:\n context.job_queue.run_once(end_quiz, 3, context=data, name=str(chat_id))# Delay time to end quiz, question answered", "def get(self, _id):", "def PromptForId(odb, message, orig_id=1):\n\n print 'Is this prediction for someone other than the poster?\\n\\n%s\\n\\n' % \\\n (message['Text'])\n diff_user = raw_input('(y/n): ')\n\n if diff_user == 'n':\n return orig_id\n \n user_name = raw_input('Username this prediction is for? ')\n user_id = odb.GetUserId(user_name)\n\n if user_id is None:\n print 'Unrecognized username, try again.\\n'\n return PromptForId(odb, message, orig_id)\n\n else:\n return user_id", "def add_answer(self, text, responder_id):\n answer = text.split('[Answer]')[1].strip()\n m = re.search('\\[(qid):([0-9]*)\\]', answer)\n if m is not None:\n question_id = m.group(2)\n answer_text = answer.split('[qid:{0}]'.format(question_id))[1].strip()\n # stores present answer\n self.cur.execute(\n \"INSERT INTO answer (answer, responder_id, question_id) VALUES (%s, %s, %s);\",\n (answer_text, responder_id, question_id))\n self.cur.execute(\n \"INSERT INTO users (user_id) SELECT (%s) WHERE NOT EXISTS (SELECT * FROM users WHERE user_id=%s);\",\n (str(responder_id), str(responder_id)))\n self.event_handler.new_answer(question_id, answer, responder_id)\n else:\n self.stored_answer = False" ]
[ "0.6852361", "0.6565505", "0.5933789", "0.5877702", "0.58608663", "0.5800607", "0.57284534", "0.5717084", "0.5700604", "0.5696132", "0.5641497", "0.5632813", "0.56148297", "0.5601214", "0.55787456", "0.5564735", "0.5534567", "0.549483", "0.5468139", "0.5458586", "0.54545426", "0.54520226", "0.5450927", "0.5427241", "0.5410992", "0.5407414", "0.5400134", "0.53833663", "0.53553784", "0.534294", "0.533144", "0.5298315", "0.5294581", "0.52936995", "0.5292643", "0.52765304", "0.5276382", "0.527334", "0.52694964", "0.5265499", "0.5260504", "0.5258308", "0.5256215", "0.52558094", "0.5252306", "0.52444464", "0.5242799", "0.5208541", "0.52004683", "0.51968396", "0.51856166", "0.5181955", "0.51742524", "0.5165297", "0.51504886", "0.51492596", "0.5143373", "0.5140442", "0.51393527", "0.512803", "0.51220065", "0.5120591", "0.51153654", "0.5109419", "0.510065", "0.5099392", "0.5092062", "0.50858", "0.50852305", "0.5085188", "0.508391", "0.5080142", "0.5078667", "0.505523", "0.504312", "0.50351536", "0.5028899", "0.5028784", "0.5027723", "0.50215524", "0.50065255", "0.49906778", "0.49900052", "0.49892965", "0.4986463", "0.4985593", "0.49756148", "0.4966981", "0.49636212", "0.4960719", "0.49582842", "0.4955221", "0.49548253", "0.4952884", "0.49466679", "0.4946512", "0.49204975", "0.49188763", "0.49187693", "0.4902411" ]
0.653086
2
Tokenization/string cleaning for all datasets except for SST.
def clean_str(string): string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string) string = re.sub(r"\'re", " \'re", string) string = re.sub(r"\'d", " \'d", string) string = re.sub(r"\'ll", " \'ll", string) string = re.sub(r",", " , ", string) string = re.sub(r"!", " ! ", string) string = re.sub(r"\(", " \( ", string) string = re.sub(r"\)", " \) ", string) string = re.sub(r"\?", " \? ", string) string = re.sub(r"\s{2,}", " ", string) return string.strip().lower()
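The document above, re-indented as runnable Python with descriptive comments added (the regex steps themselves are unchanged); the sample call at the end and the output shown in its comment are illustrative additions, not part of the dataset row.

import re

def clean_str(string):
    # keep only letters, digits, and the listed punctuation; everything else becomes a space
    string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
    # split off contractions so they become separate tokens ("we're" -> "we 're")
    string = re.sub(r"\'re", " \'re", string)
    string = re.sub(r"\'d", " \'d", string)
    string = re.sub(r"\'ll", " \'ll", string)
    # pad punctuation with spaces so it is tokenized on its own
    string = re.sub(r",", " , ", string)
    string = re.sub(r"!", " ! ", string)
    string = re.sub(r"\(", " \( ", string)
    string = re.sub(r"\)", " \) ", string)
    string = re.sub(r"\?", " \? ", string)
    # collapse runs of whitespace, then trim and lowercase
    string = re.sub(r"\s{2,}", " ", string)
    return string.strip().lower()

print(clean_str("We're ready, it'll be fun!"))
# we 're ready , it 'll be fun !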
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cleaning (data):", "def clean_data(self, data):\r\n data=data.lower()\r\n doc=nlp(data, disable=['parser', 'ner'])\r\n \r\n #Removing stopwords, digits and punctuation from data\r\n tokens = [token.lemma_ for token in doc if not (token.is_stop\r\n or token.is_digit\r\n or token.is_punct\r\n )]\r\n \r\n tokens = \" \".join(tokens)\r\n return tokens", "def clean_article(self):\n # split into tokens by white space\n tokens = self.text.split(\" \")\n # remove punctuation from each token\n table = str.maketrans('', '', punctuation)\n tokens = [w.translate(table) for w in tokens] # type: List[Any]\n # remove remaining tokens that are not alphabetic\n tokens = [word for word in tokens if word.isalpha()]\n # filter out stop words\n stop_words = set(stopwords.words('english'))\n tokens = [w for w in tokens if not w in stop_words]\n # lemmatization and lowercase\n lmtzr = WordNetLemmatizer()\n tokens = [lmtzr.lemmatize(w.lower()) for w in tokens]\n # filter out short tokens\n tokens = [word for word in tokens if len(word) > 1]\n return tokens", "def test_drop_empty_tokens():\n assert TextCleaner().transform([[[\",;\", \"hi\"]]])[\"corpus\"][0] == [\"hi\"]", "def _clean_text(self, X):\n\n def normalize(text):\n text = text.translate(str.maketrans('', '', string.punctuation))\n return text.lower()\n\n for col_name in X.columns:\n # we assume non-str values will have been filtered out prior to calling TextFeaturizer. casting to str is a safeguard.\n col = X[col_name].astype(str)\n X[col_name] = col.apply(normalize)\n return X", "def clean_raw_data(self, text):\r\n return [token.lower() for token in nltk.word_tokenize(text)\r\n if token not in self.stop_words and token not in punctuation]", "def data_clean(df, name=\"Tweet\"):\n tic = timer()\n twts = []\n # Define a punctuation dictionary so that we can replace each punctuation with an empty space.\n table = str.maketrans('', '', string.punctuation)\n stopWords = set(stopwords.words('senti')) # Set stop words language to English\n for n in range(df[name].shape[0]):\n text = df[name][n]\n tokens = text.split() # Split each tweet into list of words.\n tokens = filter(lambda x: x[0] != '@', tokens) # Remove mentions\n tokens = [word.translate(table) for word in tokens] # Remove punctuation marks\n tokens = [word for word in tokens if word.isalpha()] # Remove any word that is not completely alphabetic.\n tokens = [word for word in tokens if len(word) > 1] # Remove any word that is shorter than two letters\n tokens = [word.lower() for word in tokens]\n tokens = [word for word in tokens if not word in stopWords] # Remove any stopwords\n # Modified for dumping data without additional commas in csv file\n token = \"\"\n for i in tokens:\n token += (i + \" \")\n twts.append(token)\n toc = timer()\n print(\"Time for cleaning tweets\", (toc - tic))\n return twts", "def clean_up_tokenization_spaces(out_string):\n out_string = out_string.replace('<unk>', '')\n out_string = out_string.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ','\n ).replace(\" ' \", \"'\").replace(\" n't\", \"n't\").replace(\" 'm\", \"'m\").replace(\" do not\", \" don't\"\n ).replace(\" 's\", \"'s\").replace(\" 've\", \"'ve\").replace(\" 're\", \"'re\")\n return out_string", "def clean_str_sst(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "def normalize(data):\n data = lowercase(data)\n data = remove_punct(data)\n data = remove_apostrophes(data)\n data = 
remove_stopwords(data)\n data = num_to_words(data)\n data = lemmatize(data)\n data = stemming(data)\n data = remove_punct(data)\n data = num_to_words(data)\n data = lemmatize(data)\n data = stemming(data)\n data = remove_punct(data) #done again to remove hyphens produced by num2words\n data = remove_stopwords(data) #done agan to remove stopwords produced by num2words\n return data", "def cleaning(self, document):\n remove_punct = ''.join(i for i in document.lower() if i not in self.punctuation)\n tokenized = [i for i in remove_punct.split() if i not in self.stopwords]\n if self.lang is not 'chinese':\n # Lemmatizes if not chinese\n tokenized = [self.lemmatize.lemmatize(i) for i in tokenized]\n return tokenized", "def test_cleaner_tokenized():\n X = Tokenizer().transform(X_text)\n X = TextCleaner().transform(X)\n assert isinstance(X[\"corpus\"][0], list)", "def full_cleanse(data):\n tokenizer = RegexpTokenizer(r'\\w+')\n stops = set(stopwords.words('english'))\n\n sent_toks = []\n for text in data:\n try:\n text = tokenizer.tokenize(text)\n pos_tagged = nltk.pos_tag(text)\n words = [w[0] for w in pos_tagged if w[1].capitalize() != 'NNP']\n words = [WordNetLemmatizer().lemmatize(w) for w in words]\n words = [w.lower() for w in words if not w.lower() in stops]\n words = [w for w in words if not w.isdigit()]\n sent_toks.append(words)\n except TypeError:\n pass\n return sent_toks", "def clean_all(self, tweet):\n tweet = self.clean_urls(tweet)\n tweet = self.clean_hashtags(tweet)\n tweet = self.clean_mentions(tweet)\n tweet = self.clean_emojis_and_smileys(tweet)\n tweet = self.clean_unnecessary_characters(tweet)\n tweet = self.clean_reserved_words(tweet)\n\n return tweet", "def clean(dataset_path: str) -> str:\n def _remove_unused(text: str):\n clean_data = text.lower().strip()\n clean_data = re.sub(\n r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',\n \" \", clean_data)\n clean_data = re.sub(r\"<.*>\", \"\", clean_data)\n clean_data = re.sub(r\"@[a-zA-Z0-9_]+\", \"\", clean_data)\n clean_data = clean_data.replace(\"\\n\", \"\")\\\n .replace(\"#\", \"\")\n return clean_data\n\n dtypes = {\n \"id\": int,\n \"keyword\": str,\n \"location\": str,\n \"text\": str\n }\n\n if \"train\" in dataset_path:\n dtypes[\"target\"] = int\n\n new_path = _make_new_filepath(dataset_path, \"clean\")\n df = pd.read_csv(f\"/data/{dataset_path}\", index_col=\"id\", dtype=dtypes)\n df[\"text\"] = df[\"text\"].apply(_remove_unused)\n df.to_csv(f\"/data/{new_path}\")\n return new_path", "def _clean(self, texts, no_punc=False):\n result = ''\n sw = self._sw_no_punc_dict if no_punc else self._sw_dict\n for t in texts:\n if t not in sw:\n result += t\n return result", "def clean_text(data):\r\n data = data.replace('\\n', ' ') #remove new lines\r\n replace_l = [\"'\",'!','/','\\\\','=',',',':', '<','>','?','.','\"',')','(','|','-','#','*','+', '_'] #list of characters to remove\r\n data = data.lower() #Convert all the words to lower case\r\n for i in replace_l:\r\n data = data.replace(i,' ') #replace words with blank character\r\n return data #return clean data\r", "def untokenize(self):\n return ''.join([t[self.TEXT_WS] for t in self.data]).strip()", "def untokenize(self):\n return ''.join([t[self.TEXT_WS] for t in self.data]).strip()", "def data_cleaner(doc):\n \n sw = stopwords.words('english')\n regex_token = RegexpTokenizer(r\"([a-zA-Z]+(?:’[a-z]+)?)\")\n doc = regex_token.tokenize(doc)\n doc = [word.lower() for word in doc]\n doc = [word for word in doc if word not in sw]\n 
#print(doc)\n doc = pos_tag(doc)\n doc = [(word[0], get_wordnet_pos(word[1])) for word in doc]\n #print(doc)\n lemmatizer = WordNetLemmatizer() \n doc = [lemmatizer.lemmatize(word[0], word[1]) for word in doc]\n #print(' '.join(doc))\n return ' '.join(doc)", "def clean_text(text):\n global cleaned_text\n # remove numbers\n text_nonum = re.sub(r'\\d+', '', text)\n # remove punctuations and convert characters to lower case\n text_nopunct = \"\".join([char.lower() for char in text_nonum if char not in string.punctuation]) \n # substitute multiple whitespace with single whitespace\n # Also, removes leading and trailing whitespaces\n text_no_doublespace = re.sub('\\s+', ' ', text_nopunct).strip()\n #tokenise text\n tokenised_text = text_no_doublespace.split()\n for word in tokenised_text:\n if len(word) == 1:\n tokenised_text.remove(word)\n #if word is a stop word, remove it from the list\n elif word in stopwords.words('english'):\n tokenised_text.remove(word)\n #de-tokenise text\n cleaned_text = ' '.join(tokenised_text)\n return cleaned_text", "def clean_str_sst(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "def stage_two_preprocessing(data: pd.Series) -> pd.Series:\n # designed to be run after remove_contractions\n data_ = data.dropna()\n data_ = remove_punctuation(data_)\n data_ = numbers_to_words(data_)\n data_ = remove_stopwords(data_)\n return data_", "def preprocessing(data):\n #tokenizer = RegexpTokenizer(r'\\w+') # allow charachter only\n #words = tokenizer.tokenize(data) # tokenize : convert to words\n words = word_tokenize(data)\n # remove stop words & stemming\n new_words = []\n for word in words:\n if word not in stop_words:\n new_words.append(stemmer.stem(word)) # append to new words with stemming\n \n if '' in new_words: new_words.remove('') # remove space from list\n #print(\"Preprocessing : {}\".format(new_words))\n return new_words", "def _clean_data(sent, sw, language='ch'):\n if language == 'ch':\n sent = re.sub(r\"[^\\u4e00-\\u9fa5A-z0-9!?,。]\", \" \", sent)\n sent = re.sub('!{2,}', '!', sent)\n sent = re.sub('?{2,}', '!', sent)\n sent = re.sub('。{2,}', '。', sent)\n sent = re.sub(',{2,}', ',', sent)\n sent = re.sub('\\s{2,}', ' ', sent)\n if language == 'en':\n sent = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", sent)\n sent = re.sub(r\"\\'s\", \" \\'s\", sent)\n sent = re.sub(r\"\\'ve\", \" \\'ve\", sent)\n sent = re.sub(r\"n\\'t\", \" n\\'t\", sent)\n sent = re.sub(r\"\\'re\", \" \\'re\", sent)\n sent = re.sub(r\"\\'d\", \" \\'d\", sent)\n sent = re.sub(r\"\\'ll\", \" \\'ll\", sent)\n sent = re.sub(r\",\", \" , \", sent)\n sent = re.sub(r\"!\", \" ! \", sent)\n sent = re.sub(r\"\\(\", \" \\( \", sent)\n sent = re.sub(r\"\\)\", \" \\) \", sent)\n sent = re.sub(r\"\\?\", \" \\? 
\", sent)\n sent = re.sub(r\"\\s{2,}\", \" \", sent)\n if sw is not None:\n sent = \"\".join([word for word in sent if word not in sw])\n\n return sent", "def strip_unsafe_tokens(string, parser=ANSI_PARSER):\n return parser.strip_unsafe_tokens(string)", "def clean_str_sst(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string) \n string = re.sub(r\"\\s{2,}\", \" \", string) \n return string.strip().lower()", "def clean_up_tokenization(self, out_string: str) -> str:\r\n out_string = (\r\n out_string.replace(\" .\", \".\")\r\n .replace(\" ?\", \"?\")\r\n .replace(\" !\", \"!\")\r\n .replace(\" ,\", \",\")\r\n .replace(\" ' \", \"'\")\r\n .replace(\" n't\", \"n't\")\r\n .replace(\" 'm\", \"'m\")\r\n .replace(\" 's\", \"'s\")\r\n .replace(\" 've\", \"'ve\")\r\n .replace(\" 're\", \"'re\")\r\n )\r\n return out_string", "def clean_unnecessary_whitespaces(self, tweet):\n tweet = ' '.join(tweet.split())\n\n return tweet", "def cleaninto_df(frame:pd) -> pd:\n # remove repeated characters EXAMPLE: DIMPLLLLEEEEE -> DIMPLE\n # nopunc = word_tokenize(nopunc) this might not work. find something else\n\n stop = stopwords.words('english')\n newStopWords = ['get', 'http','there','and','i','t','it','d']\n stop.extend(newStopWords)\n lemmatizer = WordNetLemmatizer()\n clean = []\n new_col = []\n frame['Cleaned'] = None\n for tweet in frame.content:\n if 'RT' in tweet:\n if tweet.index('RT')>5:\n tweet = tweet[:tweet.index('RT')]\n else:\n tweet = tweet[2:]\n # WHAT ARE WE TRYING TO CLEAN HERE?\n # cleaning with preprocessor library https://pypi.org/project/tweet-preprocessor/\n tweet = ' '.join(re.sub(\"(@\\w+)|([^A-Za-z]+)|(\\w+:\\/\\/\\S+)\", \" \", tweet).split())\n # changes #November1 -> November: need to remove full hashtag?\n # changes @poetweatherford: -> poetweatherford\n # changes don’t -> don t, children's -> children s\n print(\"after regex:\" + str(tweet))\n clean.append(tweet.lower())\n for clean_tweet in clean:\n word_tokens = word_tokenize(clean_tweet)\n clean_tokens = [word for word in word_tokens if word not in stop]\n stems = []\n for item in clean_tokens:\n stems.append(lemmatizer.lemmatize(item))\n new_sentence = ' '.join(stems)\n new_col.append(new_sentence.lower())\n frame['Cleaned'] = new_col\n return frame", "def stage_one_preprocessing(data: pd.Series) -> pd.Series:\n data_ = data.dropna()\n print('ascii')\n data_ = remove_non_ascii(data)\n print('lower')\n data_ = to_lowercase(data_)\n print('slash')\n data_ = underscore_and_slash_to_space(data_)\n print('ellipse')\n data_ = remove_ellipses(data_)\n print('white')\n data_ = shrink_whitespace(data_)\n #print('contracts')\n #data_ = remove_contractions(data_)\n return data_", "def remove_bad_chars(self, corpus: List[str]) -> List[str]:\n corpus_clean: List[str] = list()\n for doc in corpus:\n doc_tmp = \"\"\n doc_tmp = re.sub(self.bad_chars, \"\", doc)\n corpus_clean.append(doc_tmp)\n return corpus_clean", "def _setCleanString(self, words):\n \n if self.type=='DIAGNOSIS':\n excludes = ['DX']\n elif self.type == 'SECOND_LEVEL_DIAGNOSIS':\n #excludes = ['Assessment','Impression', 'Possible', 'ModifierCertainty']\n excludes = ['Assessment','Impression']\n elif self.type == 'DRUG':\n excludes = ['Route']\n elif self.type == 'MEDICAL_HISTORY':\n excludes = ['History', 'MedicalHistory']\n elif self.type == 'FAMILY_HISTORY':\n excludes = ['History', 'FamilyHistory', 'Family']\n else:\n return self.string\n \n s = ''\n pretk = ','\n for i, w in enumerate(words):\n if self.tags[i][1] in excludes:\n continue\n elif 
self.tags[i][1]=='COMMA':\n if pretk==',': \n continue\n else:\n s += w\n pretk = w\n continue\n elif s=='': \n s += w\n else:\n s += ' ' + w\n pretk = w\n \n return s", "def cleanData(s):\n\n # extract only word tokens of at least 2 chars\n re.compile(r\"\\b\\w\\w + \\b\", re.U).findall(s)\n\n # xml_dict = {';': '', '&lt': '<', '&amp': '&', '&gt': '>', '&quot': '\"',\n # '&apos': '\\''}\n # for key, value in xml_dict.iteritems():\n # s = s.replace(key, value)\n s.translate(maketrans('?!,.', ' '))\n\n with open('stopwords.txt') as stop_words:\n stop_words = {line.strip().lower() for line in stop_words if line!='\\n'}\n\n return s", "def punctutation_removal(tokenised_document):\n\n tokenised_document = [\n i for i in tokenised_document if i not in punctuation]\n tokenised_document_cleaner = []\n for i in tokenised_document:\n word = re.split(r'\\W', i)\n if (len(word) == 1):\n tokenised_document_cleaner.append(word[0])\n elif (len(word) == 2):\n if (word[0].isalpha() == False):\n tokenised_document_cleaner.append(word[1])\n elif (word[1].isalpha() == False):\n tokenised_document_cleaner.append(word[0])\n elif (word[0].isalpha() == True and word[1].isalpha() == True): # can affect collocations\n tokenised_document_cleaner.append(word[0])\n tokenised_document_cleaner.append(word[1])\n # to remove null strings\n resultant_tokenised_doc = []\n for word in tokenised_document_cleaner:\n if word != '':\n resultant_tokenised_doc.append(word)\n return resultant_tokenised_doc", "def sanitize_text(tokens, stopwords=None):\n\n tokens = [x.lower() for x in tokens]\n regex = re.compile('[^a-z]')\n\n for index in range(len(tokens)):\n tokens[index] = regex.sub('', tokens[index])\n if stopwords and tokens[index] in stopwords:\n tokens[index] = ''\n\n # remove empty elements\n tokens = [token for token in tokens if token != '']\n return tokens", "def strip_unsafe_tokens(self, string):\n return self.unsafe_tokens.sub(\"\", string)", "def set_clean_tokens(raw_token_list):\n\tlogger.debug('Cleaning Text')\n\n\tclean_tokens = []\n\tfor t in raw_token_list:\n\t\tclean_token = clean_word(t)\n\t\tif clean_token != \"\":\n\t\t\tclean_tokens.append(clean_token)\n\n\treturn set(clean_tokens)", "def clean_data(s):\n s = s.strip()\n s = s.lower()\n return s", "def detokenize(tokens):\n pass", "def process_text(input_txt):\r\n # if input is string\r\n tidy_txt = remove_pattern(input_txt,\"@[\\w]*\")\r\n ##=============================== if input is dataframe ====================##\r\n # tidy_txt = np.vectorize(remove_pattern)(input_txt,\"@[\\w]*\") #\r\n ##==========================================================================##\r\n # remove special characters\r\n tidy_txt = tidy_txt.replace(\"[^a-zA-Z#]\",\" \")\r\n # split into words\r\n tokenized_txt = tidy_txt.split()\r\n # perform stemming\r\n stemmer = PorterStemmer()\r\n tokenized_txt = [stemmer.stem(i) for i in tokenized_txt]\r\n print(tokenized_txt)\r\n # joining words back\r\n tokenized_txt = ' '.join(tokenized_txt)\r\n return tokenized_txt", "def preprocess(data):\n\n #remove urls and convert to lowercase\n #used this thread for help on urls: https://stackoverflow.com/questions/11331982/how-to-remove-any-url-within-a-string-in-python\n remove_url = [re.sub(r'^https?:\\/\\/.*[\\r\\n]*', '', x) for x in data]\n lower=[x.lower() for x in remove_url]\n\n #remove all non alphanumeric chars and empty strings\n return filter(None, [re.sub(r'\\W','',x) for x in lower])", "def clean_text(s, remove_stop_words=True, correct_spelling_mistakes=True):\n if type(s) is 
float: # some elements in Visite_ZNS are \"nan\"\n return \"\"\n \n s = s.lower() #s lowercase\n\n s = s.replace('4/4', '44') # 4/4 [Extremitäten] würde sonst zu 2 separaten tokens werden.\n s = s.replace('/', '/ ') # extra leerzeichen, sodass Worte die\n # vorher durch '/' getrennt waren nicht\n # zu einem gemeinsamen Token werden\n\n # filter invalid characters from tect:\n filtered_str = ''.join(filter(lambda ch: ch in allowed_chars, s))\n \n # remove common ambiguities through substitutions:\n replacements = [\n ('v a', 'va'),\n ]\n for to, fro in replacements:\n filtered_str = filtered_str.replace(f' {to} ', f' {fro} ') # vor allem.\n tokens = filtered_str.split()\n\n # remove '-' from all tokens, except tokens such as '-n'\n filter_hyphens_inside_words = lambda t: t.replace('-', '') if not (len(t) > 1 and t.find('-') == 0 and t[1].isdigit()) else t\n tokens = [filter_hyphens_inside_words(t) for t in tokens]\n \n # remove tokens with only 1 character:\n tokens = [t for t in tokens if len(t) > 1]\n\n # finally, correct spelling mistakes for tokens longer than 3 chars (ie. no abbreviations):\n # takes reaally long\n if correct_spelling_mistakes:\n for tested_token in filter(lambda token: len(token)>3, tokens):\n if not tested_token.isalpha(): # consider only tokens with only letters!\n continue\n\n cor = correction(tested_token)\n if tested_token == cor:\n continue\n \n # spelling mistake found! replace all occurences in the text.\n tokens = [cor if t == tested_token else t for t in tokens]\n # print(f\"'{token}' > {colored(cor, 'red')}\")\n\n if not remove_stop_words:\n return \" \".join(tokens) # remove multiple whitespaces in a row\n\n tokens = [word.replace('=', '') for word in tokens if not word in stop_words] #removes stop words from tokens and '=' from individual tokens\n return \" \".join(tokens)", "def clean_data(td):\n data = td.string\n try:\n return data.strip(\" \\n:-\")\n except AttributeError:\n return u\"\"", "def stage_three_preprocessing(data: pd.Series) -> pd.Series:\n data_ = data.dropna()\n data_ = shrink_whitespace(data_)\n #data_ = lemmatize(data_)\n return data_", "def cleanup_input(data):\n data = re.sub(r'[^0-9A-Za-z ()_,.-:]', '', data)\n return data", "def clean_data(data):\n \n cols = data.columns\n \n #these columns had some extra characters in the strings becuase of encoding issues\n list_to_strip=[\n 'attributes_alcohol',\n 'attributes_restaurantsattire',\n 'attributes_wifi',\n 'attributes_smoking',\n 'attributes_noiselevel',\n ]\n #this removes quotation marks and u's from strings\n \n for col in list_to_strip:\n data[col]=data[col].str.strip(\"u\\'\")\n \n #this replaces the strings None and none with Nan objects\n for col in cols:\n data[col]=data[col].where(data[col]!='None')\n data[col]=data[col].where(data[col]!='none')\n \n #this creates a list of categorical and numerical features\n categorical_features = cols.drop([\n 'review_count',\n 'restaurant',\n 'latitude',\n 'longitude',\n 'business_id',\n 'meanfunny',\n 'meanuseful',\n 'avgwordcount',\n 'maxwordcount',\n 'minwordcount',\n 'avgfunnywordcount',\n 'maxfunnywordcount',\n 'avgusefulwordcount',\n 'maxusefulwordcount',\n 'medianwordcount',\n 'upperquartilewordcount',\n 'lowerquartilewordcount',\n 'target'])\n \n \n numerical_features = [\n 'review_count',\n 'latitude',\n 'longitude',\n 'meanfunny',\n 'meanuseful',\n 'avgwordcount',\n 'maxwordcount',\n 'minwordcount',\n 'avgfunnywordcount',\n 'maxfunnywordcount',\n 'avgusefulwordcount',\n 'maxusefulwordcount',\n 'medianwordcount',\n 
'upperquartilewordcount',\n 'lowerquartilewordcount']\n \n #this replaces the categorial nans with 9 as a placeholder and fills numerical nans with 0\n data[categorical_features]=data[categorical_features].fillna(9)\n data[numerical_features]=data[numerical_features].fillna(0)\n \n #this makes all the categorical columns strings\n data[categorical_features]=data[categorical_features].astype(str)\n data = data\n \n return data, numerical_features, categorical_features", "def clean_text(s,stem=False):\n\tret = s.lower()\n\tret = re.sub(r'[^a-z ]',' ',ret)\n\tret = re.sub(r' +',' ',ret).strip()\n\tret = re.sub(r'see more occupations related to this (activity|skill|task)','',ret)\n\tif stem:\n\t\tret = ' '.join( stemmer.stem(word) for word in ret.split(' ') )\n\treturn ret", "def naive(self, text):\n\t\t#print(text)\n\t\ttokenizedText = []\n\t\tfor k in text: #look at each entity in one sentence\n\t\t\t\n\t\t\ta = \"\"#stores the current word \n\t\t\trun = []; #appends all words in a particular sentence\n\t\t\tfor i in range(len(k)):\n\t\t\t\t\n\t\t\t\tif(k[i] == ' ' or k[i] == '\t'): #tokenization at space or tab\n\t\t\t\t\t\n\t\t\t\t\tif(a!=\"\"):\n\t\t\t\t\t\tif(a[-1] == ',' or a[-1] == '-' or a[-1] == \"\\'\" or a[-1] == \";\" or a[-1] == \":\" or a[-1] ==\"!\" or a[-1] == \"?\" or a[-1] ==\"\\\"\") : #but remove mentioned punctuations from the end of the word, if present\n\t\t\t\t\t\t\ta = a[:-1]\n\t\t\t\t\t\tif(len(a)>0 and a[0] == \"\\\"\"):#remove starting quotes\n\t\t\t\t\t\t\ta = a[1:]\n\t\t\t\t\t\tif(len(a)>0):\n\t\t\t\t\t\t\trun.append(a)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\ta = \"\"\n\n\n\t\t\t\telif(i == len(k)-1): #remove the last punctuation mark, if present\n\t\t\t\t\t\n\t\t\t\t\ta = a+k[i];\n\t\t\t\t\t\n\t\t\t\t\tif(a[-1] == '.' or a[-1] == '\\\"' or a[-1] ==\"!\" or a[-1] == \"?\" or a[-1] ==\"\\'\" ):\n\t\t\t\t\t\ta = a[:-1]\n\t\t\t\t\tif(len(a)>0 and a[0] == \"\\\"\"):\n\t\t\t\t\t\ta = a[1:]\n\t\t\t\t\tif(len(a)>0):\n\t\t\t\t\t\trun.append(a)\n\t\t\t\t\t\t\n\t\t\t\t\t\ta = \"\"\n\n\n\t\t\t\telse:\n\t\t\t\t\t\n\t\t\t\t\tif((k[i] == ',' or k[i] == ':' or k[i] == ';') and k[i+1]!= ' ' ): # for other punctuation marks followed by a space\n\t\t\t\t\t\t#print(k[i-1])\n\t\t\t\t\t\tif(len(a)>0):\n\t\t\t\t\t\t\tif(a[-1] == '\\\"' or a[-1] ==\"!\" or a[-1] == \"?\" ):\n\t\t\t\t\t\t\t\ta = a[:-1]\n\t\t\t\t\t\t\tif(len(a)>0 and a[0] == \"\\\"\"):\n\t\t\t\t\t\t\t\ta = a[1:]\n\t\t\t\t\t\t\tif(len(a)>0):\n\t\t\t\t\t\t\t\trun.append(a)\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\ta = \"\"\n\n\n\t\t\t\t\telse:\n\n\t\t\t\t\t\ta = a+k[i];\n\n\t\t\ttokenizedText.append(run)\t\t\n\n\t\t\n\t\t\t\n\n\n\n\n\t\t#Fill in code here\n\n\t\treturn tokenizedText", "def preprocess(self, sentence, vocab_set=None):\n tokens = sentence.split()\n new_tokens = []\n for token in tokens:\n new_tokens += self.__clean(token)\n tokens = new_tokens\n\n tokens = self.__normalize_document(' '.join(tokens))\n\n return tokens", "def test_drop_punctuation():\n assert TextCleaner().transform([[\"'test!?\"]])[\"corpus\"][0] == \"test\"", "def clean_the_text(text):\n \n #Replace non-word characters with empty space\n text = re.sub('[^A-Za-z0-9\\s]', ' ', text)\n \n #Remove punctuation\n text = ''.join([word for word in text if word not in string.punctuation])\n \n #Bring text to lower case\n text = text.lower()\n \n #Tokenize the text\n tokens = re.split('\\W+', text)\n \n #Remove stopwords\n text = [word for word in tokens if word not in stopword]\n \n #Lemmatize the words\n text = [wn.lemmatize(word) for word in text]\n \n #Return 
text\n return text", "def sanitize(text):\n \n # Convert text to lowercase\n text = text.lower()\n\n # Replace all whitespace with a single space\n text = re.sub(r'\\s+',' ',text)\n\n # Remove all links (e.g. [abc](xyz)def --> [abc]def)\n text = re.sub(r'(\\[.*\\])(\\(.*\\))', r'\\1', text)\n\n # Remove URLs\n text = re.sub(r'((http[s]?://)?www.\\S+)|(http[s]?://\\S+)', '', text) \n\n # Split text on single spaces\n words = text.split()\n \n # Separate external punctuation then remove non-ending and non-embedded punctuation\n tokens = []\n for word in words:\n \tseparate_tokens(word, tokens)\n \n parsed_text = \"\"\n unigrams = \"\"\n bigrams = \"\"\n trigrams = \"\"\n \n # Populate lists to return\n for index, token in enumerate(tokens):\n \tparsed_text += token + ' '\n \tif token not in common:\n \t\tunigrams += token + ' '\n \t\tif index + 1 <= len(tokens)-1 and tokens[index+1] not in common:\n \t\t\tbigram = token + '_' + tokens[index+1]\n \t\t\tbigrams += bigram + ' '\n \t\t\tif index + 2 <= len(tokens)-1 and tokens[index+2] not in common:\n \t\t\t\ttrigrams += bigram + '_' + tokens[index+2] + ' '\n \n return parsed_text.strip().split() + unigrams.strip().split() + bigrams.strip().split()+ trigrams.strip().split()", "def set_clean_raw_text(raw_text):\n\tlogger.debug('Cleaning Text')\n\n\t#tokenize and lower sentence\n\ttokenizer = RegexpTokenizer(r'\\w+')\n\ttokens = tokenizer.tokenize(raw_text.lower())\t\t# tokens = nltk.word_tokenize(corpus.lower()) # without removing punctiation\n\n\t#remove stop words\n\ttokens = [w for w in tokens if not is_stopword(w)]\n\n\t#remove punctuation\n\ttokens = [w for w in tokens if not is_punctuation(w)]\n\n\t#remove short \n\ttokens = [w for w in tokens if not is_shorter(w)]\n\n\t#remove number\n\ttokens = [w for w in tokens if not is_number(w)]\n\n\t#stem words\n\ttokens = map(stem, tokens)\n\n\tlogger.debug('Cleaning Text Complete')\n\treturn set(tokens)", "def small_preprocess(data):\r\n \r\n # Remove new line characters\r\n data = [re.sub('\\s+', ' ', sent) for sent in data]\r\n # Remove distracting single quotes\r\n data = [re.sub(\"\\'\", \"\", sent) for sent in data]\r\n\r\n return data", "def clean_document(cls, text: str) -> str:\n # stop words will be removed while computing the vectorizer\n text_processed = text.translate(\n str.maketrans('', '', string.punctuation)).lower() # removing punctuations and converting to lower case\n # tokenization\n token_words = word_tokenize(text_processed)\n # stemming below\n stem_sentence = []\n for word in token_words:\n stem_sentence.append(porter.stem(word))\n stem_sentence.append(\" \")\n return \"\".join(stem_sentence)", "def cleanTweet(text, appostrophes=True, emojis=True, html=True, url=True, misspellings=True, punctuation=True, lemming=True,\\\r\n stop=True):\r\n if appostrophes:\r\n #convert appostrophes\r\n filtered_string = decontracted(text)\r\n if emojis:\r\n #decoding, removing emojis\r\n filtered_string = filtered_string.encode(\"utf-8\").decode('ascii','ignore')\r\n if html:\r\n #cleaning of html tags\r\n htmltags = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')\r\n filtered_string = re.sub(htmltags, '', filtered_string)\r\n if url:\r\n #cleaning of url\r\n url = re.compile(r'https?://\\S+|www\\.\\S+')\r\n filtered_string = re.sub(url, '', text)\r\n if misspellings:\r\n #cleaning of misspellings\r\n spell = SpellChecker()\r\n corrected_text = []\r\n misspelled_words = spell.unknown(filtered_string.split())\r\n for word in filtered_string.split():\r\n if word in 
misspelled_words:\r\n corrected_text.append(spell.correction(word))\r\n else:\r\n corrected_text.append(word)\r\n filtered_string = \" \".join(corrected_text)\r\n if punctuation:\r\n word_tokens = word_tokenize(filtered_string)\r\n #remove punctuations\r\n table=str.maketrans('','',string.punctuation)\r\n filtered_string.translate(table) \r\n filtered_string = [word.translate(table) for word in word_tokens]\r\n filtered_string = \" \".join(filtered_string)\r\n if lemming:\r\n #lemming of words\r\n word_tokens = word_tokenize(filtered_string)\r\n lemmatizer = WordNetLemmatizer() \r\n filtered_string = [lemmatizer.lemmatize(word) for word in word_tokens]\r\n if stop:\r\n # cleaning from stopwords\r\n stop_words=set(stopwords.words('english'))\r\n stop_word_drop = [] \r\n for word in filtered_string: \r\n if word not in stop_words: \r\n stop_word_drop.append(word) \r\n filtered_string = \" \".join(stop_word_drop)\r\n \r\n #toDos\r\n #cleaning of rare words\r\n # tokens is a list of all tokens in corpus\r\n # freq_dist = nltk.FreqDist(token)\r\n # rarewords = freq_dist.keys()[-50:]\r\n # after_rare_words = [ word for word in token not in rarewords]\r\n #cleaning of slang words\r\n #split attached words, not working and questionable because of all capital words\r\n # filtered_string = \" \".join(re.findall('[A-Z][^A-Z]*', filtered_string))\r\n return filtered_string", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in 
BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean(self, x): # should not contain any other arguments (use fields set in constructor instead).\n\n def repl(m):\n return chr(int('0x' + m.group(1), 16))\n\n # replace double escaped \"\\\\\" unicode strings with their unicode characters\n x = [re.sub(r'\\\\n', '\\n', message) for message in x]\n x = [re.sub(r'\\\\x([a-f0-9]{2})', repl, message) for message in x]\n x = [re.sub(r'\\\\u([a-f0-9]{4})', repl, message) for message in x]\n if self.ignore_urls:\n x = [re.sub(self.re_url, '', message) for message in x]\n\n if self.fix_contractions:\n import contractions\n x = [contractions.fix(message) for message in x]\n\n if self.remove_foreign_characters:\n # replace accented characters with unaccented\n x = [unidecode.unidecode(message) for message in x]\n\n # replace nonascii characters with space\n x = [''.join(character if ord(character) < 128 else ' ' for character in message) for message in x]\n\n # Create sentence structure like nltk gutenberg.sents()\n # list of sentences for each message:\n x = [self.sent_detector.tokenize(message.strip()) for message in x]\n # list of list of words for each message/sentence:\n x = [[self.word_tokenizer.tokenize(sentence) for sentence in message] for message in x]\n\n if self.lower:\n # lower_sents: lowercase words ignoring punctuation\n x = [[[\n word.lower() for word in sentence] for sentence in message\n ] for message in x]\n\n if self.remove_punctuation:\n x = [[[\n word for word in sentence if word not in list(string.punctuation)] for sentence in message\n ] for message in x]\n\n if self.stem:\n x = [[[self.stemmer.stem(word) for word in sentence] for sentence in message] for message in x]\n\n if self.lower and self.bigrams:\n # clean_sents: replace common adjacent words with bigrams\n x = [[self.bigram[sentence] for sentence in message] for message in x]\n\n if self.omit_stopwords:\n x = [[[word for word in sentence if word not in stopwords.words('english')] for sentence in message] for\n message in x]\n\n # convert back to one string per message (join words into sentences and sentences into messages)\n x = ['\\n'.join(' '.join(sentence) for sentence in message) for message in x]\n return x", "def trim_features():\n pass", "def clean_twitter_tokens(text):\n preprocessor.set_options(preprocessor.OPT.URL, preprocessor.OPT.RESERVED, preprocessor.OPT.MENTION,\n preprocessor.OPT.NUMBER)\n return preprocessor.clean(text)", "def space_detokenizer(batch: List[List[str]]) -> List[str]:\n return [\" \".join(tokens) for tokens in batch]", "def clean_text(text):\n\n lemmizer = WordNetLemmatizer()\n stemmer = porter.PorterStemmer()\n\n stop = stopwords.words('english')\n stop += ['.', ',', ':', '...', '!\"', '?\"', \"'\", '\"', ' - ', ' — ', ',\"', '.\"', '!', ';', '♫♫', '♫', \\\n '.\\'\"', '[', ']', '—', \".\\'\", 'ok', 'okay', 'yeah', 'ya', 'stuff', ' 000 ', ' em ', \\\n ' oh ', 'thank', 'thanks', 'la', 'was', 'wa', '?', 'like', 'go', ' le ', ' ca ', ' 
I ', \" ? \", \"s\", \" t \",\n \"ve\", \"re\"]\n # stop = set(stop)\n\n cleaned_text = []\n\n for post in text:\n cleaned_words = []\n\n # remove parentheticals\n clean_parens = re.sub(r'\\([^)]*\\)', ' ', post)\n\n #clean_parens = [line.decode('utf-8').strip() for line in clean_parens]\n\n # tokenize into words\n for word in wordpunct_tokenize(clean_parens):\n\n\n # lowercase and throw out any words in stop words\n if word.lower() not in stop:\n\n # lemmatize to roots\n low_word = lemmizer.lemmatize(word)\n\n # stem and lowercase ( an alternative to lemmatize)\n # low_word = stemmer.stem(root.lower())\n\n # keep if not in stopwords (yes, again)\n if low_word.lower() not in stop:\n # put into a list of words for each document\n cleaned_words.append(low_word.lower())\n\n # keep corpus of cleaned words for each document\n cleaned_text.append(' '.join(cleaned_words))\n\n\n return cleaned_text", "def normalize_tokens(tokens, language):\n try:\n stopwords = set(nltk.corpus.stopwords.words(language))\n except IOError:\n stopwords = {}\n return [t for t in tokens if t.isalnum() and t not in stopwords]", "def _clean_data(self, dataset):\n dataset.dropna(inplace=True)\n # Problem: handle missing data (in a different way), noisy data, inconsistent data", "def preprocess_sent(sent):\n #tokenized = word_tokenize(sent.lower())\n tokenizer = Tok()\n tokenized = tokenizer.tokenize(sent.lower())\n return tokenized", "def setOwnTokens(self):\n\t\tself.removeOwnPunctuation()\n\t\tself.removeOwnStopWords()", "def clean(c):", "def __cleanText(self,stripNonAlphaNumeric=False, stripNumbers=False):\n if stripNonAlphaNumeric:\n txt = r1.sub(\" \",self.getRawText() )\n else:\n txt = self.getRawText()\n # clean up white spaces\n txt = r2.sub(\" \",txt)\n if stripNumbers:\n txt = r3.sub(\"\",txt)\n self.graph[\"__txt\"] = txt\n self.graph[\"__scope\"] = (0,len(txt))", "def _remove_stopwords(data, settings):\n column = settings['input_col']\n output_col = settings['output_col']\n frag = settings['id_frag']\n\n stopwords = settings['news_stops_words']\n stopwords += settings['stopwords']\n stopwords = np.unique(stopwords)\n\n tmp = []\n if data.shape[0] > 0:\n if settings['case_sensitive']:\n stopwords = set(stopwords)\n for tokens in data[column].values:\n tmp.append(list(set(tokens).difference(stopwords)))\n\n else:\n stopwords = set([tok.lower() for tok in stopwords])\n\n for tokens in data[column].values:\n entry = [tok.lower() for tok in tokens]\n tmp.append(list(set(entry).difference(stopwords)))\n\n else:\n tmp = np.nan\n\n if output_col in data.columns:\n data.drop([output_col], axis=1, inplace=True)\n\n data[output_col] = tmp\n\n info = generate_info(data, frag)\n return data, info", "def remove_tokens(self, text):\r\n\r\n return text.replace(self.PAD_TK, \"\").replace(self.UNK_TK, \"\")", "def text_clean(text):\n out = []\n # Define a punctuation dictionary so that we can replace each punctuation with an empty space.\n table = str.maketrans('', '', string.punctuation)\n stopWords = set(stopwords.words('senti')) # Set stop words language to English\n tokens = text.split() # Split each tweet into list of words.\n tokens = filter(lambda x: x[0] != '@', tokens) # Remove mentions\n tokens = [word.translate(table) for word in tokens] # Remove punctuation marks\n tokens = [word for word in tokens if word.isalpha()] # Remove any word that is not completely alphabetic.\n tokens = [word for word in tokens if len(word) > 1] # Remove any word that is shorter than two letters\n tokens = [word.lower() for word in 
tokens]\n tokens = [word for word in tokens if not word in stopWords] # Remove any stopwords\n token = \"\"\n for i in tokens:\n token += (i + \" \")\n out.append(token)\n return out", "def sanitize_diagnoses(df):\n df = df.str.replace(\"\\W\", \"\") # \"\\W\" regex represents ANY non-alphanumeric character\n# assert (df.str.contains(\"\\W\")).any(), \"At least one diagnosis has a non-alphanumeric character in it\"\n return df", "def stringNormalization(listOfInput):\n\n stop = stopwords.words('english')\n stop_words = (nltk.corpus.stopwords.words('english') + \n ['.', ',', '--', '\\'s', '?', '!', ')', '(', ':', '\\'','\\'re', '\"',\n '-', '}', '{', u'—', 'rt', 'http', 't', 'co', '@', '#',])\n tokens = []\n for wordInput in listOfInput:\n tokens += (nltk.tokenize.word_tokenize(\n re.sub(r'W+', '', wordInput.lower())))\n stemmer = PorterStemmer()\n stemmed = []\n for token in tokens:\n if '/t.co/' in token or token is '' or token in stop_words:\n continue\n stemmed.append(stemmer.stem(token))\n return stemmed", "def clean_training_text(txt):\n return re.sub('[^A-Za-z0-9]+', ' ', str(txt)).strip()", "def cleanTweetText(tweet):\n twext = excludeTwitterTags(tweet)\n twext = stripPunctuation(twext)\n return twext", "def cleanTweetText(tweet):\n twext = excludeTwitterTags(tweet)\n twext = stripPunctuation(twext)\n return twext", "def clean_data(df):\n \n # Put in code here to execute all main cleaning steps:\n # convert missing value codes into NaNs, ...\n count_miss = df.isnull().sum(axis=0).values #find number of nans for each column\n count_miss = [val for val in count_miss]\n \n drop_cols = []\n\n for ind, val in enumerate(count_miss):\n if val > 200000:\n drop_cols.append(ind)\n \n df_drop_cols = list(azdias.columns[drop_cols])\n df = df.drop(df_drop_cols, axis=1)\n \n for col in range(df.shape[1]): #loop through columns\n column_name = df.columns[col] #get column name\n missing_list = feat_info.iloc[col,3] #get missing_or_unknown column from feature info\n missing_list = missing_list.replace('[','') #remove left bracket from string\n missing_list = missing_list.replace(']','') #remove right bracket from string\n missing_list = missing_list.split(',') #split into individual strings\n \n #find data that is natually missing and continue loop to omit\n if missing_list == ['']:\n continue\n \n else:\n for dat_type in missing_list: \n if df[column_name].dtype == 'object': #find values that contain x\n df.loc[df[column_name] == dat_type, column_name] = np.nan #replace x with nan\n \n else:\n dat_type = int(dat_type) #if no x, convert to integer and replace with nan\n df.loc[df[column_name] == dat_type, column_name] = np.nan\n \n # select, re-encode, and engineer column values.\n \n # encode OST_WEST_KZ\n df.loc[df['OST_WEST_KZ'] == 'W','OST_WEST_KZ'] = 0\n df.loc[df['OST_WEST_KZ'] == 'O','OST_WEST_KZ'] = 1\n \n # Re-encode categorical variable(s) to be kept in the analysis.\n \n \n #get list of attributes with type categorical\n feat_info[feat_info['type'] == 'categorical']\n \n cat_new_cols = [] #initialize\n for i in feat_info[feat_info['type'] == 'categorical']['attribute']:\n cat_new_cols.append(i)\n \n for cols in df.columns:\n if cols in cat_new_cols:\n if df[cols].nunique(dropna=True) > 2: #if the number of unique values is greater than 2 \n df = df.drop(cols, axis=1) #drop from the analysis\n print(\"more than 2 categories: {}\".format(cols))\n \n else:\n if not df[cols].unique()[0] > 0:\n #if not df[cols].unique()[0] > 0:\n dummies = pd.get_dummies(df[cols], prefix=cols)\n df = 
df.drop(cols, axis=1) #create dummy variable\n df = df.join(dummies)\n print(\"transformed to dummy variable: {}\".format(cols))\n \n # create variable: MOVEMENT\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([1,3,5,8,10,12,14]),'MOVEMENT'] = 1\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([2,4,6,7,9,11,13,15]),'MOVEMENT'] = 2\n \n #Capture Decade\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([1,2]), 'DECADE'] = 40\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([3,4]), 'DECADE'] = 50\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([5,6,7]), 'DECADE'] = 60\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([8,9]), 'DECADE'] = 70\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([10,11,12,13]), 'DECADE'] = 80\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([14,15]), 'DECADE'] = 90\n \n df['CAMEO_INTL_2015'] = df['CAMEO_INTL_2015'].astype(float)\n\n # create new variable: WEALTH\n df.loc[df['CAMEO_INTL_2015'].isin([51,52,53,54,55]), 'WEALTH'] = 1\n df.loc[df['CAMEO_INTL_2015'].isin([41,42,43,44,45]), 'WEALTH'] = 2\n df.loc[df['CAMEO_INTL_2015'].isin([31,32,33,34,35]), 'WEALTH'] = 3\n df.loc[df['CAMEO_INTL_2015'].isin([21,22,23,24,25]), 'WEALTH'] = 4\n df.loc[df['CAMEO_INTL_2015'].isin([11,12,13,14,15]), 'WEALTH'] = 5\n \n # create new variable: LIFE_STAGE\n df.loc[df['CAMEO_INTL_2015'].isin([11,21,31,41,51]),'LIFE_STAGE'] = 1\n df.loc[df['CAMEO_INTL_2015'].isin([12,22,32,42,52]),'LIFE_STAGE'] = 2\n df.loc[df['CAMEO_INTL_2015'].isin([13,23,33,43,53]),'LIFE_STAGE'] = 3\n df.loc[df['CAMEO_INTL_2015'].isin([14,24,34,44,54]),'LIFE_STAGE'] = 4\n df.loc[df['CAMEO_INTL_2015'].isin([15,25,35,45,55]),'LIFE_STAGE'] = 5\n \n # remove selected columns and rows, ...\n df = df.drop('PRAEGENDE_JUGENDJAHRE', axis=1)\n df = df.drop('CAMEO_INTL_2015',axis=1)\n \n # Return the cleaned dataframe.\n return df", "def remove_punc_sw(self, docs):\n \n new_docs = []\n \n for text in docs:\n \n for p in punc:\n text = text.replace(p,' ')\n text = text.replace('-', '')\n text = text.replace(\"’\", ' ')\n text = text.lower()\n tokens = word_tokenize(text)\n filtered_tokens = list(filter(lambda token: token not in stopwords, tokens))\n \n new_text = \" \".join(filtered_tokens)\n new_docs.append(new_text)\n \n return pd.Series(new_docs)", "def preprocess_corpus(corpus): \n \n # print 'preprocessing words'\n # remove space\n # text = re.findall(r'\\w+', corpus) # for [a-zA-Z0-9_]\n text = re.findall(r'[a-zA-Z]+', corpus) # for [a-zA-Z] keep words only no numbers and '_' \n words = [w.lower() for w in text]\n # print words \n \n # stemmer based on existing ones in the current list\n lemma = nltk.WordNetLemmatizer()\t\t\t#extract the original word pattern\n lemmed_words = [lemma.lemmatize(w) for w in words]\n \n # tag lemmed_words\n tagged_words = nltk.pos_tag(lemmed_words)\n # print tagged_words \n \n processed_words = []\n tag_list = ['CC', 'DT', 'EX', 'IN', 'MD', \n 'PDT', 'POS', 'PRP', 'PRP$', 'TO', \n 'WDT', 'WP', 'WRB']\n for word, tag in tagged_words:\n if tag in tag_list:\n pass \n else: \n processed_words.append(word)\n \n return processed_words", "def clean_all(text):\n # anticipate Null values in columns that will be cleaned\n if text is not None and type(text) is not float:\n text = \"\".join(text)\n no_ucode = clean_unicode(text)\n no_space = \"\".join(clean_whitespaces(no_ucode.strip()))\n text = no_space.strip()\n\n return text", "def detokenize(self, tokens):\n text = ' '.join(tokens)\n step0 = text.replace('. . 
.', '...')\n step1 = step0.replace(\"`` \", '\"').replace(\" ''\", '\"')\n step2 = step1.replace(\" ( \", \" (\").replace(\" ) \", \") \")\n step3 = re.sub(r' ([.,:;?!%]+)([ \\'\"`])', r\"\\1\\2\", step2)\n step4 = re.sub(r' ([.,:;?!%]+)$', r\"\\1\", step3)\n step5 = step4.replace(\" '\", \"'\").replace(\" n't\", \"n't\")\\\n .replace(\" nt\", \"nt\").replace(\"can not\", \"cannot\")\n step6 = step5.replace(\" ` \", \" '\")\n return step6.strip()", "def preprocessSentence(sentence):\n tokenizedSentence = tokenize.word_tokenize(sentence.lower())\n lemmatized = [lemma.lemmatize(token) for token in tokenizedSentence]\n\n noStopwords = [lemma for lemma in lemmatized\n if lemma not in englishStopwords\n and len(lemma) > 2\n and lemma.count(\"'\") != 1]\n noOddChars = [re.sub('[^\\w\\s]','',word) for word in noStopwords]\n return noOddChars", "def gb_cleaner(df):\n df['tag'] = df.tags.apply(retagger)\n \n c_list = df.text.tolist()\n\n clean_corpus = []\n for docs in c_list:\n clean_corpus.append(data_cleaner(docs))\n \n df['clean'] = clean_corpus\n\n df = df.drop(['text', 'tags', 'stars'], axis= 1)\n \n return df", "def remove_stop_words(dataset):\n for n in range(len(dataset)):\n try:\n # concatenate the title and keywords\n current_title = dataset.iloc[n][\"Title of Post\"]\n current_description = dataset.iloc[n][\"Post Description\"]\n\n token_title = word_tokenize(current_title)\n token_description = word_tokenize(current_description)\n filtered_title = []\n filtered_description = []\n\n for word in token_description:\n if word not in stop_words:\n filtered_description.append(word)\n\n filtered_description = listToString(filtered_description)\n\n for word in token_title:\n if word not in stop_words:\n filtered_title.append(word)\n\n filtered_title = listToString(filtered_title)\n\n dataset.iloc[n][\"Title of Post\"] = filtered_title\n dataset.iloc[n][\"Post Description\"] = filtered_description\n\n except:\n pass\n\n return dataset", "def clean(tweet):\n #Separates the contractions and the punctuation\n\n\n tweet = re.sub(\"[!#.,\\\"]\", \"\", tweet).replace(\"<user>\", \"\")\n tweet = re.sub(\"[!#.,\\\"]\", \"\", tweet).replace(\"<url>\", \"\")\n tweet = correct_spell(tweet)\n return tweet.strip().lower()", "def _text_clean(self):\n try:\n self.text = eval(self.text[0])[0]['node']['text']\n self.clean = True\n except IndexError:\n return", "def clean_token(token):\n return token.strip().lower().replace(' ', '_')", "def clean_non_word_chars(tokens):\n toks = []\n for token in tokens:\n t = re.sub(r'\\W', \"\", token)\n if len(t) > 1:\n toks.append(t)\n\n return toks", "def clean_term(term, site='', siteWordCount=None, dataType=''): # dtype\n if pd.isna(term): \n print(\"(clean_term) Input term is NaN: {}\".format(term))\n return ''\n if not isinstance(term, str): \n return str(term)\n\n insigWords = LoincTable.stop_words # [\"IN\", \"FROM\", \"ON\", \"OR\", \"OF\", \"BY\", \"AND\", \"&\", \"TO\", \"BY\", \"\", \" \"]\n \n modTerm = (term.replace(\"'\", \"\").replace(\",\", \" \").replace(\".\", \" \") \\\n .replace(\":\", \" \").replace('\\t', \" \").replace(\"^\", \" \").replace(\"+\", \" \")\\\n .replace(\"*\", \" \").replace(\"~\", \" \").replace(\"(\", \" \").replace(\")\", \" \")\\\n .replace(\"!\", \" \").replace(\"[\", \" \").replace(\"]\", \" \").replace(\"{\", \" \").replace(\"}\", \" \")\\\n .replace(\"_\", \" \").replace(\"|\", \" \").replace('\"', \" \").split(\" \"))\n\n #############################################################################\n i = 0\n while i < 
len(modTerm):\n modTerm[i] = re.sub(r\"\\d{1,2}[\\/-]\\d{1,4}([\\/-]\\d{2,4})*|\\d{6}\", \"\", modTerm[i])\n if modTerm[i] != None and len(modTerm[i]) > 0:\n i = i + 1\n else:\n modTerm.remove(modTerm[i])\n #############################################################################\n\n # remove repeated tokens \n modTerm = sorted(set(modTerm), key=modTerm.index)\n\n j = 0\n nameSplit = list()\n while j < len(modTerm):\n splits = modTerm[j].replace(\"/\", \" \").replace(\"\\\\\", \" \").replace(\"-\", \" \").split(\" \")\n k = 0\n while ((k < len(splits)) and (len(splits[k]) > 0) and (splits[k] not in insigWords)):\n newWord = splits[k].strip()\n nameSplit.append(newWord)\n\n if len(site) > 0 and isinstance(siteWordCount, dict): \n siteWordCount[site][newWord] += 1\n k = k + 1\n j = j + 1\n\n return \" \".join(nameSplit)", "def prepare_data(self, data):\n # Break string into a list of sentances\n in_sentances = tokenize.sent_tokenize(data)\n out_sentances = list()\n for sentance in in_sentances:\n # Turn each word in sentance into its lemma\n lemmas = [self.lemmatizer.lemmatize(word) for word in sentance.split(\" \")]\n # Filters out all words that fail the is_valid_lemma function\n lemmas = [lemma for lemma in lemmas if self.is_valid_lemma(lemma)]\n # Joins words back together and add to list\n sentance = ' '.join(lemmas)\n out_sentances.append(sentance)\n return out_sentances", "def filter_token(token: Text) -> Text:\n def strip_enum(token: Text) -> Text:\n \"\"\"\n Remove any enumerations from the given token\n\n Parameters\n ----------\n token: Text :\n The token that we want to remove any enumerations from\n Returns\n -------\n A filtered version of the token that does not have any\n enumerations.\n \"\"\"\n if not token:\n return ''\n if token[0] == '(' and token[len(token) - 1] != ')':\n return ''\n if token[0] != '(' or (token[0] == '(' and token[len(token) -\n 1] == ')'):\n return ''.join(enum_filter.split(token))\n return ''\n\n if email_filter.match(token) or (\n stop_words and token in stop_words\n ):\n return ''\n # Strip enumeration from token\n token = strip_enum(token)\n # Strip punctuation from token\n token = ''.join(punc_filter.split(token))\n # Strip numbers from token\n token = ''.join(num_filter.split(token))\n # Remove non-printable characters\n token = ''.join(c for c in token if c in printable_chars)\n\n return '' if len(token) < 3 else token", "def clean(data_org):\n #cleaning up the text\n data = data_org.lower()\n data = data.replace(';','.')\n data = data.replace(':','.')\n data = data.replace('-',' ')\n data = data.replace('/',' ')\n # data = data.replace('\\n',' ')\n data = re.sub(r'\\([^)]*\\)', '', data)\n pattern = r'\\[[^\\]]*\\]'\n data = re.sub(pattern, '', data)\n\n if '\\n' in data:\n #newline handling\n data = '\\r\\n'.join([x for x in data.splitlines() if x.strip()])\n data = data.split('\\n')\n #removing punctuation at end of line\n data = [x[:-1] for x in data]\n for x in data:\n if x[:-1] in string.punctuation:\n x[:-1]\n #remove digits\n data = [re.sub(r\"\\d+\", \"\", x) for x in data]\n #remove tabs\n data = [x.replace('\\t',' ') for x in data]\n #remove excess spaces\n data = [' '.join(x.split()) for x in data]\n #remove trailing and leading spaces\n data = [x.strip() for x in data]\n #remove empty elements from list\n data = list(filter(None, data))\n #rejoin list into string\n data = '. '.join(data)\n\n data_list = data.split('. 
')\n #remove digits\n data_list = [re.sub(r\"\\d+\", \"\", x) for x in data_list]\n #strip leading and trailing spaces\n data_list = [x.strip() for x in data_list]\n #remove all extra spaces\n data_list = [' '.join(x.split()) for x in data_list]\n #remove punctuation\n data_list = [x.translate(str.maketrans('', '', string.punctuation)) for x in data_list]\n #filter out none elements\n data_list = list(filter(None, data_list))\n data_list = [x for x in data_list if len(x) > 1]\n data = '. '.join(data_list)\n\n return data", "def clean_unnecessary_characters(self, tweet):\n tweet = tweet.lstrip(\"\\\"\").rstrip(\"\\\"\")\n tweet = re.sub(self.compiledAlphanumericRegex, ' ', tweet)\n tweet = tweet.replace('_', ' ')\n return tweet", "def run_sanitize_characters():\n sanitize_characters('politics_past_30_months_comments.csv', 'politics_past_30_months_comments_cleaned.csv')\n\n df = pd.read_csv('politics_past_30_months_comments_cleaned.csv')\n df = df.drop(['Unnamed: 0'], axis=1)\n print(df.head())", "def process_text(text, stem=True):\n exclude = set(string.punctuation)\n text = ''.join(ch for ch in text if ch not in exclude)\n #text = text.translate(None, string.punctuation)\n tokens = word_tokenize(text)\n if stem:\n stemmer = PorterStemmer()\n tokens = [stemmer.stem(t) for t in tokens]\n return tokens" ]
[ "0.66215855", "0.61963105", "0.61640877", "0.6127651", "0.6074511", "0.6014394", "0.59874463", "0.5976716", "0.5957324", "0.59493726", "0.59331477", "0.59058803", "0.5905406", "0.59032536", "0.5874476", "0.58563834", "0.584708", "0.58449286", "0.58449286", "0.5819846", "0.5792583", "0.5792487", "0.5790692", "0.5789129", "0.5786255", "0.5782791", "0.5766726", "0.57614744", "0.5750871", "0.5723139", "0.57190603", "0.57138366", "0.5711118", "0.5710145", "0.57090425", "0.5708208", "0.5691648", "0.5683113", "0.56647533", "0.56613547", "0.5654258", "0.5652126", "0.56316614", "0.5605484", "0.5599574", "0.5599", "0.5590072", "0.5590028", "0.5589053", "0.55799204", "0.5578504", "0.55731106", "0.55650806", "0.5558549", "0.5554797", "0.5552216", "0.55515325", "0.5544461", "0.5544461", "0.5544461", "0.5544461", "0.5544461", "0.5544461", "0.55412644", "0.55406314", "0.5534514", "0.5519195", "0.5512465", "0.55060285", "0.55042696", "0.55041337", "0.5503152", "0.5491287", "0.5490123", "0.5488578", "0.54764026", "0.5474899", "0.5473678", "0.54659516", "0.54618", "0.5455768", "0.5455768", "0.54345274", "0.543278", "0.5431715", "0.54224974", "0.5419781", "0.5411491", "0.54111356", "0.54060316", "0.5404151", "0.54024255", "0.53979117", "0.53849417", "0.5384547", "0.53789437", "0.5376047", "0.5368213", "0.5366686", "0.53635204", "0.5360191" ]
0.0
-1
Builds a vocabulary mapping from word to index based on the sentences. Returns vocabulary mapping and inverse vocabulary mapping.
def build_vocab(sentences): # Build vocabulary word_counts = Counter(itertools.chain(*sentences)) # Mapping from index to word vocabulary_inv = [x[0] for x in word_counts.most_common()] # Mapping from word to index vocabulary = {x: i for i, x in enumerate(vocabulary_inv)} return [vocabulary, vocabulary_inv]
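A minimal usage sketch for the build_vocab document above (illustrative only, not part of the dataset record; it assumes the function definition shown above is in scope, and the tokenized sentences are made-up placeholders):

from collections import Counter
import itertools

sentences = [["the", "cat", "sat"], ["the", "dog", "sat"]]  # hypothetical tokenized input
vocabulary, vocabulary_inv = build_vocab(sentences)
# vocabulary maps word -> index by descending frequency: {"the": 0, "sat": 1, "cat": 2, "dog": 3}
# vocabulary_inv is the reverse lookup list: vocabulary_inv[0] == "the"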
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_vocab(self, sentences):\n\t\t# Build the vocab\n\t\tword_counts = collections.Counter(sentences)\n\n\t\t# Mapping from index to word (get the indices of most common words)\n\t\tvocab_inv = [x[0] for x in word_counts.most_common()] # Do we need this?\n\t\tvocab_inv = list(sorted(vocab_inv))\n\n\t\t# Mapping from word to index\n\n\t\tvocab = {x: i for i,x in enumerate(vocab_inv)}\n\n\t\treturn [vocab, vocab_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences)) # 实际没用到\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv = list(sorted(vocabulary_inv))\n # 加入 <UNK>\n vocabulary_inv.insert(0, '</s>')\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences, saved_vocabulary_inv):\n if saved_vocabulary_inv:\n vocabulary_inv = saved_vocabulary_inv\n else:\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv.append('<pad>')\n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common() if x[1] > 1]\n vocabulary_inv += ['$']\n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def word2vec_mapping_func():\n return {\"belonging to\": \"belonging\", \"parked on\": \"parked\", \"growing on\": \"growing\", \"standing on\": \"standing\",\n \"made of\": \"made\", \"attached to\": \"attached\", \"hanging from\": \"hanging\", \"in front of\": \"front\",\n \"lying on\": \"lying\", \"flying in\": \"flying\", \"looking at\": \"looking\", \"on back of\": \"back\",\n \"laying on\": \"laying\", \"walking on\": \"walking\", \"walking in\": \"walking\", \"sitting on\": \"sitting\",\n \"covered in\": \"covered\", \"part of\": \"part\", \"painted on\": \"painted\", \"mounted on\": \"mounted\"}", "def _build_vocab(self, sentences, markers=[]):\n from snorkel.learning.pytorch.rnn.utils import SymbolTable\n\n vocab = Counter()\n for sent in sentences:\n for w in sent:\n vocab[w] += 1\n word_dict = SymbolTable()\n list(map(word_dict.get, vocab))\n list(map(word_dict.get, markers))\n return word_dict", "def construct_dict(self):\n i = 0\n self.word2idx = dict()\n fi = open(self.config.word_vec_fi_glove, 'r')\n\n for line in fi:\n self.word2idx[line.split(\" 
\")[0]] = i\n i += 1\n\n self.vocab_size = i\n self.write_dict()\n fi.close()", "def build_vocab(sentences, max_num_words):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences)).most_common()\n if max_num_words != 0 and max_num_words < len(word_counts):\n word_counts = word_counts[:max_num_words]\n\n # Mapping from index to word\n vocabulary = dict()\n index = 0\n for x in word_counts:\n vocabulary[index] = x[0]\n index += 1\n\n return vocabulary", "def build_vocab(sentences, vocab_limit):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n print( 'Total size of vocab is {}'.format(len(word_counts.most_common())))\n # Mapping from index to word\n # vocabulary_inv = [x[0] for x in word_counts.most_common(vocab_limit)]\n vocabulary_inv = [x[0] for x in word_counts.most_common(vocab_limit)]\n \n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i+1 for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def make_idx2word():\n idx2word = {}\n d = train_data.shared['word2idx']\n for word, idx in d.items():\n print(word)\n idx2word[idx] = word\n if config.use_glove_for_unk:\n d2 = train_data.shared['new_word2idx']\n for word, idx in d2.items():\n print(word)\n idx2word[idx+len(d)] = word\n return idx2word", "def build_vocab(sentences_list, vocab_size, visual_fld):\n words = [word for sentence in sentences_list for word in sentence]\n utils.safe_mkdir(visual_fld)\n with open(os.path.join(visual_fld, 'vocab.tsv'), 'w') as fd:\n dictionary = {}\n index_dictionary = {}\n count = [('UNK', -1)]\n count.extend(Counter(words).most_common(vocab_size - 1))\n for index, (word, _) in enumerate(count):\n dictionary[word] = index\n index_dictionary[index] = word\n fd.write(word + '\\n')\n\n return dictionary, index_dictionary", "def get_sentence_to_context_map(sentences):\n # Load the vocab\n en_vocab = get_english_vocab(DATA_DIR,VOCAB_SIZE)\n\n # Allocate the sentences to buckets\n bucketed = {}\n for sentence in sentences:\n bucket_id = get_bucket(en_vocab,sentence)\n bucketed.setdefault(bucket_id,[])\n bucketed[bucket_id].append(sentence)\n\n mapped = {}\n with tf.Session() as sess:\n # Create model and load parameters.\n model = create_model(sess, True, train_dir=TRAIN_DIR)\n model.batch_size = BATCH_SIZE # We decode 64 sentence at a time.\n # Iterate over each bucket\n for bucket_id,sentences in bucketed.iteritems():\n for batch in chunker(sentences,BATCH_SIZE):\n data = []\n # Tokenize each sentence\n for sentence in batch:\n token_ids = data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence), en_vocab)\n expected_output = []\n data.append((token_ids, expected_output))\n # Use the model to obtain contexts for each sentence in the batch\n encoder_inputs, decoder_inputs, target_weights = model.get_batch({bucket_id: data}, bucket_id)\n contexts = model.step_context(sess, encoder_inputs, decoder_inputs, target_weights, bucket_id)\n features = np.hstack(contexts)\n print 'Encoded {0} sentences into {1} dimensional vectors'.format(*features.shape)\n # Now we align sentences with their contexts\n for i,sentence in enumerate(batch):\n mapped[sentence] = features[i,:].tolist()\n return mapped", "def create_vocabulary(sentences, path):\n print('creating vocab..')\n\n word_dict = dict(); vocabulary = dict()\n for sentence in sentences:\n for word in nltk.word_tokenize(sentence):\n if word not in word_dict:\n word_dict[word] = ''\n word_dict['<s>'] = ''\n word_dict['</s>'] = ''\n\n with open(path, 
encoding=\"utf8\") as f:\n for line in f:\n word, vec = line.split(' ', 1)\n if word in word_dict:\n vocabulary[word] = np.fromstring(vec, sep=' ')\n\n print('vocabulary was created successfully!')\n return vocabulary", "def word_mapping(sentences, lower):\n words = [[x[0].lower() if lower else x[0] for x in s] for s in sentences]\n dico = create_dico(words)\n dico['<UNK>'] = 10000000\n word_to_id, id_to_word = create_mapping(dico)\n print(\"Found %i unique words (%i in total)\" % (\n len(dico), sum(len(x) for x in words))\n )\n return dico, word_to_id, id_to_word", "def build_Wordv(word2vec_dict, k):\r\n vocab_size = len(word2vec_dict)\r\n word2id_dict = dict()\r\n W = np.zeros(shape=(vocab_size + 1, k))\r\n W[0] = np.zeros(k)\r\n i = 1\r\n for word in word2vec_dict:\r\n # print type(word), ' | ', word\r\n W[i] = word2vec_dict[word]\r\n # print type(W[i]), \" | \", W[i]\r\n word2id_dict[word] = i\r\n i += 1\r\n return W, word2id_dict", "def index2words(index_sentence, vcb_file):\n\n sentence = ''\n indx_dict = {}\n vcb = open(vcb_file).readlines()\n for line in vcb:\n line = line.split()\n indx_dict[int(line[0])] = line[1]\n\n for word in index_sentence:\n\n if word == -1:\n sentence += '_eps_' + ' '\n else:\n sentence += indx_dict[word] + ' '\n return sentence", "def build_idx(vocab):\n word2index = {}\n index2word = {}\n\n word2index['PAD'] = 0\n index2word[0] = 'PAD'\n\n word2index['UNK'] = 1\n index2word[1] = 'UNK'\n\n for i,word in enumerate(vocab):\n word2index[word.lower()] = i+2\n index2word[i+2] = word.lower()\n\n return word2index, index2word", "def build_vocab(words, vocab_size, visual_fld=None):\n utils.safe_mkdir(visual_fld)\n file = open(os.path.join(visual_fld, 'vocab.tsv'), 'w',encoding='utf8')\n\n dictionary = dict()\n count = [('UNK', -1)]\n index = 0\n count.extend(Counter(words).most_common(vocab_size - 1))\n\n for word, _ in count:\n dictionary[word] = index\n index += 1\n file.write(word + '\\n')\n\n index_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n file.close()\n return dictionary, index_dictionary", "def word_mapping(sentences, lower):\n words = [[x[0].lower() if lower else x[0] for x in s] for s in sentences]\n dico = create_dico(words)\n\n dico['<PAD>'] = 10000001\n dico['<UNK>'] = 10000000\n dico = {k:v for k,v in dico.items() if v>=3}\n word_to_id, id_to_word = create_mapping(dico)\n\n print(\"Found %i unique words (%i in total)\" % (\n len(dico), sum(len(x) for x in words)\n ))\n return dico, word_to_id, id_to_word", "def get_vocab(self):\n word2id = {}\n for document in self.docs:\n for word in document:\n if word not in word2id.keys():\n word2id[word] = len(word2id)\n return word2id", "def build_doc_sense_vec(self):\n\t\twith codecs.open(self.vocab_file, encoding='utf-8', mode='r') as infile:\n\t\t\tline = infile.readline()\n\t\t\ti = 0\n\t\t\twhile line:\n\t\t\t\tword = line.split()[0]\n\t\t\t\tif not self.word2IdVocabulary.has_key(word):\n\t\t\t\t\t# print i, word\n\t\t\t\t\t# else:\n\t\t\t\t\tself.word2IdVocabulary[word] = i\n\t\t\t\tif not self.id2WordVocabulary.has_key(i):\n\t\t\t\t\tself.id2WordVocabulary[i] = word\n\t\t\t\tline = infile.readline()\n\t\t\t\ti += 1\n\t\t\tself.vocab_num = len(self.word2IdVocabulary)\n\t\t\tprint \"vocabulary number:\" + str(self.vocab_num)\n\n\t\twith codecs.open(self.vec_file, encoding='utf-8', mode='r') as vecfile:\n\t\t\twith codecs.open(self.vec_out_file, encoding='utf-8', mode='a+') as vec_outfile:\n\n\t\t\t\tfor i, line in enumerate(vecfile):\n\t\t\t\t\tif i % 10000 == 0:\n\t\t\t\t\t\tprint 
i\n\t\t\t\t\t# if i > 72:\n\t\t\t\t\t# \tbreak\n\t\t\t\t\tif i == 0:\n\t\t\t\t\t\ta, b, c = map(int, line.split()[:3])\n\t\t\t\t\t\tprint('Number of sememes: {}\\n'\n\t\t\t\t\t\t\t 'Number of words: {}\\n'\n\t\t\t\t\t\t\t 'Dimension of vectors: {}'.format(a, b, c))\n\t\t\t\t\telif i > 462667:\n\t\t\t\t\t\tsline = line.strip('\\n').split()\n\t\t\t\t\t\tword = sline[0]\n\t\t\t\t\t\tvector_list = []\n\t\t\t\t\t\tvector_list.append(sline[1:])\n\t\t\t\t\t\tvector_array = np.array(vector_list)\n\t\t\t\t\t\tword_id = self.word2IdVocabulary[word]\n\t\t\t\t\t\tif not self.vectors.has_key(word_id):\n\t\t\t\t\t\t\tself.vectors[word_id] = vector_array\n\t\t\t\t\t\t# vector_mean = np.mean(vector_array, axis=0)\n\t\t\t\t\t\tif not self.vector_mean.has_key(word_id):\n\t\t\t\t\t\t\tself.vector_mean[word_id] = vector_array\n\t\t\t\t\t\t# vec_outfile.write(line)\n\t\t\t\t\telif i > 462887:\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tsline = line.strip('\\n').split()\n\t\t\t\t\t\tword = sline[0]\n\t\t\t\t\t\tsense_num = int(sline[1])\n\t\t\t\t\t\tvectors = sline[2:sense_num*c+2] # (sense_num*c+2)\n\t\t\t\t\t\tvector_list = []\n\t\t\t\t\t\tfor start in range(0, len(vectors), c):\n\t\t\t\t\t\t\tvector_list.append(list(map(float, vectors[start: start+c])))\n\t\t\t\t\t\tvector_array = np.array(vector_list)\n\t\t\t\t\t\tword_id = self.word2IdVocabulary[word]\n\t\t\t\t\t\tif not self.vectors.has_key(word_id):\n\t\t\t\t\t\t\tself.vectors[word_id] = vector_array\n\t\t\t\t\t\tvector_mean = np.mean(vector_array, axis=0)\n\t\t\t\t\t\tif not self.vector_mean.has_key(word_id):\n\t\t\t\t\t\t\tself.vector_mean[word_id] = vector_mean\n\t\t\t\t\t\t'''j = 0\n\t\t\t\t\t\tfor each_sense_vec in vector_array:\n\t\t\t\t\t\t\tif len(vector_array) > 1:\n\t\t\t\t\t\t\t\tnew_line = word + '_' + str(j) + ' ' + np.array2string(each_sense_vec, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tformatter={'float_kind': lambda x: '%6f' % x})[1:-1] + '\\n'\n\t\t\t\t\t\t\t\tj += 1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tnew_line = word + ' ' + np.array2string(each_sense_vec, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t formatter={'float_kind': lambda\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t x: '%6f' % x})[1:-1] + '\\n'\n\n\t\t\t\t\t\t\tvec_outfile.write(new_line)'''\n\n\t\twith codecs.open(self.doc_file, encoding='utf-8', mode='r') as docfile:\n\t\t\twith codecs.open(self.doc_out_file, encoding='utf-8', mode='a+') as doc_outfile:\n\t\t\t\twith codecs.open(self.vec_out_file_bydoc, encoding='utf-8', mode='a+') as vec_outfile_bydoc:\n\t\t\t\t\tprint \"Processing document file......\"\n\t\t\t\t\tline = docfile.readline().strip('\\n')\n\t\t\t\t\twhile line:\n\t\t\t\t\t\twords = line.split()\n\t\t\t\t\t\tnew_words = [x for x in words]\n\t\t\t\t\t\tfor i in range(len(words)):\n\t\t\t\t\t\t\tword_id = self.word2IdVocabulary[words[i]]\n\t\t\t\t\t\t\tsense_vecs = self.vectors[word_id]\n\t\t\t\t\t\t\tsense_num = len(sense_vecs)\n\t\t\t\t\t\t\tif sense_num > 1:\n\t\t\t\t\t\t\t\tcontext_words = []\n\t\t\t\t\t\t\t\tfor x in range(i-int(self.context_num), i+int(self.context_num)+1):\n\t\t\t\t\t\t\t\t\tif x != i and 0 <= x < len(words):\n\t\t\t\t\t\t\t\t\t\tcontext_words.append(words[x])\n\t\t\t\t\t\t\t\tsense_index = self.select_attention(context_words, sense_vecs)\n\t\t\t\t\t\t\t\tword_vec_i = sense_vecs[sense_index]\n\t\t\t\t\t\t\t\tnew_wordi = words[i] + '_' + str(sense_index)\n\t\t\t\t\t\t\t\tself.vector_word_doc[new_wordi.encode('utf-8')] = word_vec_i\n\t\t\t\t\t\t\t\tnew_words[i] = 
new_wordi\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tword_vec_i = sense_vecs[0]\n\t\t\t\t\t\t\t\tself.vector_word_doc[words[i].encode('utf-8')] = word_vec_i\n\t\t\t\t\t\t\tvec_outfile_bydoc.write(new_words[i] + ' ' + np.array2string(word_vec_i, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t formatter={'float_kind': lambda x: '%6f' % x})[1:-1] + '\\n')\n\n\t\t\t\t\t\tdoc_outfile.write(' '.join(new_words) + '\\n')\n\n\t\t\t\t\t\tline = docfile.readline()\n\n\t\treturn self.vector_word_doc", "def inverted_word_index(idx):\n words, idxs = zip(*tokenizer.word_index.items())\n inverted_word_index = dict(zip(idxs, words))\n return inverted_word_index.get(idx)", "def convert_words_to_index(sentences_list, dictionary):\n return [[dictionary[word]\n if word in dictionary else 0\n for word in sentence] for sentence in sentences_list]", "def build_vocab(vocab_size, text_vector):\n vocab = Counter()\n for text in text_vector:\n for word in text.split(' '):\n vocab[word.lower()]+=1\n vocab = dict(vocab.most_common(vocab_size))\n return vocab", "def build_vocab(data):\n # data = _read_words(filename)\n counter = collections.Counter(data)\n # print('counter', counter) # dictionary for the occurrence number of each word, e.g. 'banknote': 1, 'photography': 1, 'kia': 1\n count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n # print('count_pairs',count_pairs) # convert dictionary to list of tuple, e.g. ('ssangyong', 1), ('swapo', 1), ('wachter', 1)\n words, _ = list(zip(*count_pairs))\n word_to_id = dict(zip(words, range(len(words))))\n # print(words) # list of words\n # print(word_to_id) # dictionary for word to id, e.g. 'campbell': 2587, 'atlantic': 2247, 'aoun': 6746\n return word_to_id", "def build_vocab(self, corpus):\n if self.vocabulary_counts != None:\n logger.debug(\"building vocabulary from provided frequency map\")\n vocab = self.vocabulary_counts\n else:\n logger.debug(\"default vocabulary building\")\n super(Skipgram, self).build_vocab(corpus)\n return\n\n # assign a unique index to each word\n self.vocab, self.index2word = {}, []\n\n for word, count in vocab.iteritems():\n v = Vocab()\n v.count = count\n if v.count >= self.min_count:\n v.index = len(self.vocab)\n self.index2word.append(word)\n self.vocab[word] = v\n\n logger.debug(\"total %i word types after removing those with count<%s\" % (len(self.vocab), self.min_count))\n\n if self.hs:\n # add info about each word's Huffman encoding\n self.create_binary_tree()\n if self.negative:\n # build the table for drawing random words (for negative sampling)\n self.make_table()\n # precalculate downsampling thresholds\n self.precalc_sampling()\n self.reset_weights()", "def build_embedding_matrix_from_gensim_model(word_index, model, method=\"model\", lower=True, verbose=True):\n embedding_matrix = None\n for word, i in tqdm(word_index.items(), disable=not verbose):\n if lower:\n word = word.lower()\n embedding_vector = get_vect(word, model, method)\n if embedding_matrix is None and embedding_vector is not None:\n embedding_matrix = np.zeros((len(word_index) + 1, embedding_vector.shape[0]))\n if embedding_vector is not None:\n # words not found in embedding index will be all-zeros.\n embedding_matrix[i] = embedding_vector\n return embedding_matrix", "def generate_vocab_dict(vocab):\n v_dict = {}\n for word in vocab:\n if len(word) in v_dict:\n v_dict[len(word)].append(word)\n else:\n v_dict[len(word)] = [word]\n return v_dict", "def _build_vocabulary(input_files):\n if FLAGS.vocab_file:\n tf.logging.info(\"Loading existing vocab 
file.\")\n vocab = collections.OrderedDict()\n with tf.gfile.GFile(FLAGS.vocab_file, mode=\"r\") as f:\n for i, line in enumerate(f):\n word = line.decode(\"utf-8\").strip()\n assert word not in vocab, \"Attempting to add word twice: %s\" % word\n vocab[word] = i\n tf.logging.info(\"Read vocab of size %d from %s\",\n len(vocab), FLAGS.vocab_file)\n return vocab\n\n tf.logging.info(\"Creating vocabulary.\")\n num = 0\n wordcount = collections.Counter()\n for input_file in input_files:\n tf.logging.info(\"Processing file: %s\", input_file)\n for sentence in tf.gfile.FastGFile(input_file):\n wordcount.update(sentence.split())\n\n num += 1\n if num % 1000000 == 0:\n tf.logging.info(\"Processed %d sentences\", num)\n\n tf.logging.info(\"Processed %d sentences total\", num)\n\n words = wordcount.keys()\n freqs = wordcount.values()\n sorted_indices = np.argsort(freqs)[::-1]\n\n vocab = collections.OrderedDict()\n vocab[special_words.EOS] = special_words.EOS_ID\n vocab[special_words.UNK] = special_words.UNK_ID\n for w_id, w_index in enumerate(sorted_indices[0:FLAGS.num_words - 2]):\n vocab[words[w_index]] = w_id + 2 # 0: EOS, 1: UNK.\n\n tf.logging.info(\"Created vocab with %d words\", len(vocab))\n\n vocab_file = os.path.join(FLAGS.output_dir, \"vocab.txt\")\n with tf.gfile.FastGFile(vocab_file, \"w\") as f:\n f.write(\"\\n\".join(vocab.keys()))\n tf.logging.info(\"Wrote vocab file to %s\", vocab_file)\n\n word_counts_file = os.path.join(FLAGS.output_dir, \"word_counts.txt\")\n with tf.gfile.FastGFile(word_counts_file, \"w\") as f:\n for i in sorted_indices:\n f.write(\"%s %d\\n\" % (words[i], freqs[i]))\n tf.logging.info(\"Wrote word counts file to %s\", word_counts_file)\n\n return vocab", "def convert_words_to_index(actual_text, dictionary,length):\n output_index=[]\n for words in actual_text:\n full_sentence = [dictionary[word] if word in dictionary else 0 for word in words]\n sen_len=len(full_sentence)\n if sen_len<length: # padding\n full_sentence.extend([0]*(length-sen_len))\n else:\n full_sentence=full_sentence[:length]\n output_index.append(full_sentence)\n return output_index", "def build_inverted_index():\r\n # vacabulary list (with out common_words)\r\n file_read = read_file()\r\n vacabulary_list = []\r\n common_words = read_common_words()\r\n for key in file_read:\r\n for element in file_read[key]:\r\n if (element not in vacabulary_list) & (element not in common_words):\r\n vacabulary_list.append(element)\r\n\r\n # word list of each file\r\n content = remove_common_words(file_read, common_words) # content = stopping()\r\n\r\n # generate direction to save result\r\n inverted_index = {}\r\n for item in vacabulary_list:\r\n inverted_index[item] = {}\r\n\r\n for file_id in content.keys():\r\n frequency = Counter(\r\n content[file_id]) # the frequency of words in a file : {'slipstream': 5, 'lift': 4, 'wing': 3}\r\n for word in frequency.keys():\r\n inverted_index[word][file_id] = frequency[word]\r\n\r\n inverted_index = sorted(inverted_index.items(), key=lambda d: d[0], reverse=False)\r\n inverted_index = dict(inverted_index)\r\n return inverted_index", "def create_index(self, vocabulary=[]) -> dict:\n try:\n out = {}\n for word in vocabulary:\n if word in out:\n out[word] += 1\n else: \n out[word] = 1\n return(out)\n except Exception as error:\n print(f\"Error: self.create_index([...]) -> {error}\")", "def word2vec(self, sentence: str):\n tokens = nltk.word_tokenize(sentence)\n v = [self.word_dict.get(token, 0) for token in tokens]\n return v", "def buildVocabToNumMapping(vocab):\n # 
Index starts at one so we reseve 0 as a padding character \n index = 1\n vocab_to_num = {}\n num_to_vocab = {}\n \n for word in vocab:\n if word not in vocab_to_num:\n vocab_to_num[word] = index\n num_to_vocab[index] = word\n index += 1\n print(\"Max index // length of vocab: %s\" % index)\n \n return (vocab_to_num, num_to_vocab)", "def build_vocabulary(self):\n \n for iCount in range(0,len(self.documents)):\n for jCount in range(iCount,len(self.documents[iCount])):\n self.vocabulary.append(self.documents[iCount][jCount])\n\n self.vocabulary = set(self.vocabulary)\n\t\t\n self.vocabulary = sorted(self.vocabulary)\n\t\t#print(\"Value of the vocabulary\")\n self.vocabulary_size = len(self.vocabulary)", "def convert_to_index(sentences):\n\n\twords=[]\n\tfor idx, sentence in enumerate(sentences):\n\t\tfor word, label, sid, book, bert in sentence:\n\t\t\twords.append([book, sid, word, label])\n\n\treturn words", "def index(self):\n print(\"Indexing...\")\n # ------------------------------------------------------------------\n # TODO: Create an inverted, positional index.\n # Granted this may not be a linked list as in a proper\n # implementation.\n # This index should allow easy access to both \n # 1) the documents in which a particular word is contained, and \n # 2) for every document, the positions of that word in the document \n # Some helpful instance variables:\n # * self.docs = List of documents\n # * self.titles = List of titles\n inv_index = defaultdict(set)\n self.tf = defaultdict(Counter)\n \n for word in self.vocab:\n inv_index[word] = {} # create dictionary with words in V\n\n # Generate inverted index here\n for doc in range(len(self.docs)):\n for word in self.docs[doc]:\n self.tf[doc][word] += 1 # represents how many times word 'word' is mentioned in document 'i'\n \n for doc, title in zip(self.docs, self.titles):\n for word in self.vocab:\n inv_index[word][title] = [] # list for each word in vocabulary for all titles\n for pos, word in enumerate(doc):\n inv_index[word][title].append(pos)\n\n self.inv_index = inv_index\n # ------------------------------------------------------------------\n\n # turn self.docs into a map from ID to bag of words\n id_to_bag_of_words = {}\n for d, doc in enumerate(self.docs):\n bag_of_words = set(doc)\n id_to_bag_of_words[d] = bag_of_words\n self.docs = id_to_bag_of_words", "def sentence_to_indices(sentence, word_dict):\n return [word_dict.to_index(word) for word in word_tokenize(sentence)]", "def text2vec(self, maxlen):\n # Vocab = {word : index}\n self.Vocab = dict()\n\n for SentenceLabel in self.Pos + self.Neg:\n vector = [0] * maxlen\n for index, word in enumerate(SentenceLabel[0]):\n if index >= maxlen:\n break\n if word not in self.Vocab.keys():\n self.Vocab[word] = len(self.Vocab)\n vector[index] = len(self.Vocab) - 1\n else:\n vector[index] = self.Vocab[word]\n SentenceLabel[0] = vector\n self.doConvert = True", "def build_inverted_index(msgs):\n # YOUR CODE HERE\n inverted_idx = dict()\n\n temp = dict()\n\n # msgs here is the item dict \n for item in msgs:\n temp[item['id']] = item\n\n for i in range(1,9046):\n if i in temp:\n item = temp[i]\n toks = tokenize(item['name']) + tokenize(item['better'])\n counts = Counter(toks)\n for word, value in counts.items():\n if word in inverted_idx.keys():\n inverted_idx[word].append((item['id'],value))\n else:\n inverted_idx[word] = [(item['id'], value)]\n\n return inverted_idx", "def _vector_mapping(self) -> dict:\n words = set()\n for file in os.listdir(self.processed_path):\n doc_path = 
f\"{self.processed_path}/{file}\"\n with open(doc_path, 'r') as f:\n text_words = f.readline().split()\n words = words.union(set(text_words))\n words = list(words)\n words.sort()\n\n return dict(zip(words, range(len(words))))", "def build_vocab(self, min_count=3):\n word2count = defaultdict(int)\n for sentence in self.tokenized_corpus:\n for word in sentence:\n word2count[word] += 1\n\n word2dict = {}\n word2dict['PAD'] = {'id': 0}\n word2dict['UNK'] = {'id': 1}\n for word in word2count:\n if word2count[word] >= min_count:\n word2dict[word] = {'id': len(word2dict), 'count': word2count[word]}\n self.vocab = word2dict", "def convert_to_idx(lines):\n for idx, l in enumerate(lines):\n line_temp = []\n for v in l:\n try:\n line_temp.append(vocab_idx[v])\n except KeyError:\n line_temp.append(vocab_idx['<unk>'])\n lines[idx] = line_temp\n return lines", "def generate_vocabulary():\n stop_words = load_stop_words()\n words = ' '.join(generate_corpus()).split()\n print(len(words))\n vocabulary = {}\n for word in words:\n if word in stop_words:\n continue\n if word in vocabulary.keys():\n vocabulary[word] += 1\n else:\n vocabulary[word] = 1\n vocabulary = dict(sorted(vocabulary.items(), key=lambda x: x[1], reverse=True))\n return vocabulary", "def load_word_embeddings(self, word_embeddings, word_to_ix):\n logger.info(\"Loading the vocabulary\")\n self.vocab = {}\n self.index2word = []\n counts = {}\n for word in word_to_ix:\n counts[word] = counts.get(word, 0) + 1\n self.vocab_size = len(counts)\n self.vector_size = word_embeddings.shape[1]\n self.vectors = np.zeros((self.vocab_size, self.vector_size))\n self.index2word = [None] * self.vocab_size\n logger.info(\"Corpus has %i words\", len(self.vocab))\n for word_id, word in enumerate(counts):\n self.vocab[word] = Vocab(index=word_id, count=counts[word])\n self.vectors[word_id] = word_embeddings[word_to_ix[word]]\n self.index2word[word_id] = word\n assert((len(self.vocab), self.vector_size) == self.vectors.shape)\n logger.info(\"Loaded matrix of %d size and %d dimensions\", self.vocab_size, self.vector_size)", "def idx_sentence(sentence, word2id_dict):\r\n x = []\r\n words = sentence.split()\r\n for word in words:\r\n x.append(word2id_dict[word]) # 假设word就在word2idx_dict中.\r\n return x", "def vocab_from_w2v(word_vectors: gensim.models.word2vec.Word2Vec) -> Dict[str, int]:\n vocab = {\"<PAD>\": 0, \"<UNK>\": 1}\n for index, word in enumerate(word_vectors.wv.index2word):\n vocab[word] = index + 2\n return vocab", "def sentences_to_indices(X, word_to_index, max_len):\n \n m = X.shape[0] # number of training examples\n \n ### START CODE HERE ###\n # Initialize X_indices as a numpy matrix of zeros and the correct shape (≈ 1 line)\n X_indices = np.zeros((m, max_len))\n \n for i in range(m): # loop over training examples\n \n # Convert the ith training sentence in lower case and split is into words. 
You should get a list of words.\n sentence_words = X[i].lower().split()\n \n # Initialize j to 0\n j = 0\n \n # Loop over the words of sentence_words\n\n for w in sentence_words:\n # if w exists in the word_to_index dictionary\n if w in word_to_index: # if w in word_to_index.keys():\n # Set the (i,j)th entry of X_indices to the index of the correct word.\n X_indices[i, j] = word_to_index[w]\n # Increment j to j + 1\n j = j + 1\n \n ### END CODE HERE ###\n \n return X_indices", "def create_feature_space(sentences):\n splits = [s.split() for s in sentences]\n types = set(reduce(lambda x, y: x + y, splits))\n lookup = dict()\n for i, word in enumerate(types):\n lookup[word] = i\n return lookup", "def build_tf_dict(self, sentences):\n tf_dict = defaultdict(int)\n for sentence in sentences:\n for word in sentence:\n tf_dict[word] += 1\n return tf_dict", "def build_input_data(sentences, vocabulary):\n index_list = []\n for word in sentences:\n tmp = vocabulary[word]\n index_list.append(tmp)\n x = np.array(index_list)\n return x", "def get_idx_from_sent(sent, word_idx_map, k=300):\n x = []\n words = list(jieba.cut(sent, cut_all=False)) \n\n \n for word in words:\n \n if word in word_idx_map:\n x.append(word_idx_map[word])\n return x", "def build_wagner_vocabulary(data_annotations, keys_list):\n vocab = list('SOS')\n vocab.append('EOS')\n vocab.append('SLC')\n\n for key in keys_list:\n lyric_tokens = data_annotations[key]['lyrics']\n\n for sentence in lyric_tokens:\n for token in sentence:\n if token not in vocab:\n vocab.append(token)\n\n word2indx = {w: indx for (indx, w) in enumerate(vocab)}\n indx2word = {indx: w for (indx, w) in enumerate(vocab)}\n\n return word2indx, indx2word", "def gen_indexed_matrix(words, embd_dict):\n embd_matrix = [embd_dict[word] for word in words]\n \n return IndexedMatrix(words, embd_matrix)", "def get_W(word_vecs, k=300):\n vocab_size = len(word_vecs)\n word_idx_map = dict()\n W = np.zeros(shape=(vocab_size+1, k)) \n W[0] = np.zeros(k)\n\n for i, word in enumerate(word_vecs):\n W[i+1] = word_vecs[word] # i+1 as i=0 is already filled with zeros\n word_idx_map[word] = i\n i += 1\n return W, word_idx_map", "def prepare_inputs(token_mapping, w2v_W, w2v_U, sentences):\n tokens = [tokenize(token_mapping, sentence) for sentence in sentences] \n \n depth = len(token_mapping)\n one_hot_tokens = []\n for sentence in tokens:\n one_hot_sentence = []\n for i, token in enumerate(sentence):\n if token != token_mapping['#UNK#']:\n one_hot_sentence.append(one_hot_encode(token, depth))\n else:\n if i <= 2:\n context_tokens = sentence[:i] + sentence[i+1:i+3]\n else:\n context_tokens = sentence[i-2:i] + sentence[i+1:i+3]\n context_one_hot = [one_hot_encode(token, depth) for token in context_tokens]\n context_mean = np.mean(np.asarray(context_one_hot), axis=0)\n one_hot_sentence.append(context_mean)\n one_hot_tokens.append(one_hot_sentence)\n \n one_hot_tokens = [np.asarray(ls) for ls in one_hot_tokens]\n vec_tokens = [word2vec(w2v_W, w2v_U, sentence) for sentence in tqdm(one_hot_tokens, desc='Vectorizing tokens')]\n return vec_tokens", "def conv_word_to_indexed_txt(txt_vec):\n\n # transform words into integer indexes, comes out as n x m\n # where n = # txt doc, m = # unique words for whole universe\n vectorizer = CountVectorizer(\n stop_words=customised_stopword,\n analyzer='word'\n )\n # CountVectorizer(ngram_range=(1,2), analyzer='word')\n sparse_count_vec = vectorizer.fit_transform(txt_vec)\n\n # create n x p list of words represented by ints,\n # where p = # words in each 
documentx\n # written in such a convoluted way for speed optimization purposes\n x_vec, y_vec, count_vec = sparse.find(sparse_count_vec)\n\n # add in duplicates\n x_vec = np.repeat(x_vec, count_vec)\n y_vec = np.repeat(y_vec, count_vec)\n\n # convert to torch variables\n x_vec = torch.tensor(x_vec, dtype=torch.int32)\n y_vec = torch.tensor(y_vec, dtype=torch.float)\n\n # sort the vecs\n sort_ix = torch.argsort(x_vec)\n x_vec = x_vec[sort_ix]\n y_vec = y_vec[sort_ix]\n\n x_vec_bincount = torch.bincount(x_vec.cpu())\n bincount_tup = tuple(int(bincount) for bincount in x_vec_bincount)\n indexed_txt_list = list(torch.split(y_vec, bincount_tup))\n\n # the dictionary key to match each word to int\n vocab_dict = vectorizer.vocabulary_\n\n print(\"Converted words to indexes of integers.\")\n\n vocab_count = sparse_count_vec.data\n\n return indexed_txt_list, vocab_dict, vocab_count", "def get_W(word_vecs, k):\n vocab_size = len(word_vecs)\n word_idx_map = dict()\n W = np.zeros(shape=(vocab_size+1, k)) \n W[0] = np.zeros(k)\n i = 1\n for word in word_vecs:\n W[i] = word_vecs[word]\n word_idx_map[word] = i\n i += 1\n return W, word_idx_map", "def generate_conll2003_embeddings():\n glove_embedding = get_glove_embedding()\n\n word2index = {}\n idx2word = {}\n embed_array = []\n\n word2index[\"<pad>\"] = 1\n embed_array.append(init_embedding())\n\n word2index[\"<unk>\"] = 0\n embed_array.append(init_embedding())\n\n data = []\n with open(TRAIN_DATA_PATH, \"r\") as f:\n for line in f:\n data.append(json.loads(line))\n\n idx = 2\n\n for sample in tqdm(data, total=len(data)):\n words = sample[\"tokens\"]\n\n for w in words:\n w = w.lower()\n\n # if word is not present in dictionary, add to dictionary and append embedding vector\n if w not in word2index.keys():\n word2index[w] = idx\n idx += 1\n if w not in glove_embedding.keys():\n ev = init_embedding()\n else:\n ev = glove_embedding[w]\n\n embed_array.append(ev)\n\n else:\n continue\n\n # save embeddings\n embed_array = np.vstack(embed_array)\n np.save(EMBD_OUTPUT_PATH, embed_array)\n\n # save dictionary\n print(\"Dicitionary Size: \", len(word2index))\n with open(DICTIONARY_OUTPUT_PATH, \"w\") as f:\n json.dump(word2index, f)", "def _make_index(self, fname, sents, words):\n for w in words:\n # word index for this file only\n findex = []\n\n for ixS, s in enumerate(sents):\n # iterate over each word in the sentencep\n for ixT, token in enumerate(s):\n # could use regex for substring matching instead\n if w == token.lower():\n findex.append((ixS, ixT))\n # keep track of word use frequency\n self._freq[w] += 1\n\n # grow the main index \n self._index[w][fname]= findex", "def one_hot_vocab_encoding(w2vp: W2VPreprocessor \n ) -> Dict[str, np.ndarray]:\n return {\n w: i for i, w in enumerate(w2vp.vocabulary)\n }", "def get_W(word_vecs, k=200):\n vocab_size = len(word_vecs)\n word_idx_map = dict()\n W = np.zeros(shape=(vocab_size+1, k)) \n W[0] = np.zeros(k)\n i = 1\n for word in word_vecs:\n W[i] = word_vecs[word]\n word_idx_map[word] = i\n i += 1\n return W, word_idx_map", "def predict_sentences_2_idxs(self):\n fo = open(self.config.parsed_predict_file, 'w')\n self.load_dicts()\n\n questions = pd.read_csv(self.config.predict_file,\n usecols=[\"question_text\"], index_col=False)\n unk_idx = self.word2idx[self.config.unknown_token]\n\n for quest in questions.question_text:\n tokens = preprocess_text(quest)\n if self.config.include_unknown:\n idxs = [self.word2idx.get(token, unk_idx) for token in\n tokens]\n else:\n idxs = [self.word2idx.get(token) for token in 
tokens]\n idxs = [idx for idx in idxs if idx]\n fo.write((str(\" \".join(str(num) for num in idxs)) + \"\\n\"))", "def get_word_vector(doc_id, corpus):\n inv_index = vsm_retrieval.get_inverted_index(corpus)\n word_vec = np.zeros(len(inv_index))\n for count_vec, word in enumerate(inv_index):\n word_vec[count_vec] = inv_index[word].get(doc_id, {'frequency': 0})['frequency']\n return word_vec", "def load_word_vectors(self, sentence_entry):\n word_vectors = []\n for token, lemma in zip(sentence_entry.tokens, sentence_entry.lemmas):\n # Go through the lookup chain. If one of these is found in the vsm,\n # return it, else use the fallback and report oov\n for s in [token, token.lower(), lemma, lemma.lower()]:\n if self.embeddings.contains_word(s):\n vector = self.embeddings.word_to_vec(s)\n self.statistics.known_token()\n break\n else:\n self.statistics.unknown_token()\n vector = self.embeddings.get_zero_fallback()\n\n word_vectors.append(vector)\n return word_vectors", "def get_W(word_vecs, vocab, k=300):\n vocab_size = len(vocab)\n word_idx_map = dict()\n W = np.zeros(shape=(vocab_size, k), dtype='float32')\n i = 0\n for word in vocab:\n W[i] = word_vecs[word]\n word_idx_map[word] = i\n i += 1\n # W[0] = np.zeros(k, dtype='float32')\n return W, word_idx_map", "def sentences_2_idxs(self):\n fo_pos = open(self.config.parsed_train_file_pos, 'w')\n fo_neg = open(self.config.parsed_train_file_neg, 'w')\n self.load_dicts()\n labels = pd.read_csv(self.config.train_file, usecols=[\"target\"])\n\n labels = list(labels.values[:, 0])\n questions = pd.read_csv(self.config.train_file,\n usecols=[\"question_text\"], index_col=False)\n unk_idx = self.word2idx.get(self.config.unknown_token)\n\n for label, quest in zip(labels, questions.question_text):\n tokens = preprocess_text(quest)\n\n if self.config.include_unknown:\n idxs = [self.word2idx.get(token, unk_idx) for token in\n tokens]\n else:\n idxs = [self.word2idx.get(token) for token in tokens]\n idxs = [idx for idx in idxs if idx]\n out_line = (str(\" \".join(str(num) for num in idxs)) + \"\\n\")\n if label == 1:\n fo_pos.write(out_line)\n else:\n fo_neg.write(out_line)", "def getVectors(self):\n vectors = dict()\n i = 0\n N = len(self.db.invertedIndex)\n for w, (idf, docs) in self.db.invertedIndex.items():\n for doc, tf in docs.items():\n try:\n vectors[doc][i] = tf * idf\n except KeyError as k:\n vectors[doc] = {i: tf * idf}\n i += 1\n i = 0;\n return vectors", "def parallel_word_dict(w_list, st, end):\n import spacy\n w_list = w_list[st:end]\n nlp, out_dict, count = spacy.load('en_core_web_lg'), {}, 0\n for word in w_list:\n word_obj = nlp(word)\n if word_obj.has_vector:\n out_dict[word] = word_obj.vector\n count += 1\n return out_dict", "def _init_vocab(self):\n self._word2idx = {}\n self._idx2word = {}\n self.freqs = {}\n self.vocab_size = 0\n\n self._add_word(self.pad_word)\n self._add_word(self.start_word)\n self._add_word(self.end_word)\n self._add_word(self.unk_word)\n\n self.start_word_idx = self.stoi(self.start_word)\n self.end_word_idx = self.stoi(self.end_word)\n self.unk_word_idx = self.stoi(self.unk_word)\n self.pad_word_idx = self.stoi(self.pad_word)\n\n self._special_tokens = {\n 'bos_token': self.start_word,\n 'cls_token': self.start_word,\n 'eos_token': self.end_word,\n 'sep_token': self.end_word,\n 'pad_token': self.pad_word,\n 'unk_token': self.unk_word,\n }\n\n self._special_ids = {\n 'bos_token_id': self.start_word_idx,\n 'cls_token_id': self.start_word_idx,\n 'eos_token_id': self.end_word_idx,\n 'sep_token_id': 
self.end_word_idx,\n 'pad_token_id': self.pad_word_idx,\n 'unk_token_id': self.unk_word_idx,\n }\n\n self.cls_token_id = self.bos_token_id = self.start_word_idx\n self.eos_token_id = self.sep_token_id = self.end_word_idx\n self.pad_token_id = self.pad_word_idx\n self.unk_token_id = self.unk_word_idx\n\n self.cls_token = self.bos_token = self.start_word\n self.eos_token = self.sep_token = self.end_word\n self.pad_token = self.pad_word\n self.unk_token = self.unk_word", "def _make_word_dictionary(self,annos):\n # get training annos\n train_annos = self.annos[\"train\"]\n # read tokens\n tokens_list = []\n for ann in train_annos:\n tokens_list += [tk for tk in ann[\"tokens\"]]\n # print results: count tokens and show top-n\n print(\"Top-{} tokens list:\".format(self.cfg.DATASET.SHOW_TOP_VOCAB))\n tokens_count = sorted(Counter(tokens_list).items(), key=lambda x:x[1])\n for tk in tokens_count[-self.cfg.DATASET.SHOW_TOP_VOCAB:]:\n print(\"\\t- {}: {}\".format(tk[0],tk[1]))\n # make wtoi, itow\n wtoi = {}\n wtoi[\"<PAD>\"], wtoi[\"<UNK>\"] = 0, 1\n wtoi[\"<S>\"], wtoi[\"<E>\"] = 2, 3\n for i,(tk,cnt) in enumerate(tokens_count):\n idx = i+4 # idx start at 4\n wtoi[tk] = idx\n itow = {v:k for k,v in wtoi.items()}\n self.cfg.MODEL.QUERY.EMB_IDIM = len(wtoi)\n return wtoi, itow", "def get_embedding_matrix(word_vecs, k=300):\n vocab_size = len(word_vecs)\n word_idx_map = dict()\n W = np.zeros(shape=(vocab_size+1, k), dtype='float32') \n W[0] = np.zeros(k, dtype='float32')\n i = 1\n for word in word_vecs:\n W[i] = word_vecs[word]\n word_idx_map[word] = i\n i += 1\n return W, word_idx_map", "def get_W(word_vecs, k=300):\n vocab_size = len(word_vecs)\n word_idx_map = dict()\n W = np.zeros(shape=(vocab_size + 1, k))\n W[0] = np.zeros(k)\n i = 1\n for word in word_vecs:\n W[i] = word_vecs[word]\n word_idx_map[word] = i\n i += 1\n return W, word_idx_map", "def get_W(word_vecs, vocab, k=300):\n vocab_size = len(word_vecs)\n word_idx_map = dict()\n W = np.zeros(shape=(vocab_size+2, k), dtype='float32')\n W[0] = np.zeros(k, dtype='float32') # padding vector\n i = 1\n for word in vocab:\n \tif word_vecs.has_key(word):\n \tW[i] = word_vecs[word]\n \tword_idx_map[word] = i\n \ti += 1\n else:\n \tword_idx_map[word] = vocab_size+1\n W[vocab_size+1] = np.zeros(k, dtype='float32')\n return W, word_idx_map", "def train_word2vec(sentence_matrix, vocabulary_inv,\r\n num_features=300, min_word_count=1, context=10):\r\n\r\n model_name = 'predictor/model/word2vec'\r\n if exists(model_name):\r\n # embedding_model = word2vec.Word2Vec.load(model_name)\r\n embedding_model = gensim.models.Word2Vec.load('predictor/model/word2vec')\r\n print('Load existing Word2Vec model \\'%s\\'' % split(model_name)[-1])\r\n else:\r\n # Set values for various parameters\r\n num_workers = 2 # Number of threads to run in parallel\r\n downsampling = 1e-3 # Downsample setting for frequent words\r\n\r\n # Initialize and train the model\r\n print('Training Word2Vec model...')\r\n sentences = [[vocabulary_inv[w] for w in s] for s in sentence_matrix]\r\n embedding_model = word2vec.Word2Vec(sentences, workers=num_workers,\r\n size=num_features, min_count=min_word_count,\r\n window=context, sample=downsampling)\r\n\r\n # If we don't plan to train the model any further, calling\r\n # init_sims will make the model much more memory-efficient.\r\n embedding_model.init_sims(replace=True)\r\n\r\n # Saving the model for later use. 
You can load it later using Word2Vec.load()\r\n print('Saving Word2Vec model \\'%s\\'' % split(model_name)[-1])\r\n embedding_model.save(model_name)\r\n\r\n # add unknown words\r\n embedding_weights = {key: embedding_model[word] if word in embedding_model else\r\n np.random.uniform(-0.25, 0.25, embedding_model.vector_size)\r\n for key, word in embedding_model.wv.vocab.items()}\r\n return embedding_weights", "def construct_vocab(lines, vocab_size):\n vocab = {}\n for line in lines:\n for word in line:\n if word not in vocab:\n vocab[word] = 1\n else:\n vocab[word] += 1\n \n word2id = {}\n id2word = {}\n word2id['<pad>'] = 0\n word2id['<unk>'] = 1\n id2word[0] = '<pad>'\n id2word[1] = '<pad>'\n \n sorted_word2id = sorted(\n vocab.items(),\n key=operator.itemgetter(1),\n reverse=True\n )\n\n sorted_words = [x[0] for x in sorted_word2id[:vocab_size]]\n\n for ind, word in enumerate(sorted_words):\n word2id[word] = ind + 2\n\n for ind, word in enumerate(sorted_words):\n id2word[ind + 2] = word\n\n return word2id, id2word", "def generate_rel_non_rel_vector(inv_index, doc_scores, start, end):\n\n result_vector = {}\n\n for i in range(start, end):\n doc_id, doc_score = doc_scores[i]\n\n # Get the content of this document which will be in the form of a string\n # convert it into a list of words and create a frequency map of the\n # words\n\n # NOTE: corpus_collection_path is the global variable here\n\n fp = open(str(corpus_collection_path) + \"\\\\\" + doc_id + \".html\")\n content = fp.read().split()\n fp.close()\n\n result_vector = dict(Counter(content))\n\n # Check with the inverted index\n for index_item in inv_index:\n if index_item not in result_vector:\n result_vector[index_item] = 0\n\n return result_vector", "def create_lookup_tables(text):\n word_count = Counter(text)\n #sorted_word = sorted(word_count, key=word_count.get, reverse=True) # key=word_count.get 按照key原始顺序排序,reverse=True 降序\n int_to_vocab = { idx:word for idx,word in enumerate(word_count)}\n vocab_to_int = { word:idx for idx,word in enumerate(word_count)}\n return vocab_to_int, int_to_vocab", "def get_matrix_of_vectors(wv_from_bin, required_words=['softball', 'technology','street','project','fellow','maps','view','fuel','summer','clubhouse','ball','steal','soccer','driving','motor','comedy']):\n import random\n words = list(wv_from_bin.vocab.keys())\n print(\"Shuffling words ...\")\n random.shuffle(words)\n wrds = words[:10000]\n print(\"Putting %i words into word2Ind and matrix M...\" % len(words))\n word2Ind = {}\n M = []\n curInd = 0\n for w in words:\n try:\n M.append(wv_from_bin.word_vec(w))\n word2Ind[w] = curInd\n curInd += 1\n except KeyError:\n continue\n for w in required_words:\n try:\n M.append(wv_from_bin.word_vec(w))\n word2Ind[w] = curInd\n curInd += 1\n except KeyError:\n continue\n M = np.stack(M)\n print(\"Done.\")\n return M, word2Ind", "def get_inverse_mapping(tf_list, idf_dict, num_docs):\n # All unique keys\n inverse_mapping = {}\n for key in idf_dict.keys():\n doc_list = [] # Contains list of docs which contain that term with tf scores\n for i in range(num_docs):\n if key in tf_list[i].keys():\n doc_list.append((i, tf_list[i][key]))\n inverse_mapping[key] = doc_list\n return inverse_mapping", "def build_vocab(self, sentences, keep_raw_vocab=False, trim_rule=None, progress_per=10000, update=False):\n print(\"build------------------\")\n self.scan_vocab(sentences, progress_per=progress_per, trim_rule=trim_rule) # initial survey\n # trim by min_count & precalculate downsampling\n 
self.scale_vocab(trim_rule=trim_rule, update=update)\n self.finalize_vocab(update=update)", "def get_word_embedding(counter, emb_file, emb_size, vocab_size, vec_size, vocab_file):\n print(\"Generating word embedding...\")\n # load word embeddings\n embedding_dict = {}\n with open(emb_file, \"r\", encoding=\"utf-8\") as fh:\n for line in tqdm(fh, total=emb_size):\n array = line.split()\n word = \"\".join(array[0:-vec_size])\n vector = list(map(float, array[-vec_size:]))\n embedding_dict[word] = vector\n\n TRANSLATE = {\n \"-lsb-\": \"[\", \"-rsb-\": \"]\", \"-lrb-\": \"(\", \"-rrb-\": \")\", \"-lcb-\": \"{\",\n \"-rcb-\": \"}\", \"-LSB-\": \"[\", \"-RSB-\": \"]\", \"-LRB-\": \"(\", \"-RRB-\": \")\",\n \"-LCB-\": \"{\", \"-RCB-\": \"}\"\n }\n SPECIAL_TOKENS = [\"<NULL>\", \"<UNK>\", \"<S>\", \"</S>\"]\n words = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x: x[1], reverse=True)))\n words = SPECIAL_TOKENS + words\n if vocab_size > 0:\n words = words[:vocab_size]\n with open(vocab_file, 'w') as f:\n f.write('\\n'.join(words[1:]))\n embedding = np.random.normal(scale=0.1, size=(len(words), vec_size))\n word2idx_dict = {}\n unknown_count = 0\n for i, word in enumerate(words):\n word2idx_dict[word] = i\n if word in TRANSLATE:\n word = TRANSLATE[word]\n done = False\n for w in (word, word.lower(), word.upper(), word.capitalize()):\n if w in embedding_dict:\n embedding[i] = embedding_dict[w]\n done = True\n break\n if not done:\n unknown_count += 1\n return embedding, word2idx_dict, unknown_count", "def create_word_map(tokenized_descriptions_file_path, word_dictionary_output_path):\n if os.path.exists(word_dictionary_output_path):\n print(\"Word map already exists in workspace. Will be reused.\")\n return\n\n print(\"Word map not found. Generating....\")\n\n words_list = []\n words_to_id = {}\n\n with open(tokenized_descriptions_file_path, 'r') as file:\n for line in file:\n tokens = line.strip().split(\",\")\n words_list.extend(tokens[1:])\n\n # remove duplicate words\n words_list = list(set(words_list))\n\n # sorting the words\n words_list = sorted(words_list)\n for i in range(len(words_list)):\n words_to_id[words_list[i]] = i\n\n with open(word_dictionary_output_path, 'w') as f:\n [f.write('{0},{1}'.format(key, value) + \"\\n\") for key, value in words_to_id.items()]", "def vectorize(self, sentence, embeddings_dict):\n processed_sentence = self.preprocess(sentence)\n\n matrix = []\n for token in processed_sentence:\n if token in embeddings_dict:\n matrix.insert(0, embeddings_dict[token])\n return numpy.matrix(matrix)", "def _load_vocabulary(self) -> Dict[str, int]:\n\n df_existing_vocab = self._db_connection.get_dataframe(table_name='tfidf_vocabulary', schema='encoded_articles')\n\n df_existing_vocab.set_index('word', inplace=True)\n\n return df_existing_vocab['feature_matrix_index'].to_dict()", "def create_lookup_tables(text):\n vocab = set(text.split())\n vocab_to_int = copy.copy(CODES)\n\n for v_i, v in enumerate(vocab, len(CODES)):\n vocab_to_int[v] = v_i\n\n int_to_vocab = {v_i: v for v, v_i in vocab_to_int.items()}\n\n return vocab_to_int, int_to_vocab", "def vectorize_vocabulary(train_tweets_dict, test_tweets_dict):\n\n print(\"Vectorizing ADRMine data vocabulary...\")\n\n tfidf_vectorizer = TfidfVectorizer()\n corpus = []\n\n for i, (k, v) in enumerate(train_tweets_dict.items()):\n corpus.append(v.lower())\n\n for i, (k, v) in enumerate(test_tweets_dict.items()):\n corpus.append(v.lower())\n\n tfidf_vectorizer.fit_transform(corpus)\n #print(Tfidf_vect.vocabulary_)\n 
#print(len(Tfidf_vect.vocabulary_))\n #print(Tfidf_vect.idf_)\n print(\" size of vocabulary: {}\".format(len(tfidf_vectorizer.vocabulary_)))\n return tfidf_vectorizer", "def constitute_word_dict(self):\r\n\r\n #IS THIS NECESSARY WITH DATABASE??\r\n\r\n if self.using_shelf:\r\n for k_temp in self.get_words():\r\n self.delete_word(k_temp)\r\n\r\n for i_temp in [a_temp for a_temp in self.indexes()\r\n if Index(a_temp) > Index(str(0))]:\r\n\r\n self.add_search_words(Index(i_temp),\r\n self.get_text_from_note(i_temp))\r\n display.noteprint((alerts.ATTENTION,\r\n alerts.WORD_DICT_CONSTITUTED))", "def pretrained_embedding_layer(word_to_vec_map, word_to_index):\n vocab_len = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)\n emb_dim = word_to_vec_map[\"cucumber\"].shape[0] # define dimensionality of your GloVe word vectors (= 50)\n emb_matrix = np.zeros((vocab_len, emb_dim)) # Initialize the embedding matrix as a numpy array of zeros of shape (vocab_len, dimensions of word vectors = emb_dim)\n for word, index in word_to_index.items(): # Set each row \"index\" of the embedding matrix to be the word vector representation of the \"index\"th word of the vocabulary\n emb_matrix[index, :] = word_to_vec_map[word]\n embedding_layer = Embedding(vocab_len, emb_dim, trainable = False) # Define Keras embedding layer with the correct output/input sizes, make it trainable. Use Embedding(...). Make sure to set trainable=False. \n embedding_layer.build((None,)) # Build the embedding layer, it is required before setting the weights of the embedding layer. Do not modify the \"None\".\n embedding_layer.set_weights([emb_matrix]) # Set the weights of the embedding layer to the embedding matrix. Your layer is now pretrained.\n return embedding_layer", "def get_word_embeddings(self):\n embedding_index = {}\n with open('./glove/glove.6B.100d.txt', encoding=\"utf8\") as f:\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embedding_index[word] = coefs\n return embedding_index", "def train(self, sentences):\n\n dictionary = Dictionary(sentences)\n\n ft = Word2Vec(sentences, workers=cpu_count(), min_count=5, size=300, seed=12345)\n\n index = WordEmbeddingSimilarityIndex(ft.wv)\n matrix = SparseTermSimilarityMatrix(index, dictionary)\n\n self.dictionary = dictionary\n self.ft = ft\n self.matrix = matrix", "def generate_vocab():\n\n vocab_dict = {}\n folder_path = os.listdir(args.f)\n for subfolder in folder_path:\n subfolder_path = os.path.join(args.f, subfolder)\n for filename in os.listdir(subfolder_path):\n with open(os.path.join(subfolder_path, filename), 'r') as file:\n read_file = file.read()\n normalised_text = re.sub(r\"[^\\s\\w]\", \" \", read_file.lower())\n vocab = normalised_text.split() #.split() creates a list of strings\n vocab_dict.update({i: 0 for i in vocab})\n return vocab_dict", "def construct_embedding(self):\n i = 0\n self.load_dicts()\n embedding_shape = (max(self.word2idx.values()) + 1,\n self.embedding_size)\n self.embedding = np.zeros(embedding_shape)\n\n with open(self.config.word_vec_fi_glove, 'r') as fi:\n for line in fi:\n word_vec = line.split(\" \")[1:]\n self.embedding[i, :] = np.array(word_vec, dtype=np.float32)\n i += 1\n\n self.write_embedding()", "def _create_lookup_tables(self, text):\n word_counts = Counter(text)\n sorted_words = sorted(word_counts, key=word_counts.get, reverse=True)\n vocab_to_int = {word: ii for ii, word in enumerate(sorted_words)}\n int_to_vocab = {ii: word for ii, word in 
enumerate(sorted_words)}\n return (vocab_to_int, int_to_vocab)", "def main(gensim_model_path, out_dir, min_count=None):\n \n gensim_model = Word2Vec.load(gensim_model_path)\n vector_map = VectorMap(128)\n\n if min_count is None:\n min_count = gensim_model.min_count\n \n for string in gensim_model.vocab:\n vocab = gensim_model.vocab[string]\n freq, idx = vocab.count, vocab.index\n if freq < min_count:\n continue\n vector = gensim_model.syn0[idx]\n vector_map.borrow(string, freq, vector)\n \n vector_map.save(out_dir)", "def map_word(self, word):\n for invariance in self.invariances:\n word = invariance.map_word(word)\n return word" ]
[ "0.796555", "0.78444034", "0.76602143", "0.765326", "0.7552341", "0.74616605", "0.69504786", "0.69184107", "0.68647057", "0.6849758", "0.6777216", "0.6661577", "0.6608296", "0.6554376", "0.6551425", "0.6457603", "0.6450155", "0.6442917", "0.64372295", "0.6427494", "0.6417837", "0.6406366", "0.63716626", "0.63412863", "0.6321773", "0.6309697", "0.62368363", "0.623574", "0.62157696", "0.6175574", "0.61610305", "0.6129864", "0.61178905", "0.6108548", "0.60812145", "0.6075543", "0.60716707", "0.60646623", "0.60541284", "0.6032184", "0.602924", "0.6018248", "0.6006332", "0.5999235", "0.599632", "0.5987654", "0.5970105", "0.5969272", "0.59544", "0.59432936", "0.5939776", "0.5936942", "0.5932763", "0.5930138", "0.59254724", "0.5921069", "0.5917141", "0.5914903", "0.59108853", "0.5895828", "0.5888403", "0.58705616", "0.58478296", "0.58377403", "0.58329743", "0.5826401", "0.5814674", "0.5805841", "0.58045584", "0.5797832", "0.57925355", "0.5780065", "0.57734436", "0.57504904", "0.5746333", "0.5741133", "0.57309186", "0.5727288", "0.5715948", "0.57052016", "0.5697812", "0.5695764", "0.5691818", "0.56846434", "0.5682709", "0.56825125", "0.56784683", "0.567793", "0.56706285", "0.56682974", "0.5668165", "0.5666852", "0.565852", "0.56565386", "0.5653795", "0.5647692", "0.56435335", "0.5634719" ]
0.7715383
4
Maps sentences and labels to vectors based on a vocabulary.
def build_input_data(sentences, labels, vocabulary, pos1_sentences, pos2_sentences):
    x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences])
    y = np.array(labels)
    a1 = np.array(pos1_sentences)
    a2 = np.array(pos2_sentences)
    return [x, y, a1, a2]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences)) # 实际没用到\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv = list(sorted(vocabulary_inv))\n # 加入 <UNK>\n vocabulary_inv.insert(0, '</s>')\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences, saved_vocabulary_inv):\n if saved_vocabulary_inv:\n vocabulary_inv = saved_vocabulary_inv\n else:\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv.append('<pad>')\n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def labels2Vec(labels):\r\n global dict_words_n_vectors\r\n\r\n for i in range(len(labels)):\r\n if labels[i] in dict_words_n_vectors:\r\n labels[i] = dict_words_n_vectors[labels[i]]\r\n else:\r\n labels[i] = np.zeros(300)\r\n return np.array(labels, dtype=\"float32\")", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_input_data(sentences, labels, vocabulary):\n vocabulary_inv = {word: index for index, word in vocabulary.items()}\n x = np.array([[vocabulary_inv[word] if word in vocabulary_inv else 0 for word in sent] for sent in sentences])\n y = np.array(labels)\n return [x, y]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common() if x[1] > 1]\n vocabulary_inv += ['$']\n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return 
[vocabulary, vocabulary_inv]", "def vectorize_vocabulary(train_tweets_dict, test_tweets_dict):\n\n print(\"Vectorizing ADRMine data vocabulary...\")\n\n tfidf_vectorizer = TfidfVectorizer()\n corpus = []\n\n for i, (k, v) in enumerate(train_tweets_dict.items()):\n corpus.append(v.lower())\n\n for i, (k, v) in enumerate(test_tweets_dict.items()):\n corpus.append(v.lower())\n\n tfidf_vectorizer.fit_transform(corpus)\n #print(Tfidf_vect.vocabulary_)\n #print(len(Tfidf_vect.vocabulary_))\n #print(Tfidf_vect.idf_)\n print(\" size of vocabulary: {}\".format(len(tfidf_vectorizer.vocabulary_)))\n return tfidf_vectorizer", "def text_to_vecs(self):\n # convert word strings into word vectors\n sent_vec = []\n for w in self.sentence:\n if w in self.word_vectors.getVocab():\n sent_vec.append( self.word_vectors.getWordVectors()[w] )\n else:\n sent_vec.append( self.word_vectors.getOOVWordVector() )\n \n assert(len(self.sentence) == len(sent_vec)) \n self.sent_vec = sent_vec", "def build_vocab(self, sentences):\n\t\t# Build the vocab\n\t\tword_counts = collections.Counter(sentences)\n\n\t\t# Mapping from index to word (get the indices of most common words)\n\t\tvocab_inv = [x[0] for x in word_counts.most_common()] # Do we need this?\n\t\tvocab_inv = list(sorted(vocab_inv))\n\n\t\t# Mapping from word to index\n\n\t\tvocab = {x: i for i,x in enumerate(vocab_inv)}\n\n\t\treturn [vocab, vocab_inv]", "def vectorize_labels(self):\n label_counter = Counter(self.raw_labels)\n if 'oos' in label_counter:\n label_counter.pop('oos')\n unique_labels, label_cnts = zip(*sorted(label_counter.items()))\n unique_labels, label_cnts = list(unique_labels), list(label_cnts)\n label_vocab = {label: index for index, label in enumerate(unique_labels)}\n vectorized_labels = [label_vocab.get(label, -1) for label in self.raw_labels]\n return label_vocab, vectorized_labels, label_cnts", "def word2vec_mapping_func():\n return {\"belonging to\": \"belonging\", \"parked on\": \"parked\", \"growing on\": \"growing\", \"standing on\": \"standing\",\n \"made of\": \"made\", \"attached to\": \"attached\", \"hanging from\": \"hanging\", \"in front of\": \"front\",\n \"lying on\": \"lying\", \"flying in\": \"flying\", \"looking at\": \"looking\", \"on back of\": \"back\",\n \"laying on\": \"laying\", \"walking on\": \"walking\", \"walking in\": \"walking\", \"sitting on\": \"sitting\",\n \"covered in\": \"covered\", \"part of\": \"part\", \"painted on\": \"painted\", \"mounted on\": \"mounted\"}", "def text2vec(self, maxlen):\n # Vocab = {word : index}\n self.Vocab = dict()\n\n for SentenceLabel in self.Pos + self.Neg:\n vector = [0] * maxlen\n for index, word in enumerate(SentenceLabel[0]):\n if index >= maxlen:\n break\n if word not in self.Vocab.keys():\n self.Vocab[word] = len(self.Vocab)\n vector[index] = len(self.Vocab) - 1\n else:\n vector[index] = self.Vocab[word]\n SentenceLabel[0] = vector\n self.doConvert = True", "def word2vec(self, sentence: str):\n tokens = nltk.word_tokenize(sentence)\n v = [self.word_dict.get(token, 0) for token in tokens]\n return v", "def build_vocab(vocab_size, text_vector):\n vocab = Counter()\n for text in text_vector:\n for word in text.split(' '):\n vocab[word.lower()]+=1\n vocab = dict(vocab.most_common(vocab_size))\n return vocab", "def stem2vec(self, sentence_vector, voc=None):\n if voc is None:\n voc = self.vocabulary\n\n vec = np.zeros(len(voc))\n\n for word in sentence_vector:\n index = voc[word]\n vec[index] += 1\n\n return vec", "def add_vecs_to_vocab(vocab, vectors):\n length = 
len(vectors[0][1])\n vocab.reset_vectors(width=length)\n for word, vec in vectors:\n vocab.set_vector(word, vector=vec)\n return vocab", "def create_vocabulary(sentences, path):\n print('creating vocab..')\n\n word_dict = dict(); vocabulary = dict()\n for sentence in sentences:\n for word in nltk.word_tokenize(sentence):\n if word not in word_dict:\n word_dict[word] = ''\n word_dict['<s>'] = ''\n word_dict['</s>'] = ''\n\n with open(path, encoding=\"utf8\") as f:\n for line in f:\n word, vec = line.split(' ', 1)\n if word in word_dict:\n vocabulary[word] = np.fromstring(vec, sep=' ')\n\n print('vocabulary was created successfully!')\n return vocabulary", "def prepare_inputs(token_mapping, w2v_W, w2v_U, sentences):\n tokens = [tokenize(token_mapping, sentence) for sentence in sentences] \n \n depth = len(token_mapping)\n one_hot_tokens = []\n for sentence in tokens:\n one_hot_sentence = []\n for i, token in enumerate(sentence):\n if token != token_mapping['#UNK#']:\n one_hot_sentence.append(one_hot_encode(token, depth))\n else:\n if i <= 2:\n context_tokens = sentence[:i] + sentence[i+1:i+3]\n else:\n context_tokens = sentence[i-2:i] + sentence[i+1:i+3]\n context_one_hot = [one_hot_encode(token, depth) for token in context_tokens]\n context_mean = np.mean(np.asarray(context_one_hot), axis=0)\n one_hot_sentence.append(context_mean)\n one_hot_tokens.append(one_hot_sentence)\n \n one_hot_tokens = [np.asarray(ls) for ls in one_hot_tokens]\n vec_tokens = [word2vec(w2v_W, w2v_U, sentence) for sentence in tqdm(one_hot_tokens, desc='Vectorizing tokens')]\n return vec_tokens", "def build_input_data(sentences, labels, vocabulary):\n # x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences])\n\n # Uncomment this if we have unprecedented tokens\n for sentence_i in range(len(sentences)):\n for word_j in range(len(sentences[sentence_i])):\n if sentences[sentence_i][word_j] in vocabulary:\n sentences[sentence_i][word_j] = vocabulary[sentences[sentence_i][word_j]]\n else:\n sentences[sentence_i][word_j] = 1\n x = np.array(sentences)\n y = np.array(labels)\n return [x, y]", "def get_matrix_of_vectors(wv_from_bin, required_words=['softball', 'technology','street','project','fellow','maps','view','fuel','summer','clubhouse','ball','steal','soccer','driving','motor','comedy']):\n import random\n words = list(wv_from_bin.vocab.keys())\n print(\"Shuffling words ...\")\n random.shuffle(words)\n wrds = words[:10000]\n print(\"Putting %i words into word2Ind and matrix M...\" % len(words))\n word2Ind = {}\n M = []\n curInd = 0\n for w in words:\n try:\n M.append(wv_from_bin.word_vec(w))\n word2Ind[w] = curInd\n curInd += 1\n except KeyError:\n continue\n for w in required_words:\n try:\n M.append(wv_from_bin.word_vec(w))\n word2Ind[w] = curInd\n curInd += 1\n except KeyError:\n continue\n M = np.stack(M)\n print(\"Done.\")\n return M, word2Ind", "def build_doc_sense_vec(self):\n\t\twith codecs.open(self.vocab_file, encoding='utf-8', mode='r') as infile:\n\t\t\tline = infile.readline()\n\t\t\ti = 0\n\t\t\twhile line:\n\t\t\t\tword = line.split()[0]\n\t\t\t\tif not self.word2IdVocabulary.has_key(word):\n\t\t\t\t\t# print i, word\n\t\t\t\t\t# else:\n\t\t\t\t\tself.word2IdVocabulary[word] = i\n\t\t\t\tif not self.id2WordVocabulary.has_key(i):\n\t\t\t\t\tself.id2WordVocabulary[i] = word\n\t\t\t\tline = infile.readline()\n\t\t\t\ti += 1\n\t\t\tself.vocab_num = len(self.word2IdVocabulary)\n\t\t\tprint \"vocabulary number:\" + str(self.vocab_num)\n\n\t\twith codecs.open(self.vec_file, 
encoding='utf-8', mode='r') as vecfile:\n\t\t\twith codecs.open(self.vec_out_file, encoding='utf-8', mode='a+') as vec_outfile:\n\n\t\t\t\tfor i, line in enumerate(vecfile):\n\t\t\t\t\tif i % 10000 == 0:\n\t\t\t\t\t\tprint i\n\t\t\t\t\t# if i > 72:\n\t\t\t\t\t# \tbreak\n\t\t\t\t\tif i == 0:\n\t\t\t\t\t\ta, b, c = map(int, line.split()[:3])\n\t\t\t\t\t\tprint('Number of sememes: {}\\n'\n\t\t\t\t\t\t\t 'Number of words: {}\\n'\n\t\t\t\t\t\t\t 'Dimension of vectors: {}'.format(a, b, c))\n\t\t\t\t\telif i > 462667:\n\t\t\t\t\t\tsline = line.strip('\\n').split()\n\t\t\t\t\t\tword = sline[0]\n\t\t\t\t\t\tvector_list = []\n\t\t\t\t\t\tvector_list.append(sline[1:])\n\t\t\t\t\t\tvector_array = np.array(vector_list)\n\t\t\t\t\t\tword_id = self.word2IdVocabulary[word]\n\t\t\t\t\t\tif not self.vectors.has_key(word_id):\n\t\t\t\t\t\t\tself.vectors[word_id] = vector_array\n\t\t\t\t\t\t# vector_mean = np.mean(vector_array, axis=0)\n\t\t\t\t\t\tif not self.vector_mean.has_key(word_id):\n\t\t\t\t\t\t\tself.vector_mean[word_id] = vector_array\n\t\t\t\t\t\t# vec_outfile.write(line)\n\t\t\t\t\telif i > 462887:\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tsline = line.strip('\\n').split()\n\t\t\t\t\t\tword = sline[0]\n\t\t\t\t\t\tsense_num = int(sline[1])\n\t\t\t\t\t\tvectors = sline[2:sense_num*c+2] # (sense_num*c+2)\n\t\t\t\t\t\tvector_list = []\n\t\t\t\t\t\tfor start in range(0, len(vectors), c):\n\t\t\t\t\t\t\tvector_list.append(list(map(float, vectors[start: start+c])))\n\t\t\t\t\t\tvector_array = np.array(vector_list)\n\t\t\t\t\t\tword_id = self.word2IdVocabulary[word]\n\t\t\t\t\t\tif not self.vectors.has_key(word_id):\n\t\t\t\t\t\t\tself.vectors[word_id] = vector_array\n\t\t\t\t\t\tvector_mean = np.mean(vector_array, axis=0)\n\t\t\t\t\t\tif not self.vector_mean.has_key(word_id):\n\t\t\t\t\t\t\tself.vector_mean[word_id] = vector_mean\n\t\t\t\t\t\t'''j = 0\n\t\t\t\t\t\tfor each_sense_vec in vector_array:\n\t\t\t\t\t\t\tif len(vector_array) > 1:\n\t\t\t\t\t\t\t\tnew_line = word + '_' + str(j) + ' ' + np.array2string(each_sense_vec, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tformatter={'float_kind': lambda x: '%6f' % x})[1:-1] + '\\n'\n\t\t\t\t\t\t\t\tj += 1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tnew_line = word + ' ' + np.array2string(each_sense_vec, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t formatter={'float_kind': lambda\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t x: '%6f' % x})[1:-1] + '\\n'\n\n\t\t\t\t\t\t\tvec_outfile.write(new_line)'''\n\n\t\twith codecs.open(self.doc_file, encoding='utf-8', mode='r') as docfile:\n\t\t\twith codecs.open(self.doc_out_file, encoding='utf-8', mode='a+') as doc_outfile:\n\t\t\t\twith codecs.open(self.vec_out_file_bydoc, encoding='utf-8', mode='a+') as vec_outfile_bydoc:\n\t\t\t\t\tprint \"Processing document file......\"\n\t\t\t\t\tline = docfile.readline().strip('\\n')\n\t\t\t\t\twhile line:\n\t\t\t\t\t\twords = line.split()\n\t\t\t\t\t\tnew_words = [x for x in words]\n\t\t\t\t\t\tfor i in range(len(words)):\n\t\t\t\t\t\t\tword_id = self.word2IdVocabulary[words[i]]\n\t\t\t\t\t\t\tsense_vecs = self.vectors[word_id]\n\t\t\t\t\t\t\tsense_num = len(sense_vecs)\n\t\t\t\t\t\t\tif sense_num > 1:\n\t\t\t\t\t\t\t\tcontext_words = []\n\t\t\t\t\t\t\t\tfor x in range(i-int(self.context_num), i+int(self.context_num)+1):\n\t\t\t\t\t\t\t\t\tif x != i and 0 <= x < len(words):\n\t\t\t\t\t\t\t\t\t\tcontext_words.append(words[x])\n\t\t\t\t\t\t\t\tsense_index = self.select_attention(context_words, sense_vecs)\n\t\t\t\t\t\t\t\tword_vec_i = 
sense_vecs[sense_index]\n\t\t\t\t\t\t\t\tnew_wordi = words[i] + '_' + str(sense_index)\n\t\t\t\t\t\t\t\tself.vector_word_doc[new_wordi.encode('utf-8')] = word_vec_i\n\t\t\t\t\t\t\t\tnew_words[i] = new_wordi\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tword_vec_i = sense_vecs[0]\n\t\t\t\t\t\t\t\tself.vector_word_doc[words[i].encode('utf-8')] = word_vec_i\n\t\t\t\t\t\t\tvec_outfile_bydoc.write(new_words[i] + ' ' + np.array2string(word_vec_i, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t formatter={'float_kind': lambda x: '%6f' % x})[1:-1] + '\\n')\n\n\t\t\t\t\t\tdoc_outfile.write(' '.join(new_words) + '\\n')\n\n\t\t\t\t\t\tline = docfile.readline()\n\n\t\treturn self.vector_word_doc", "def word_vecs(self, raw_label=False):\n utterances, labels = self.read_json()\n # print(utterances)\n # print(self.label_dict)\n utterances = [self.word2vec(u) for u in utterances]\n if raw_label:\n labels = labels\n else:\n labels = [self.label_dict[l] for l in labels]\n\n return utterances, labels", "def to_vector(texto,model,idf):\n tokens = normalizer(texto).split() # splits the text by space and returns a list of words\n vec = np.zeros(300) # creates an empty vector of 300 dimensions\n for word in tokens: # iterates over the sentence\n if (word in model) & (word in idf): # checks if the word is both in the word embedding and the tf-idf model\n vec += model[word]*idf[word] # adds every word embedding to the vector\n if np.linalg.norm(vec) > 0:\n return vec / np.linalg.norm(vec) # divides the vector by their normal\n else:\n return vec", "def build_Wordv(word2vec_dict, k):\r\n vocab_size = len(word2vec_dict)\r\n word2id_dict = dict()\r\n W = np.zeros(shape=(vocab_size + 1, k))\r\n W[0] = np.zeros(k)\r\n i = 1\r\n for word in word2vec_dict:\r\n # print type(word), ' | ', word\r\n W[i] = word2vec_dict[word]\r\n # print type(W[i]), \" | \", W[i]\r\n word2id_dict[word] = i\r\n i += 1\r\n return W, word2id_dict", "def build_input_data(sentences, vocabulary):\n index_list = []\n for word in sentences:\n tmp = vocabulary[word]\n index_list.append(tmp)\n x = np.array(index_list)\n return x", "def build_input_data(sentences, labels, vocabulary):\n x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences])\n y = np.array(labels)\n return [x, y]", "def get_label_vectors():\n print(\"Retrieving label vectors...\")\n label_dict = {} # instantiate dict for labels:vectors\n categories = sorted([c for c in os.listdir('images/') if c[0] != '.']) # ignore hidden files\n x = np.zeros(len(categories)) # zero vector of number of categories\n for i, c in enumerate(categories): # get index and category for images\n y = x.copy() # use copy of x\n y[i] = 1 # set label index to true\n label_dict[c] = y.copy() # create label:vector\n\n return label_dict", "def seq2Vec(sequences):\r\n global dict_words_n_vectors\r\n for sent in sequences:\r\n for i in range(len(sent)):\r\n if sent[i] in dict_words_n_vectors:\r\n sent[i] = dict_words_n_vectors[sent[i]]\r\n else:\r\n sent[i] = np.zeros(300)\r\n return np.array(sequences, dtype=\"float32\")", "def build_input_data(sentences, labels, vocabulary):\n x = np.array([[vocabulary[word] if word in vocabulary else vocabulary['$']\n for word in sentence] for sentence in sentences])\n y = np.array(labels)\n return [x, y]", "def review_to_vec(words, model, num_features , index2word_set):\n \n feature_vec = np.zeros((num_features), dtype=\"float32\")\n word_count = 0\n \n \n \n for word in words:\n if word in index2word_set: \n word_count += 1\n feature_vec += 
model[word]\n\n if word_count == 0:\n word_count = 1\n\n feature_vec /= word_count\n\n return feature_vec", "def local_vocabulary(tuples, voc):\n new_tuples = []\n local_voc0 = Indexer()\n for t, v in tuples:\n new_t = tuple([local_voc0.string_to_int(w) for w in t])\n new_tuples.append((new_t, v))\n local_voc = []\n for w in local_voc0.index_to_string:\n local_voc.append(voc(w))\n return new_tuples, local_voc, voc", "def sentence_to_vec(s, embeddings_dict, stop_words, tokenizer):\n \n words = str(s).lower()\n words = tokenizer(words)\n # remove stop words, if any, and only alpha-numeric tokens\n words = [w for w in words if not w in stop_words and w.isalpha()]\n \n embeddings = []\n for w in words:\n if w in embeddings_dict:\n embeddings.append(embeddings_dict[w])\n \n # dimensions = 300\n if len(embeddings)==0:\n return np.zeros(300)\n\n # list of embeddings to array\n embeddings = np.array(embeddings)\n\n # normalized vector\n sum = embeddings.sum(axis=0)\n return sum/np.sqrt((sum**2).sum())", "def build_vocab(sentences, vocab_limit):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n print( 'Total size of vocab is {}'.format(len(word_counts.most_common())))\n # Mapping from index to word\n # vocabulary_inv = [x[0] for x in word_counts.most_common(vocab_limit)]\n vocabulary_inv = [x[0] for x in word_counts.most_common(vocab_limit)]\n \n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i+1 for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def process_label(intents, w2v,class_id_startpoint=0):\n class_dict = {}\n label_vec = []\n class_id = class_id_startpoint\n \n for line in intents:\n # check whether all the words in w2v dict\n line=line[0]\n label = line.split(' ')\n for w in label:\n if not w in w2v.vocab:\n print('not in w2v dict', w)\n\n # compute label vec\n label_sum = np.sum([w2v[w] for w in label], axis = 0)\n label_vec.append(label_sum)\n # store class names => index\n class_dict[' '.join(label)] = class_id\n class_id = class_id + 1\n #print('=====label vec', label_vec)\n return class_dict, np.asarray(label_vec)", "def create_vectors(list_dict, num_words):\n x = [] # list that will hold data \n\n for d in list_dict:\n # initializing numpy vector\n # it contains 5,000 (number of words) zeros\n temp = np.zeros(num_words, dtype=np.float64)\n for key, val in d.items():\n if key < num_words:\n key -= 1 # indexing in data starts at 1\n temp[key] = 1 # adding word and its frequency to vector \n # temp[key] = val\n x.append(temp) # appends vector to x \n\n return x", "def generate_vocab_dict(vocab):\n v_dict = {}\n for word in vocab:\n if len(word) in v_dict:\n v_dict[len(word)].append(word)\n else:\n v_dict[len(word)] = [word]\n return v_dict", "def load_word_vectors(self, sentence_entry):\n word_vectors = []\n for token, lemma in zip(sentence_entry.tokens, sentence_entry.lemmas):\n # Go through the lookup chain. 
If one of these is found in the vsm,\n # return it, else use the fallback and report oov\n for s in [token, token.lower(), lemma, lemma.lower()]:\n if self.embeddings.contains_word(s):\n vector = self.embeddings.word_to_vec(s)\n self.statistics.known_token()\n break\n else:\n self.statistics.unknown_token()\n vector = self.embeddings.get_zero_fallback()\n\n word_vectors.append(vector)\n return word_vectors", "def _build_vocab(self, sentences, markers=[]):\n from snorkel.learning.pytorch.rnn.utils import SymbolTable\n\n vocab = Counter()\n for sent in sentences:\n for w in sent:\n vocab[w] += 1\n word_dict = SymbolTable()\n list(map(word_dict.get, vocab))\n list(map(word_dict.get, markers))\n return word_dict", "def build_input_data(sentences, vocabulary):\n count = 0\n seq2seq_sentences = []\n for sentence in sentences:\n seq2seq_sentence = []\n for word in sentence:\n try:\n seq2seq_sentence.append(vocabulary[word])\n except KeyError:\n seq2seq_sentence.append(vocabulary['</s>'])\n count += 1\n seq2seq_sentences.append(seq2seq_sentence)\n print count\n return np.array(seq2seq_sentences)", "def featurize(vector,features):\n dictionary = collections.defaultdict(lambda:0)\n for feature in iter(set(features)):\n dictionary[feature] = [vector[key][feature] if feature in vector[key] else 0 for key in vector] #populates vectors with zeroes where there's no value in an industry for an n-gram.\n return dictionary", "def vectorize(self, sentence, embeddings_dict):\n processed_sentence = self.preprocess(sentence)\n\n matrix = []\n for token in processed_sentence:\n if token in embeddings_dict:\n matrix.insert(0, embeddings_dict[token])\n return numpy.matrix(matrix)", "def vectorize(self, sentences, _ngrams=1):\n\n if self.__verbose:\n print('Vectorizing', len(sentences), 'sentences')\n\n vectors = []\n\n for sent in sentences:\n v = []\n for gram in self.ngrams(sent, _ngrams):\n if gram in self.__dictionary:\n v.append(self.__dictionary[gram])\n else:\n v.append(self.__dictionary['unk'])\n vectors.append(v)\n\n return np.asarray(vectors)", "def vocab_from_vectors(vector_kwargs_list, vocab_kwargs):\n \n assert len(vector_kwargs_list) > 0\n vocab_kwargs = deepcopy(vocab_kwargs)\n \n # obtain vectors and counter from list of vector creating keyword arguments\n vectors = list()\n vocab_kwargs[\"counter\"] = Counter()\n \n for kwargs in vector_kwargs_list:\n vecs = Vectors(**kwargs)\n vectors.append(vecs)\n vocab_kwargs[\"counter\"].update(vecs.itos)\n \n vocab_kwargs[\"vectors\"] = vectors\n vocab = Vocab(**vocab_kwargs)\n\n return vocab", "def vocab_from_w2v(word_vectors: gensim.models.word2vec.Word2Vec) -> Dict[str, int]:\n vocab = {\"<PAD>\": 0, \"<UNK>\": 1}\n for index, word in enumerate(word_vectors.wv.index2word):\n vocab[word] = index + 2\n return vocab", "def get_vocabulary(documents):\n cv_model = CountVectorizer(binary=True)\n cv_model.fit(documents)\n\n vocabulary = cv_model.get_feature_names()\n vocabulary = list(map(str, vocabulary))\n\n return vocabulary", "def load_word_embeddings(self, word_embeddings, word_to_ix):\n logger.info(\"Loading the vocabulary\")\n self.vocab = {}\n self.index2word = []\n counts = {}\n for word in word_to_ix:\n counts[word] = counts.get(word, 0) + 1\n self.vocab_size = len(counts)\n self.vector_size = word_embeddings.shape[1]\n self.vectors = np.zeros((self.vocab_size, self.vector_size))\n self.index2word = [None] * self.vocab_size\n logger.info(\"Corpus has %i words\", len(self.vocab))\n for word_id, word in enumerate(counts):\n self.vocab[word] = 
Vocab(index=word_id, count=counts[word])\n self.vectors[word_id] = word_embeddings[word_to_ix[word]]\n self.index2word[word_id] = word\n assert((len(self.vocab), self.vector_size) == self.vectors.shape)\n logger.info(\"Loaded matrix of %d size and %d dimensions\", self.vocab_size, self.vector_size)", "def get_word_vector(doc_id, corpus):\n inv_index = vsm_retrieval.get_inverted_index(corpus)\n word_vec = np.zeros(len(inv_index))\n for count_vec, word in enumerate(inv_index):\n word_vec[count_vec] = inv_index[word].get(doc_id, {'frequency': 0})['frequency']\n return word_vec", "def word2vec_generation(self, utterance, with_punctuations):\n vector = []\n\n #words = self.text_preparation(utterance)\n\n words = utterance\n\n #model_ = Word2Vec.load('model.bin')\n #if not self.is_word_in_word2vec_vocabulary(utterance, model_):\n # self.retrain_model([words])\n\n if with_punctuations:\n new_model = Word2Vec.load('./model/model_word2vec.bin')\n else:\n new_model = Word2Vec.load('./model/model_no_punctuation_word2vec.bin')\n\n\n\n # TODO: how generate word2vec vectors for each utterance using the vocabularies in Word2vec model?\n\n #First: average of Word2Vec vectors in each utterance\n for w in words:\n vector.append(new_model.wv[w])\n\n return np.mean(vector, axis=0)", "def _vector_mapping(self) -> dict:\n words = set()\n for file in os.listdir(self.processed_path):\n doc_path = f\"{self.processed_path}/{file}\"\n with open(doc_path, 'r') as f:\n text_words = f.readline().split()\n words = words.union(set(text_words))\n words = list(words)\n words.sort()\n\n return dict(zip(words, range(len(words))))", "def create_vectors(\n dataset_path_train: str, dataset_path_test: str,\n vectors_path_train: str, vectors_path_test: str\n) -> int:\n dtypes = {\n \"id\": int,\n \"keyword\": str,\n \"location\": str,\n \"text\": str,\n \"text_stemmed\": str,\n \"text_lemmatized\": str,\n }\n\n df_train = pd.read_csv(\n f\"/data/{dataset_path_train}\",\n index_col=\"id\",\n dtype={**dtypes, \"target\": int},\n converters={\"tokens\": ast.literal_eval})\n df_train[\"text_preprocessed\"] = df_train[\"tokens\"].apply(\n lambda x: \" \".join(x))\n\n df_test = pd.read_csv(\n f\"/data/{dataset_path_test}\",\n index_col=\"id\",\n dtype=dtypes,\n converters={\"tokens\": ast.literal_eval})\n df_test[\"text_preprocessed\"] = df_test[\"tokens\"].apply(\n lambda x: \" \".join(x))\n\n vectorizer = sklearn.feature_extraction.text.CountVectorizer()\n vectors_train = vectorizer.fit_transform(df_train[\"text_preprocessed\"])\n vectors_test = vectorizer.transform(df_test[\"text_preprocessed\"])\n\n with open(f\"/data/{vectors_path_train}\", \"wb\") as f:\n pickle.dump(vectors_train, f)\n with open(f\"/data/{vectors_path_test}\", \"wb\") as f:\n pickle.dump(vectors_test, f)\n\n return 0", "def build_input_data_from_word2vec(sentence, word2vec_vocab, word2vec_vec):\n X_data = []\n for word in sentence:\n try:\n word2vec_index = word2vec_vocab[word].index\n word_vector = word2vec_vec[word2vec_index]\n except:\n word2vec_index = word2vec_vocab['<un_known>'].index\n word_vector = word2vec_vec[word2vec_index]\n #word_vector = np.random.uniform(low=-0.25, high=0.25, size=word2vec_vec.shape[1])\n X_data.append(word_vector)\n X_data = np.asarray(X_data)\n return X_data", "def get_vocabulary(text_fname, vocab_fname):\n with codecs.open(text_fname,'r','utf-8') as infile, \\\n codecs.open(vocab_fname,'w','utf-8') as outfile: \n\n count_map={}\n for line in infile:\n sent=line.strip().split(' ')\n for w in sent:\n 
count_map[w]=count_map.get(w,0.0)+1.0\n\n for w,c in count_map.iteritems(): \n outfile.write(u'{}|{}\\n'.format(w,c))", "def convert_by_vocab(vocab, items):\n output = []\n for item in items:\n output.append(vocab[item])\n return output", "def vectorize_tweet(tweet):\n tweet_vector = np.zeros(100)\n for word in tokenize(tweet.text):\n if word in word2vec.wv.vocab:\n tweet_vector = tweet_vector + word2vec[word]\n\n components = pca.transform(tweet_vector)\n x = components[0, 0]\n y = components[0, 1]\n\n return str(x), str(y)", "def vectorize(vector_space, sentence):\n vector = [0] * len(vector_space)\n for word in sentence[0].split():\n vector[vector_space[word]] = 1\n return vector", "def build_vocabulary(self, tokens=None, embeddings=None):\n if tokens is not None and embeddings is not None:\n raise ValueError(\"Only accepts either `tokens` or `embeddings`.\")\n\n if tokens is not None:\n # Build from tokenized tokens\n # for sentence in tqdm(tokens):\n # for word in tokens:\n # print(type(word))\n # exit()\n self.vocab.extend(\n list(set([\n word\n for sentence in tqdm(tokens)\n for word in sentence\n ]))\n )\n elif embeddings is not None:\n # Build from pretrained embeddings\n for word in tqdm(embeddings):\n word = word.strip(\"\\n\")\n word = word.split(\" \")\n\n self.vocab.append(word[0])\n vector = word[1:]\n self.vectors.append(vector)", "def sentence_to_embedding(sent, word_to_vec, seq_len, embedding_dim):\n embedding_vec = np.zeros((seq_len,embedding_dim))\n for i in range(min(len(sent),seq_len)):\n embedding_vec[i,:] = word_to_vec.get(sent[i])\n return embedding_vec", "def load_glove_vec(fname, vocab):\n word_vecs = {}\n with open(fname, \"rb\") as f:\n for i,line in enumerate(f):\n L = line.split()\n word = L[0].lower()\n if word in vocab:\n word_vecs[word] = np.array(L[1:], dtype='float32')\n return word_vecs", "def to_vector(text, model, idf, is_tokenized=False):\n if not is_tokenized: text= text.split() # splits the text by space and returns a list of words\n vec = np.zeros(300) # creates an empty vector of 300 dimensions\n for word in text: # iterates over the sentence\n if (word in model) & (word in idf): # checks if the word is both in the word embedding and the tf-idf model\n vec += model[word]*idf[word] # adds every word embedding to the vector\n if np.linalg.norm(vec) > 0:\n return vec / np.linalg.norm(vec) # divides the vector by their normal\n else:\n return vec", "def vectorize(self,clean_path):\n \n #load pretrained embedding model (GloVe)\n glove = spacy.load('en_core_web_lg')\n #extract unique words (aka vocabulary)\n unique_words = set()\n for d in self.docs: \n txt = d.text\n doc = glove(txt)\n for word in doc: \n if word.has_vector:\n unique_words.add(word.text)\n #change set to list type\n unique_words = list(unique_words)\n #save vector representation\n word_vectors = np.array([glove(word).vector for word in unique_words if glove(word).has_vector])\n #index vectors by corresponding word \n corpus_vectors = pd.DataFrame(word_vectors, index=unique_words)\n with open(clean_path + 'corpus_vectors.pkl', 'wb') as f:\n pickle.dump(corpus_vectors,f)\n self.vectors = corpus_vectors\n print('Saved embedding vectors.')\n return", "def tf_keras_vectorize(train_texts: List[str], test_texts: List[str]) -> Tuple[Any, Any, Dict[str, int]]:\n # create vocabulary with training texts\n tokenizer = Tokenizer(num_words=TOP_K)\n tokenizer.fit_on_texts(train_texts)\n\n # vectorize the training/test texts\n x_train = tokenizer.texts_to_sequences(train_texts)\n x_test = 
tokenizer.texts_to_sequences(test_texts)\n\n # Get max sequence length\n max_length = len(max(x_train, key=len))\n if max_length > MAX_SEQUENCE_LENGTH:\n max_length = MAX_SEQUENCE_LENGTH\n\n x_train = pad_sequences(x_train, maxlen=max_length)\n x_test = pad_sequences(x_test, maxlen=max_length)\n return x_train, x_test, tokenizer.word_index", "def build_vocab(self, corpus):\n if self.vocabulary_counts != None:\n logger.debug(\"building vocabulary from provided frequency map\")\n vocab = self.vocabulary_counts\n else:\n logger.debug(\"default vocabulary building\")\n super(Skipgram, self).build_vocab(corpus)\n return\n\n # assign a unique index to each word\n self.vocab, self.index2word = {}, []\n\n for word, count in vocab.iteritems():\n v = Vocab()\n v.count = count\n if v.count >= self.min_count:\n v.index = len(self.vocab)\n self.index2word.append(word)\n self.vocab[word] = v\n\n logger.debug(\"total %i word types after removing those with count<%s\" % (len(self.vocab), self.min_count))\n\n if self.hs:\n # add info about each word's Huffman encoding\n self.create_binary_tree()\n if self.negative:\n # build the table for drawing random words (for negative sampling)\n self.make_table()\n # precalculate downsampling thresholds\n self.precalc_sampling()\n self.reset_weights()", "def main(gensim_model_path, out_dir, min_count=None):\n \n gensim_model = Word2Vec.load(gensim_model_path)\n vector_map = VectorMap(128)\n\n if min_count is None:\n min_count = gensim_model.min_count\n \n for string in gensim_model.vocab:\n vocab = gensim_model.vocab[string]\n freq, idx = vocab.count, vocab.index\n if freq < min_count:\n continue\n vector = gensim_model.syn0[idx]\n vector_map.borrow(string, freq, vector)\n \n vector_map.save(out_dir)", "def load_vector_dictionary():\n return read_word2vecs_from_file(VECTOR_FILE)", "def get_sentence_to_context_map(sentences):\n # Load the vocab\n en_vocab = get_english_vocab(DATA_DIR,VOCAB_SIZE)\n\n # Allocate the sentences to buckets\n bucketed = {}\n for sentence in sentences:\n bucket_id = get_bucket(en_vocab,sentence)\n bucketed.setdefault(bucket_id,[])\n bucketed[bucket_id].append(sentence)\n\n mapped = {}\n with tf.Session() as sess:\n # Create model and load parameters.\n model = create_model(sess, True, train_dir=TRAIN_DIR)\n model.batch_size = BATCH_SIZE # We decode 64 sentence at a time.\n # Iterate over each bucket\n for bucket_id,sentences in bucketed.iteritems():\n for batch in chunker(sentences,BATCH_SIZE):\n data = []\n # Tokenize each sentence\n for sentence in batch:\n token_ids = data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence), en_vocab)\n expected_output = []\n data.append((token_ids, expected_output))\n # Use the model to obtain contexts for each sentence in the batch\n encoder_inputs, decoder_inputs, target_weights = model.get_batch({bucket_id: data}, bucket_id)\n contexts = model.step_context(sess, encoder_inputs, decoder_inputs, target_weights, bucket_id)\n features = np.hstack(contexts)\n print 'Encoded {0} sentences into {1} dimensional vectors'.format(*features.shape)\n # Now we align sentences with their contexts\n for i,sentence in enumerate(batch):\n mapped[sentence] = features[i,:].tolist()\n return mapped", "def _convert_by_vocab(vocab, items):\n output = []\n for item in items:\n output.append(vocab[item])\n return output", "def sentences2vec(self, sentences, unseen=None):\r\n keys = self.keys\r\n # print(sentences)\r\n if unseen:\r\n unseen_vec = self.model.wv.word_vec(unseen)\r\n\r\n # if unseen:\r\n # 
vec.append([self.model.wv.word_vec(y) if y in set(sentences) & keys\r\n # else unseen_vec for y in sentences])\r\n # else:\r\n # vec.append([self.model.wv.word_vec(y) for y in sentences\r\n # if y in set(sentences) & keys])\r\n vec = np.array([0 for _ in range(300)])\r\n for y in sentences:\r\n if len(vec) == 0:\r\n vec = np.array(self.model.wv.word_vec(y))\r\n elif y in self.keys:\r\n vec = vec + np.array(self.model.wv.word_vec(y))\r\n # print(len(vec))\r\n return vec", "def bag_of_words_vectorizer(datafile, k_features):\n data = []\n labels = []\n\n for jsoned_entity in open(\"data.json\", errors=\"ignore\").readlines():\n entity = json.loads(jsoned_entity)\n if entity[\"lang\"] == \"en\":\n data.append(entity[\"text\"])\n labels.append(entity[\"label\"])\n\n vectorizer = TfidfVectorizer(stop_words=get_stop_words(\"english\"))\n data = vectorizer.fit_transform(data)\n data = SelectKBest(chi2, k=k_features).fit_transform(data, labels)\n\n for vector_label_batch in batch(zip(data, labels), config.BATCH_SIZE):\n vectors = []\n labels = []\n for vec_label in vector_label_batch:\n vectors.append(vec_label[0].toarray())\n labels.append(vec_label[1])\n\n X = np.vstack(vectors)\n Y = np_utils.to_categorical(labels, 2)\n yield X, Y", "def vocabulary(self) -> np.ndarray:\n return np.array(\n list(set(word for text in self.preprocess_corpus for word in text))\n )", "def create_vector(string):\n vec = {}\n words = string.split()\n\n for word in words:\n if len(word) <= NGRAM_SIZE:\n add(vec, word)\n else:\n for i in range(len(word) - NGRAM_SIZE + 1):\n add(vec, word[i : i + NGRAM_SIZE])\n\n return vec", "def convert_by_vocab(vocab, items):\n output = []\n for item in items:\n output.append(vocab[item])\n return output", "def build_data_vectors(annotations, tweets, Tfidf_vect, adr_lexicon_dict, should_balance_set=True):\n\n def vectorize_word(word):\n \"\"\"gives vectorized value from TfidfVectorizer for the given word\n If the word is not part of vocabulary, 0 will be returned\n\n # Arguments\n word - word to vectorize\n\n # Returns\n vectorized value\n \"\"\"\n if word in Tfidf_vect.vocabulary_:\n i = Tfidf_vect.vocabulary_[word]\n return Tfidf_vect.idf_[i]\n else:\n return 0\n\n def clean_text(text):\n \"\"\"Cleans the text\n This code snippet is taken from https://towardsdatascience.com/multi-label-text-classification-with-scikit-learn-30714b7819c5\n Author: Susan Li\n\n # Arguments\n text - text to clean\n\n # Returns\n cleaned text\n \"\"\"\n text = text.lower()\n text = re.sub(r\"what's\", \"what is \", text)\n text = re.sub(r\"\\'s\", \" \", text)\n text = re.sub(r\"\\'ve\", \" have \", text)\n text = re.sub(r\"can't\", \"can not \", text)\n text = re.sub(r\"n't\", \" not \", text)\n text = re.sub(r\"i'm\", \"i am \", text)\n text = re.sub(r\"\\'re\", \" are \", text)\n text = re.sub(r\"\\'d\", \" would \", text)\n text = re.sub(r\"\\'ll\", \" will \", text)\n text = re.sub(r\"\\'scuse\", \" excuse \", text)\n text = re.sub('\\W', ' ', text)\n text = re.sub('\\s+', ' ', text)\n text = text.strip(' ')\n return text\n\n X = []\n Y = []\n adr_labels_size = 0\n nonadr_labels_size = 0\n for i, (k, v) in enumerate(annotations.items()):\n tweet_text = clean_text(tweets[k])\n tokens = word_tokenize(tweet_text)\n\n for annotation_index, annotation in enumerate(v):\n prev_token_adr = False\n\n annotated_text = clean_text(annotation['annotatedText'])\n annotated_text_tokens = word_tokenize(annotated_text)\n\n for index, focus_word in enumerate(tokens):\n focus_vector = []\n\n # for Context feature, get 
index for 3 surrounding words on each side of focus word\n if program_args.context_feature:\n focus_vector.append(vectorize_word(tokens[index-3]) if (index-3 >= 0) else 0)\n focus_vector.append(vectorize_word(tokens[index-2]) if (index-2 >= 0) else 0)\n focus_vector.append(vectorize_word(tokens[index-1]) if (index-1 >= 0) else 0)\n focus_vector.append(vectorize_word(tokens[index]))\n focus_vector.append(vectorize_word(tokens[index+1]) if (index+1 < len(tokens)) else 0)\n focus_vector.append(vectorize_word(tokens[index+2]) if (index+2 < len(tokens)) else 0)\n focus_vector.append(vectorize_word(tokens[index+3]) if (index+3 < len(tokens)) else 0)\n\n if program_args.adrlexicon_feature:\n if focus_word in adr_lexicon_dict:\n focus_vector.append(1)\n else:\n focus_vector.append(0)\n\n if program_args.prev_adrlexicon_feature:\n if prev_token_adr:\n focus_vector.append(1)\n else:\n focus_vector.append(0)\n\n # assign class label\n if annotation['semanticType'] == 'ADR' and focus_word in annotated_text_tokens:\n Y.append(ADR_MENTION_CLASS_LABEL)\n X.append(focus_vector)\n adr_labels_size += 1\n prev_token_adr = True\n else:\n Y.append(NON_ADR_MENTION_CLASS_LABEL)\n X.append(focus_vector)\n nonadr_labels_size += 1\n prev_token_adr = False\n\n print(\" Dataset size: {}\".format(len(X)))\n print(\" {} class size: {}\".format(ADR_MENTION_CLASS_NAME, adr_labels_size))\n print(\" {} class size: {}\".format(NON_ADR_MENTION_CLASS_NAME, nonadr_labels_size))\n\n if should_balance_set:\n X, Y = balance_set(X, Y, adr_labels_size, nonadr_labels_size)\n\n X = scipy.sparse.csr_matrix(X)\n return X, Y", "def vectorize_text(corpus):\n bag_of_words_model = CountVectorizer()\n\n # performs the above described three tasks on the given data corpus.\n dense_vec_matrix = bag_of_words_model.fit_transform(corpus).todense()\n bag_of_word_df = pd.DataFrame(dense_vec_matrix)\n bag_of_word_df.columns = sorted(bag_of_words_model.vocabulary_)\n return bag_of_word_df", "def make_embedding(path, words, indices):\n #root = '/'.join(path.split('/')[0:-1])\n #all_paths = [root+'/'+x for x in os.listdir(root)] #'/'.join(path.split('/')[0:-1]))\n #for path in all_paths:\n vec_path = 'data/'+path.split('/')[-1]+'_'+mode\n print(vec_path)\n if os.path.exists(vec_path+'.npy'):\n np_vecs = np.load(vec_path+'.npy')\n else:\n words_len = len(words)\n vecs = []\n if mode == 'word':\n f = load_model('wiki.en.bin')\n for i, w in enumerate(words):\n if mode == 'word':\n vec = f.get_word_vector(w)\n else:\n vec = eye[indices[w]]\n vecs.append(vec) \n if i % 10000 == 0:\n print(\"{} / {}\".format(i, words_len))\n np_vecs = np.asarray(vecs, dtype=np.int8)\n np.save(vec_path, np_vecs)\n return np_vecs", "def _words_to_vec(self, sentence):\n return torch.FloatTensor([self._use_embeddings(word) for word in sentence])", "def question_to_vec(question, embeddings, dim):\r\n\r\n words = question.split()\r\n\r\n counter = 0\r\n res = np.zeros(dim)\r\n for word in words:\r\n if word in embeddings:\r\n res += np.array(embeddings[word])\r\n counter += 1\r\n if counter!=0:\r\n return res/counter # mean of all word embeddings\r\n else:\r\n return res # vector of zeros\r", "def build_vocabulary(self):\n \n for iCount in range(0,len(self.documents)):\n for jCount in range(iCount,len(self.documents[iCount])):\n self.vocabulary.append(self.documents[iCount][jCount])\n\n self.vocabulary = set(self.vocabulary)\n\t\t\n self.vocabulary = sorted(self.vocabulary)\n\t\t#print(\"Value of the vocabulary\")\n self.vocabulary_size = len(self.vocabulary)", "def 
get_weibo_data(vocab_file, vector_file):\n if os.path.exists(\"word_misc.pkl\"):\n return cPickle.load(open(\"word_misc.pkl\", \"rb\"))\n\n word_misc, word2id, id2word = {}, {}, {}\n word_count = 0\n\n # vocab file\n print \"Building vocabulary ...\"\n for lines in open(vocab_file).readlines():\n word = lines.split()[0]\n if not is_unwanted_words(word, ['', '\\n']):\n word2id[word] = word_count\n id2word[word_count] = word\n word_count += 1\n word2id['_START'] = word_count\n id2word[word_count] = '_START'\n word_count += 1\n word2id['_END'] = word_count\n id2word[word_count] = '_END'\n word_count += 1\n word2id['_UNK'] = word_count\n id2word[word_count] = '_UNK'\n word_count += 1\n word2id['_MASK'] = word_count\n id2word[word_count] = '_MASK'\n word_count += 1\n print \"Vocabulary size:\", word_count\n\n # Initialization is refered to in https://www.tensorflow.org/versions/r0.7/tutorials/word2vec/index.html\n word_emb = (1/np.sqrt(word_count)*(2*np.random.rand(word_count, options['embedding_size']) - 1)).tolist()\n\n # load word vectors\n for lines in open(vector_file).readlines()[1:]:\n word = lines.split()[0]\n #if word == '</s>' or word not in word2id.keys():\n # continue\n if word not in word2id.keys():\n continue\n ids = word2id[word]\n #print ids, lines, len(word_emb)\n word_emb[ids] = [float(w) for w in lines.split()[1:]]\n\n print len(word_emb), \"words have been loaded with\", len(word_emb[0]), \"dimensions\"\n\n # load word misc\n word_misc['id2word'] = id2word\n word_misc['word2id'] = word2id\n word_misc['word_count'] = word_count\n word_misc['word_emb'] = word_emb\n cPickle.dump(word_misc, open(\"word_misc.pkl\", \"wb\"))\n print \"Dump complete.\"\n return word_misc", "def load_word2vect(self, file_path):\n self.embeddings = []\n self.word_to_idx = {'<pad>' : 0}\n self.vocab = ['<pad>']\n\n model = w2v.load(file_path)\n self.embedding_size = model.vectors.shape[1]\n pad_embedding = np.zeros(self.embedding_size, \"float32\")\n self.embeddings.append(pad_embedding)\n\n train_words_set = set([word for text in self.train_data for word in\n text[1].split(\" \")])\n\n for w in model.vocab:\n if w in train_words_set:\n self.word_to_idx[w] = len(self.vocab)\n self.vocab.append(w)\n self.embeddings.append(model[w])\n\n del model", "def compute_avg_w2v_vector(w2v_dict, text_nlp_proc):\n SIZE = 50 # size of the w2v dimension\n list_of_word_vectors = [w2v_dict[w] for w in text_nlp_proc if w in w2v_dict.vocab.keys()]\n if len(list_of_word_vectors) == 0:\n result = [0.0]*SIZE\n else:\n result = np.sum(list_of_word_vectors, axis=0) / len(list_of_word_vectors)\n return result", "def convert_by_vocab(vocab, items):\n output = []\n for item in items:\n\tif item in vocab:\n\t output.append(vocab[item])\n\telse:\n\t output.append(vocab['[UNK]'])\n return output", "def build_vocab(sentences, max_num_words):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences)).most_common()\n if max_num_words != 0 and max_num_words < len(word_counts):\n word_counts = word_counts[:max_num_words]\n\n # Mapping from index to word\n vocabulary = dict()\n index = 0\n for x in word_counts:\n vocabulary[index] = x[0]\n index += 1\n\n return vocabulary", "def make_corpus(self, t, v=None):\n v = self.vectorizer\n\n try:\n corpus = v.transform(t)\n except ValueError, e:\n return None, None\n \n vocab = {y:x for x,y in v.vocabulary_.iteritems()}\n corpus = gensim.matutils.Sparse2Corpus(corpus, documents_columns=False)\n return corpus, vocab", "def build_input_data(sentences, labels, vocabulary):\n x = 
np.array(\n [[vocabulary[word] for word in sentence] for sentence in sentences]\n )\n y = np.array(labels)\n y = y.argmax(axis=1)\n return [x, y]", "def one_hot_vocab_encoding(w2vp: W2VPreprocessor \n ) -> Dict[str, np.ndarray]:\n return {\n w: i for i, w in enumerate(w2vp.vocabulary)\n }", "def vocabulary(self):\n lst = []\n for key in self.frequencies().keys():\n lst.append(key)\n return sorted(lst)\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # for word in wordslst:\n # if word not in lst:\n # lst.append(word.lower())\n #return sorted(lst)", "def get_embedding_matrix(word_vecs, k=300):\n vocab_size = len(word_vecs)\n word_idx_map = dict()\n W = np.zeros(shape=(vocab_size+1, k), dtype='float32') \n W[0] = np.zeros(k, dtype='float32')\n i = 1\n for word in word_vecs:\n W[i] = word_vecs[word]\n word_idx_map[word] = i\n i += 1\n return W, word_idx_map", "def get_vector(word, model):\n return model.wv[word]", "def get_W(word_vecs, vocab, k=300):\n vocab_size = len(vocab)\n word_idx_map = dict()\n W = np.zeros(shape=(vocab_size, k), dtype='float32')\n i = 0\n for word in vocab:\n W[i] = word_vecs[word]\n word_idx_map[word] = i\n i += 1\n # W[0] = np.zeros(k, dtype='float32')\n return W, word_idx_map", "def conv_word_to_indexed_txt(txt_vec):\n\n # transform words into integer indexes, comes out as n x m\n # where n = # txt doc, m = # unique words for whole universe\n vectorizer = CountVectorizer(\n stop_words=customised_stopword,\n analyzer='word'\n )\n # CountVectorizer(ngram_range=(1,2), analyzer='word')\n sparse_count_vec = vectorizer.fit_transform(txt_vec)\n\n # create n x p list of words represented by ints,\n # where p = # words in each documentx\n # written in such a convoluted way for speed optimization purposes\n x_vec, y_vec, count_vec = sparse.find(sparse_count_vec)\n\n # add in duplicates\n x_vec = np.repeat(x_vec, count_vec)\n y_vec = np.repeat(y_vec, count_vec)\n\n # convert to torch variables\n x_vec = torch.tensor(x_vec, dtype=torch.int32)\n y_vec = torch.tensor(y_vec, dtype=torch.float)\n\n # sort the vecs\n sort_ix = torch.argsort(x_vec)\n x_vec = x_vec[sort_ix]\n y_vec = y_vec[sort_ix]\n\n x_vec_bincount = torch.bincount(x_vec.cpu())\n bincount_tup = tuple(int(bincount) for bincount in x_vec_bincount)\n indexed_txt_list = list(torch.split(y_vec, bincount_tup))\n\n # the dictionary key to match each word to int\n vocab_dict = vectorizer.vocabulary_\n\n print(\"Converted words to indexes of integers.\")\n\n vocab_count = sparse_count_vec.data\n\n return indexed_txt_list, vocab_dict, vocab_count", "def glove_embedding(self, texts, file):\n self.embedding_dict = dict()\n glove_file = open(file, encoding='utf-8')\n for line in glove_file:\n word_vector = line.split()\n word = word_vector[0]\n word_vector_arr = np.asarray(word_vector[1:], dtype='float32')\n self.embedding_dict[word] = word_vector_arr\n glove_file.close()\n \n i = 0\n with pgb.ProgressBar(max_value=len(texts)) as bar:\n for text in texts:\n vec = []\n text = text.split()\n for t in text:\n try:\n vec.append(self.embedding_dict[t.lower()])\n except KeyError:\n pass\n ## There are no matched words\n if len(vec) == 0:\n print(\"len 0 vec\")\n self.word_vec.append(np.zeros((100)))\n else:\n #print(np.array(vec))\n #print(np.array(vec).shape)\n sentence = self.sentence_vec(np.array(vec))\n #print(sentence)\n #print(sentence.shape)\n self.word_vec.append(sentence)\n i += 1\n bar.update(i)\n self.word_vec = np.array(self.word_vec)\n print(self.word_vec.shape)", "def 
convert_to_idx(lines):\n for idx, l in enumerate(lines):\n line_temp = []\n for v in l:\n try:\n line_temp.append(vocab_idx[v])\n except KeyError:\n line_temp.append(vocab_idx['<unk>'])\n lines[idx] = line_temp\n return lines", "def get_W(word_vecs, vocab, k=300):\n vocab_size = len(word_vecs)\n word_idx_map = dict()\n W = np.zeros(shape=(vocab_size+2, k), dtype='float32')\n W[0] = np.zeros(k, dtype='float32') # padding vector\n i = 1\n for word in vocab:\n \tif word_vecs.has_key(word):\n \tW[i] = word_vecs[word]\n \tword_idx_map[word] = i\n \ti += 1\n else:\n \tword_idx_map[word] = vocab_size+1\n W[vocab_size+1] = np.zeros(k, dtype='float32')\n return W, word_idx_map", "def gen_review_vecs(reviews, model, num_features):\n\n curr_index = 0\n review_feature_vecs = np.zeros((len(reviews), num_features), dtype=\"float32\")\n\n # index2word is a list consisting of all words in the vocabulary\n # Convert list to set for speed\n index2word_set = set(model.wv.index2word)\n for review in reviews:\n\n #if curr_index%1000 == 0.:\n # print \"Vectorizing review %d of %d\" % (curr_index, len(reviews))\n \n review_feature_vecs[curr_index] = review_to_vec(review, model, num_features , index2word_set)\n curr_index += 1\n \n return review_feature_vecs", "def vocabulary(corpus_tokenized):\n vocab = list()\n for element in corpus_tokenized:\n document = element['document']\n for word in document:\n if word not in vocab:\n vocab.append(word)\n return vocab", "def build_vocab(sentences_list, vocab_size, visual_fld):\n words = [word for sentence in sentences_list for word in sentence]\n utils.safe_mkdir(visual_fld)\n with open(os.path.join(visual_fld, 'vocab.tsv'), 'w') as fd:\n dictionary = {}\n index_dictionary = {}\n count = [('UNK', -1)]\n count.extend(Counter(words).most_common(vocab_size - 1))\n for index, (word, _) in enumerate(count):\n dictionary[word] = index\n index_dictionary[index] = word\n fd.write(word + '\\n')\n\n return dictionary, index_dictionary" ]
[ "0.7294427", "0.70792955", "0.70614845", "0.700578", "0.700578", "0.700578", "0.699909", "0.69712275", "0.6877698", "0.68140954", "0.6795766", "0.6701864", "0.66283876", "0.6619812", "0.6589712", "0.6576624", "0.65659446", "0.6559201", "0.6493492", "0.648277", "0.64260834", "0.6416471", "0.64106005", "0.64052486", "0.6399372", "0.63617575", "0.6361493", "0.63566273", "0.63546485", "0.63325065", "0.63291043", "0.6320571", "0.63167566", "0.6310013", "0.63091743", "0.62965417", "0.6288154", "0.62608874", "0.6254384", "0.62478167", "0.6237708", "0.6230789", "0.6217352", "0.6212624", "0.6171727", "0.6158767", "0.61118466", "0.61046994", "0.6101324", "0.60891914", "0.6085239", "0.6076603", "0.6071914", "0.6068483", "0.6062669", "0.6048913", "0.6047568", "0.6039601", "0.60380244", "0.6027107", "0.6023486", "0.6010144", "0.6008467", "0.6007123", "0.599951", "0.5993858", "0.59926236", "0.5991568", "0.5990795", "0.5984202", "0.59807587", "0.59742105", "0.5971145", "0.5964023", "0.5962237", "0.5949299", "0.59380174", "0.5929443", "0.59241253", "0.5918087", "0.5915821", "0.5912167", "0.59107506", "0.5907672", "0.5902142", "0.5895892", "0.589551", "0.5893999", "0.589123", "0.5888183", "0.58805573", "0.58708966", "0.58706886", "0.5870662", "0.5863725", "0.585547", "0.5849288", "0.58446515", "0.5834105", "0.58066094" ]
0.58939606
88
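For context, a minimal usage sketch of the build_input_data document above; the toy vocabulary, sentences, labels, and position features below are illustrative assumptions (not part of this record), and it assumes numpy is imported as np and the function is in scope:

import numpy as np

# hypothetical toy inputs
vocabulary = {"<PAD/>": 0, "the": 1, "cat": 2, "sat": 3}
sentences = [["the", "cat", "sat"], ["the", "cat", "<PAD/>"]]
labels = [[0, 1], [1, 0]]
pos1_sentences = [[0, 1, 2], [0, 1, 2]]  # assumed relative-position features
pos2_sentences = [[2, 1, 0], [2, 1, 0]]

x, y, a1, a2 = build_input_data(sentences, labels, vocabulary, pos1_sentences, pos2_sentences)
# x == [[1, 2, 3], [1, 2, 0]]: each word is replaced by its vocabulary index;
# y, a1, a2 are the labels and position features as numpy arrays, returned as [x, y, a1, a2]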
Pads all sentences to the same length. The length is defined by the longest sentence. Returns padded sentences.
def pad_sentences(sentences, padding_word="<PAD/>", sequence_length=0):
    if sequence_length == 0:
        sequence_length = max(len(x) for x in sentences)
    padded_sentences = []
    for i in range(len(sentences)):
        sentence = sentences[i]
        num_padding = sequence_length - len(sentence)
        new_sentence = sentence + [padding_word] * num_padding
        padded_sentences.append(new_sentence)
    return padded_sentences
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pad_sentences(sentences, padding_word=\"<PAD/>\"):\n sequence_length = max(len(x) for x in sentences)\n padded_sentences = []\n for i in range(len(sentences)):\n sentence = sentences[i]\n num_padding = sequence_length - len(sentence)\n new_sentence = sentence + [padding_word] * num_padding\n padded_sentences.append(new_sentence)\n return padded_sentences", "def pad_sentences(self, sentences, padlen, padding_word=\"<PAD/>\"):\n if padlen == None:\n sequence_length = max(len(x) for x in sentences)\n else:\n sequence_length = padlen\n\n padded_sentences = []\n for i in range(len(sentences)):\n sentence = sentences[i]\n num_padding = sequence_length - len(sentence)\n new_sentence = sentence + [padding_word] * num_padding\n padded_sentences.append(new_sentence)\n return padded_sentences, sequence_length", "def pad_sentences(sentences, saved_sequence_length=None, padding_word=\"<PAD/>\"):\n if saved_sequence_length:\n sequence_length = saved_sequence_length\n else:\n sequence_length = max(len(x) for x in sentences)\n padded_sentences = []\n for i in range(len(sentences)):\n sentence = sentences[i]\n num_padding = sequence_length - len(sentence)\n if num_padding < 0:\n new_sentence = sentence[:num_padding] # chop off the end\n else:\n new_sentence = sentence + [padding_word] * num_padding\n padded_sentences.append(new_sentence)\n return padded_sentences", "def pad_sentences(sentences, padding_word=\"<PAD/>\"):\n # !!! 一定要注意这里会影响数据的形状,要与代码内的 sequence length 保持一致 !!!\n sequence_length = 30\n # sequence_length = max(len(x) for x in sentences)\n padded_sentences = []\n for i in range(len(sentences)):\n sentence = sentences[i][:sequence_length]\n num_padding = sequence_length - len(sentence)\n new_sentence = sentence + [padding_word] * num_padding\n padded_sentences.append(new_sentence)\n return padded_sentences", "def pad_sentences(sentences, sequence_length=0, padding_word=\"<PAD/>\"):\n if sequence_length == 0:\n sequence_length = max(len(sent) for sent in sentences)\n\n padded_sentences = []\n for sent in sentences:\n if len(sent) < sequence_length:\n num_padding = sequence_length - len(sent)\n new_sentence = sent + [padding_word] * num_padding\n else:\n new_sentence = sent[:sequence_length]\n padded_sentences.append(new_sentence)\n return padded_sentences", "def pad_sentences(sentence, sequence_length, padding_word=\"<PAD/>\"):\r\n sequence_length = 20\r\n sentence_list = sentence.strip().split(' ')\r\n if 200 > len(sentence_list):\r\n num_padding = sequence_length - len(sentence_list)\r\n padding_word = \"<PAD/>\"\r\n new_sentence = sentence_list + [padding_word] * num_padding\r\n else:\r\n new_sentence = sentence_list[0:sequence_length]\r\n return new_sentence", "def pad_sentences(sentences, padding_word=\"<PAD/>\", forced_sequence_length=None):\n if forced_sequence_length is None: # Train\n sequence_length = max(len(x) for x in sentences)\n else: # Prediction\n logging.critical('This is prediction, reading the trained sequence length')\n sequence_length = forced_sequence_length\n logging.critical('The maximum length is {}'.format(sequence_length))\n\n padded_sentences = []\n for i in range(len(sentences)):\n sentence = sentences[i]\n num_padding = sequence_length - len(sentence)\n\n if num_padding < 0: # Prediction: cut off the sentence if it is longer than the sequence length\n logging.info('This sentence has to be cut off because it is longer than trained sequence length')\n padded_sentence = sentence[0:sequence_length]\n else:\n padded_sentence = sentence + [padding_word] * 
num_padding\n padded_sentences.append(padded_sentence)\n return padded_sentences", "def pad_sentences(sentences, padding_word=\"<PAD/>\", forced_sequence_length=None):\n\tif forced_sequence_length is None: # Train\n\t\tsequence_length = max(len(x) for x in sentences)\n\telse: # Prediction\n\t\tlogging.critical('This is prediction, reading the trained sequence length')\n\t\tsequence_length = forced_sequence_length\n\tlogging.critical('The maximum length is {}'.format(sequence_length))\n\n\tpadded_sentences = []\n\tfor i in range(len(sentences)):\n\t\tsentence = sentences[i]\n\t\tnum_padding = sequence_length - len(sentence)\n\n\t\tif num_padding < 0: # Prediction: cut off the sentence if it is longer than the sequence length\n\t\t\tlogging.info('This sentence has to be cut off because it is longer than trained sequence length')\n\t\t\tpadded_sentence = sentence[0:sequence_length]\n\t\telse:\n\t\t\tpadded_sentence = sentence + [padding_word] * num_padding\n\t\tpadded_sentences.append(padded_sentence)\n\treturn padded_sentences", "def pad_sentences(sentences, padding_word=\"PAD\"):\n sequence_length = max(len(x) for x in sentences)\n print('vector size: ' + str(sequence_length))\n global trainset_average_length\n trainset_average_length = sequence_length\n padded_sentences = []\n for i in range(len(sentences)):\n sentence = sentences[i]\n num_padding = sequence_length - len(sentence)\n new_sentence = sentence + [padding_word] * num_padding\n padded_sentences.append(new_sentence)\n return padded_sentences", "def pad_sentence_batch(sentence_batch):\n max_sentence = max([len(sentence) for sentence in sentence_batch])\n return [sentence + [CODES['<PAD>']] * (max_sentence - len(sentence))\n for sentence in sentence_batch]", "def _pad_sentence_length(self, sentence):\n if len(sentence) > self.pad_sentence_length:\n sentence = sentence[:self.pad_sentence_length]\n sentence[self.pad_sentence_length-1] = \"@EOS@\"\n elif len(sentence) < self.pad_sentence_length:\n sentence.extend([\"@PAD@\"] * (self.pad_sentence_length - len(sentence)))\n return sentence", "def pad_sents(sents, pad_token):\n MAX_LEN = max([len(sent) for sent in sents])\n sents_padded = [sent[:] for sent in sents]\n for sent in sents_padded:\n if len(sent) < MAX_LEN:\n sent += [pad_token]*(MAX_LEN - len(sent))\n return sents_padded", "def pad_sents(sents, pad_token):\n sents_padded = []\n\n max_length = max([len(sent) for sent in sents])\n sents_padded = [[sent[i] if i < len(sent) else pad_token for i in range(max_length)] for sent in sents]\n\n return sents_padded", "def pad_to_length(word_embeddings, length, padding):\n\n for sentence in word_embeddings:\n num_to_append = length - len(sentence)\n assert num_to_append >= 0\n for _ in range(num_to_append):\n sentence.append(padding)", "def pad_words(words, length):\n diff_len = length - len(words)\n if diff_len <= 0:\n return words\n return words + [\"padding\"] * diff_len", "def pad_sequences(data, max_length):\n ret = []\n\n # Use this zero vector when padding sequences.\n zero_vector = [0] * Config.n_features\n zero_label = len(LBLS)-1 # corresponds to the 'O' tag\n\n for sentence, labels in data:\n ### YOUR CODE HERE (~4-6 lines)\n newSentence = []\n newLabels = []\n mask = []\n \n for i in range(0, max_length):\n if(i < len(sentence)):\n newSentence.append(sentence[i])\n newLabels.append(labels[i])\n mask.append(True)\n else:\n newSentence.append(zero_vector)\n newLabels.append(zero_label)\n mask.append(False)\n ret.append( (newSentence, newLabels, mask,[len(sentence)]) )\n ### 
END YOUR CODE ###\n return ret", "def _pad_large(self, arrays, sentinel):\n # Compute max length.\n maxlen_ctx = 0\n maxlen_sent = 0\n for array in arrays:\n maxlen_ctx = max(maxlen_ctx, len(array))\n for seq in array:\n maxlen_sent = max(maxlen_sent, len(seq))\n\n # Pad contexts\n ctx_lens = []\n ctx_sent_lens = []\n padded_ctxs = []\n for array in arrays:\n ctx_lens.append(len(array))\n padding = maxlen_ctx - len(array)\n padded_ctx = array + [[sentinel]] * padding\n # Pad sents\n padded = []\n lens = []\n for i, seq in enumerate(padded_ctx):\n padding = maxlen_sent - len(seq)\n padded.append(seq + [sentinel] * padding)\n lens.append(len(seq) if i < ctx_lens[-1] else 0)\n\n padded_ctxs.append(padded)\n ctx_sent_lens.append(lens)\n\n return padded_ctxs, ctx_lens, ctx_sent_lens", "def create_padded_sent(self, pad_len: numbers.Integral) -> 'Sentence':\n raise NotImplementedError(\"must be implemented by subclasses\")", "def pad_sentence_batch(sentence_batch):\r\n max_sentence = max([len(sentence) for sentence in sentence_batch])\r\n return [sentence + [vocab_to_int['<PAD>']] * (max_sentence - len(sentence)) for sentence in sentence_batch]", "def pad_sequence(sequence, max_length, pad):\n padN = max(max_length - len(sequence), 0)\n result = sequence[:max_length - padN] + [pad] * padN\n return result", "def wrap(cls, text, first=0, indent=15, maxwidth=75):\n outstr = []\n sentence = []\n if not text:\n return \"\"\n for word in text.split():\n if len(\" \".join(sentence)) + len(word) + first > maxwidth:\n outstr.append(\" \".join(sentence))\n sentence = [\" \" * indent, word]\n first = 0\n else:\n sentence.append(word.strip())\n outstr.append(\" \".join(sentence))\n return \"\\n\".join(outstr)", "def pad_sents_char(sents, char_pad_token):\n # Words longer than 21 characters should be truncated\n max_word_length = 21\n\n max_sent_length = max([len(sent) for sent in sents])\n padding_word = [char_pad_token] * max_word_length\n\n sents_padded = [[sent[i] if i < len(sent) else padding_word for i in range(max_sent_length)] for sent in sents]\n sents_padded = [[[word[i] if i < len(word) else char_pad_token for i in range(max_word_length)]\n for word in sent] for sent in sents_padded]\n\n return sents_padded", "def pad_sentence_(self, sentence):\n if len(sentence) > self.sentence_length:\n sentence = sentence[:self.sentence_length - 1]\n sentence.append('.')\n\n elif len(sentence) < self.sentence_length:\n for _ in range(self.sentence_length - len(sentence)):\n sentence.append(self.padding_token)\n\n # print(sentence)\n assert(len(sentence) == self.sentence_length)\n return sentence", "def pad_to_max_length(self, sequence):\n sequence = sequence[:self.max_seq_length]\n n = len(sequence)\n #return sequence + ['[PAD]'] * (self.max_seq_length - n)\n return sequence + [0] *(self.max_seq_length - n)", "def pad_sequences(sequences):\n max_len = max(s.shape[0] for s in sequences)\n padded = []\n for seq in sequences:\n zero_pad = np.concatenate(\n [seq, np.zeros((max_len - seq.shape[0], ) + seq.shape[1:])])\n padded.append(zero_pad[np.newaxis, :])\n\n return np.concatenate(padded, axis=0)", "def pad_sentence_batch(sentence_batch, pad_int):\n max_sentence = max([len(sentence) for sentence in sentence_batch])\n return [sentence + [pad_int] * (max_sentence - len(sentence)) for sentence in sentence_batch]", "def padding(sentence_list):\n max_len = max([sentence.size(0) for sentence in sentence_list])\n pad_sen = [sen.tolist() + [pad_index] * max(0, max_len - len(sen))\n for sen in sentence_list]\n return 
torch.LongTensor(pad_sen).transpose(0, 1) # shape of (T, B)", "def padding_input(sents, pad_token=\"<pad>\", tgt_len=-1):\n if tgt_len == -1:\n tgt_len = max(len(s) for s in sents)\n batch_size = len(sents)\n seqs = []\n for i in range(batch_size):\n seqs.append(sents[i][0:tgt_len] + [pad_token] * (tgt_len - len(sents[i])))\n return seqs", "def pad_labellings(labels):\n target_length = max([len(labels) for labels in labels])\n padded = []\n\n for label in labels:\n padding_size = target_length - len(label)\n\n padded_label = label + [0] * padding_size\n\n assert len(padded_label) > 0\n\n padded.append(padded_label)\n\n return padded", "def pad_tweets(tweets, padding_word=\"<PAD/>\", sequence_length=None):\n if sequence_length is None:\n sequence_length = max(len(x) for x in tweets)\n padded_tweets = []\n for i in range(len(tweets)):\n tweet = tweets[i]\n num_padding = sequence_length - len(tweet)\n padded = tweet + [padding_word] * num_padding\n padded_tweets.append(padded)\n return padded_tweets", "def paddingSequence(X_train, X_test, maxLen=30):\r\n #######equalize list of seq\r\n X_train = pad_sequences(X_train, maxLen, padding='post', truncating='post')\r\n X_test = pad_sequences(X_test, maxLen, padding='post', truncating='post')\r\n return X_train, X_test", "def pad_data(d):\n max_len = set((len(i) for i in d))\n if len(max_len) == 1:\n return d\n else:\n max_len = max(max_len)\n return [i + [\"\"] * (max_len - len(i)) for i in d]", "def _pad(self, array, sentinel, max_len=None):\n # Compute max length.\n maxlen = 0\n for seq in array:\n maxlen = max(maxlen, len(seq))\n\n if max_len is not None:\n maxlen = max(maxlen, max_len)\n\n # Pad.\n padded = []\n lens = []\n for seq in array:\n padding = maxlen - len(seq)\n padded.append(seq + [sentinel] * padding)\n lens.append(len(seq))\n\n return padded, lens", "def pad_data(data, max_length):\n ret = []\n\n # Use this zero vector when padding sequences.\n zero_vector = [0] * Config.n_features\n zero_label = 0\n\n for sentence, labels, attr in data:\n ### YOUR CODE HERE (~4-6 lines)\n labels_copy = labels[:]\n sentence_copy = sentence[:]\n sentence_length = len(sentence_copy)\n diff = max_length - sentence_length\n if diff > 0:\n sentence_copy += [zero_vector]*diff\n labels_copy += [zero_label]*diff\n mask = [(i < sentence_length) for i,_ in enumerate(sentence_copy)]\n ret.append((sentence_copy[:max_length], labels_copy[:max_length] , mask[:max_length], attr))\n ### END YOUR CODE ###\n return ret", "def _pad_sequences(sequences, pad=PAD):\n lengths = [tf.shape(x)[0] for x in sequences]\n padded_size = tf.reduce_max(lengths)\n padded_sequences = tf.stack([\n tf.pad(x,\n paddings=[[0, padded_size - lengths[i]]],\n mode='CONSTANT',\n constant_values=pad) for i, x in enumerate(sequences)\n ])\n return padded_sequences, lengths", "def pad_middle(sent: list, max_len: int):\n num_pads = max_len-len(sent)\n padding = num_pads * [None]\n if not before_e2:\n return padding + sent\n elif before_e2 == 1:\n return [sent[0]] + padding + sent[1:]\n elif before_e2 == 2:\n return sent[:-1] + padding + [sent[-1]]\n else:\n return sent + padding", "def add_text_paddings(train_data,nlp_column,glove_filename_with_path,tokenized,\r\n fit_flag=True,\r\n max_length=100):\r\n train_index = train_data.index\r\n ### Encode Train data text into sequences\r\n train_data_encoded = tokenized.texts_to_sequences(train_data[nlp_column])\r\n ### Pad_Sequences function is used to make lists of unequal length to stacked sets of padded and truncated arrays\r\n ### Pad Sequences 
for Train\r\n X_train_padded = pad_sequences(train_data_encoded,\r\n maxlen=max_length,\r\n padding='post',\r\n truncating='post')\r\n print(' Data shape after padding = %s' %(X_train_padded.shape,))\r\n new_cols = ['glove_dim_' + str(x+1) for x in range(X_train_padded.shape[1])]\r\n X_train_padded = pd.DataFrame(X_train_padded, columns=new_cols, index=train_index)\r\n if fit_flag:\r\n return X_train_padded, tokenized, vocab_size\r\n else:\r\n return X_train_padded", "def merge_sentences_min_len(text: List[str], min_len: int) -> List[str]:\n\n def reducer(acc, x):\n if acc and (sum(map(len, acc[-1])) < min_len):\n acc[-1].append(x)\n return acc\n else:\n return acc + [[x]]\n\n new_text = ['. '.join(sents) for sents in reduce(reducer, text, [])]\n\n return new_text", "def pad_sequences(sequences, pad_func, maxlen = None):\n ret = []\n\n # Determine the maxlen\n max_value = max(map(len, sequences))\n if maxlen is None:\n maxlen = max_value\n\n # Pad / truncate (done this way to deal with np.array)\n for sequence in sequences:\n cur_seq = list(sequence[:maxlen])\n cur_seq.extend([pad_func()] * (maxlen - len(sequence)))\n ret.append(cur_seq)\n return ret", "def _pad(seqs, dtype=torch.float32):\n assert len(seqs) > 0 and all(x.shape[1:] == seqs[0].shape[1:] for x in seqs)\n lens = torch.LongTensor([len(x) for x in seqs])\n max_seq_len = torch.max(lens)\n\n # padded_seq_dims: (batch, max_seq_len, ...).\n padded_seq_dims = (len(seqs), max_seq_len,) + seqs[0].shape[1:]\n res = torch.zeros(padded_seq_dims, dtype=dtype)\n for i, seq in enumerate(seqs):\n src_len = lens[i]\n res[i, :src_len] = torch.Tensor(seq)\n return res, lens", "def pad_sequences(self, X):\n return pad_sequences(X, maxlen=self.pad_length)", "def padding(sent, sequence_len):\n if len(sent) > sequence_len:\n sent = sent[:sequence_len]\n padding = sequence_len - len(sent)\n sent2idx = sent + [0]*padding\n return sent2idx, len(sent)", "def pad_collate_fn(batch):\n length = [len(sentence) for sentence in batch]\n return pad_sequence([torch.LongTensor(s) for s in batch], batch_first=True), torch.LongTensor(length)", "def pad_sequences(self,sequences, pad_func, maxlen = None):\n ret = []\n\n # Determine the maxlen\n max_value = max(map(len, sequences))\n if maxlen is None:\n maxlen = max_value\n\n # Pad / truncate (done this way to deal with np.array)\n for sequence in sequences:\n cur_seq = list(sequence[:maxlen])\n cur_seq.extend([pad_func()] * (maxlen - len(sequence)))\n ret.append(cur_seq)\n return ret", "def pad_sequences_1d(sequences, max_len=None, padding='post', truncating='post', value=0.):\n return pad_sequences(sequences, maxlen=max_len, padding=padding, truncating=truncating,\n value=value)", "def __pad__(sequence, max_l):\n if max_l - len(sequence) < 0:\n sequence = sequence[:max_l]\n else: \n sequence = np.pad(sequence, (0, max_l - (len(sequence))), 'constant', constant_values=(0))\n return sequence", "def padded_sequences(input_sequences, total_words):\r\n max_len = max([len(x) for x in input_sequences])\r\n input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_len, padding='pre'))\r\n print(input_sequences)\r\n\r\n predictors, label = input_sequences[:, :-1], input_sequences[:, -1] # creates two variables: sequence / next word of Ngram\r\n label = ku.to_categorical(label, num_classes=total_words)\r\n return predictors, label, max_len", "def pad_tokens(x, max_length, pad_token_id,\n truncate_from=\"left\",\n pad_from=\"left\"):\n assert truncate_from in (\"left\", \"right\")\n assert pad_from in (\"left\", 
\"right\")\n if len(x) > max_length:\n if truncate_from == \"left\":\n return x[-max_length:]\n else:\n return x[:max_length]\n elif len(x) < max_length:\n padding = [pad_token_id] * (max_length - len(x))\n if pad_from == \"left\":\n return padding + x\n else:\n return x + padding\n else:\n return x", "def pad(seq, n):\n return", "def padder(str_list, extra=0):\n length = max(len(str(s)) for s in str_list) + extra\n\n def pad(string):\n string = str(string)\n padding = max(0, length - len(string))\n return string + (padding * \" \")\n\n return pad", "def add_padding(x, maxlen=500):\n \n # May want to increase maxlen from 500! Not sure the total dist of chomragram lengths.\n\n for i in range(len(x)):\n x[i] = x[i][:,:maxlen]\n q = maxlen - x[i].shape[1]\n p = q//2\n# if q % 2 == 0:\n# x[i] = np.pad(x[i], ((p,p), (0,0)), 'constant', constant_values=(0,0))\n# else:\n# x[i] = np.pad(x[i], ((p,p+1), (0,0)), 'constant', constant_values=(0,0))\n\n print\n if q % 2 == 0:\n x[i] = np.pad(x[i], ((0,0), (p,p)), 'constant', constant_values=(0,0))\n else:\n x[i] = np.pad(x[i], ((0,0), (p,p+1)), 'constant', constant_values=(0,0))\n \n return x", "def merge_sentences(text: List[str], min_len: int) -> List[str]:\n\n def reducer(acc, x):\n x = x.strip()\n\n if acc and (len(nlp(acc[-1])) < min_len):\n if acc[-1] and (acc[-1][-1]) not in ['.', ':']:\n acc[-1] += '. {}'.format(x)\n else:\n acc[-1] += ' {}'.format(x)\n return acc\n else:\n return acc + [x]\n\n new_text = reduce(reducer, text, [])\n return new_text", "def _pad_shorter(sequence: str) -> str:\n return sequence.ljust(3, \"X\")", "def pad_nested_sequences_2(sequences, dtype='int32'):\n max_item_len = 0\n max_sent_len = 0\n max_word_len = 0\n for item in sequences:\n max_item_len = max(len(item), max_item_len)\n for sent in item:\n max_sent_len = max(len(sent), max_sent_len)\n for word in sent:\n max_word_len = max(len(word), max_word_len)\n\n x = np.zeros((len(sequences), max_item_len, max_sent_len, max_word_len)).astype(dtype)\n for i, item in enumerate(sequences):\n for j, sent in enumerate(item):\n for k, word in enumerate(sent):\n x[i, j, k, :len(word)] = word\n\n return x", "def pad_seq(seq, max_seq_len=0):\n if max_seq_len:\n pad_len = max_seq_len - len(seq)\n if pad_len > 0:\n return np.concatenate([seq, np.zeros(pad_len, dtype=np.int64)])\n elif pad_len < 0: # chop to fit\n two_last_tokens = seq[-2:]\n out = seq[:max_seq_len]\n out[-2:] = two_last_tokens\n return out.astype(np.int64)\n return seq.astype(np.int64)", "def pad_sequence(self, arr, max_length_tweet):\n # padding a list of indices with 0 until a maximum length (max_length_tweet)\n if max_length_tweet>len(arr):\n trailing_zeros = [0]*(max_length_tweet-len(arr))\n arr.extend(trailing_zeros)\n return arr[:max_length_tweet]", "def add_padding(self, text):\n\n for word in text.split(' '):\n # 5 character blocks added straight\n if len(word) == 5:\n self.output += word + ' '\n # calling the helper method to fill the blocks\n elif len(word) < 5:\n self._helper(word)\n # split the block up into 5 letter chunks\n elif len(word) > 5:\n block = ''\n for letter in word:\n block += letter\n if len(block) == 5:\n # append the chunk to output\n self.output += block + ' '\n block = ''\n self._helper(block)\n\n return self.output.upper()", "def pad_snt(snt_ids_trunc, max_len):\n\n snt_ids_trunc_pad = snt_ids_trunc + [PAD_ID] * (max_len - len(snt_ids_trunc))\n\n return snt_ids_trunc_pad", "def pad_question(words,m_word, pad_char):\n return [i+''.join([pad_char]*(m_word-len(i))) for i in words]", 
"def pad_with_zero(list, max_length, pad_type):\n padded_list = pad_sequences(list, maxlen=max_length, padding=pad_type, truncating='post')\n return padded_list", "def normalizeTexts(texts):\n fCW = 0\n for item in texts:\n fCW = max(len(item), fCW)\n for counter, item in enumerate(texts):\n texts[counter] = texts[counter].ljust(fCW + 1, '.')\n return (texts, fCW)", "def pad_seq_list(array, sentinel):\n # Compute max length.\n maxlen = 0\n for seq in array:\n maxlen = max(maxlen, len(seq))\n\n # Pad.\n padded = []\n lens = []\n for seq in array:\n padding = maxlen - len(seq)\n padded.append(seq + [sentinel] * padding)\n lens.append(len(seq))\n\n return padded, lens", "def textJustification(words, maxWidth):\n lines = []\n currWordLen = 0\n temp = []\n\n # split up into different lines.\n\n # ensure everything before gets appended properly\n words.append('a' * maxWidth)\n\n for word in words:\n if len(word) + currWordLen > maxWidth:\n lines.append(temp)\n temp = []\n temp.append(word)\n currWordLen = len(word) + 1 # account for spaces\n else:\n temp.append(word)\n currWordLen += len(word) + 1\n\n res = []\n numLines = len(lines)\n for index, line in enumerate(lines):\n if index == numLines - 1:\n numWords = len(line)\n s = ' '.join(line)\n remainingSpaces = maxWidth - len(s)\n s += ' ' * remainingSpaces\n res.append(s)\n else:\n\n numWords = len(line)\n remainingSpaces = maxWidth - len(''.join(line))\n if numWords - 1 != 0:\n interSpace = remainingSpaces // (numWords - 1)\n remainingSpaces = remainingSpaces - \\\n ((numWords - 1) * interSpace)\n\n i = 0\n while remainingSpaces != 0:\n line[i] += ' '\n i = (i + 1) % (numWords)\n remainingSpaces -= 1\n\n res.append((' ' * interSpace).join(line))\n\n return res", "def pad_sequence(xs, length=None, padding=0):\n return PadSequence(length, padding).apply((xs))[0]", "def wrap(text, width=78, indent=0):\n paras = para_sep.split(text.strip())\n\n new_paras = []\n for par in paras:\n words = filter(None, whitespace.split(par))\n\n lines = []\n line = []\n length = indent\n for word in words:\n if length + len(word) <= width:\n line.append(word)\n length += len(word) + 1\n else:\n lines.append(' ' * indent + ' '.join(line))\n line = [word]\n length = len(word) + 1 + indent\n\n lines.append(' ' * indent + ' '.join(line))\n\n new_paras.append('\\n'.join(lines))\n\n return '\\n\\n'.join(new_paras) + '\\n\\n'", "def _pad_feature_sequences(sequences, pad=PAD, feature_dims=768):\n lengths = [tf.shape(x)[0] for x in sequences]\n padded_size = tf.reduce_max(lengths)\n padded_sequences = tf.stack([\n tf.pad(x,\n paddings=[[0, padded_size - lengths[i]], [0, 0]],\n mode='CONSTANT',\n constant_values=pad) for i, x in enumerate(sequences)\n ])\n return padded_sequences, lengths", "def truncate(text, max_length=140, pad_with_dot=True):\n if len(text) > max_length:\n if pad_with_dot:\n return text[:max_length-3] + \"...\"\n else:\n return text[:max_length]\n return text", "def limit_max_len(data, indentation, max_length=MAX_LENGTH): \n buf = ''\n while len(data) > MAX_LENGTH:\n idx = data.rfind(' ', 0, MAX_LENGTH)\n buf += '%s\\n%s' % (data[:idx], indentation)\n data = data[idx+1:]\n else:\n buf += data\n return buf", "def right_pad(message, pad_to=20, pad_with=' '):\n message = str(message)\n while len(message) < pad_to:\n message = message + pad_with\n return message", "def pad_sequence(seq):\n seq_split = seq.strip().split(\"1\")\n last = seq_split[0]\n new_seq = last + \"1\"\n inc_added = 0\n out_added = 0\n for i in range(1, len(seq_split)-1):\n current = 
seq_split[i]\n\n # break up the intial sequences that leak information by adding padding\n if current == last:\n if last == \"-\":\n new_seq += \"+1\"\n inc_added += 1\n last = \"+\"\n else:\n new_seq += \"-1\"\n out_added += 1\n last = \"-\"\n else:\n new_seq += current + \"1\"\n last = current\n\n # 30% chance to inject randomness\n coin = random.randint(1, 101)\n if coin <= 30:\n if coin % 2 == 0:\n new_seq += \"+1\"\n else:\n new_seq += \"-1\"\n \n # return padded sequence, original number of cells, \n # number of incoming padding cells, and number of outgoing padding cells\n return new_seq, len(seq_split), inc_added, out_added", "def pad_seqs_to_same_length(self, debug=False):\n\n maxima = self.get_padding_parameters(debug)\n\n for query in self.sw_info['queries']:\n swfo = self.sw_info[query]\n if 'padded' in swfo: # already added padded information (we're probably partitioning, and this is not the first step)\n return\n seq = swfo['seq']\n cpos = swfo['cyst_position']\n if cpos < 0 or cpos >= len(seq):\n print 'hm now what do I want to do here?'\n k_v = swfo['k_v']\n\n # padleft = maxima['fv_insertion_len'] + maxima['gl_cpos'] - cpos # left padding: biggest germline cpos minus cpos in this sequence\n # padright = maxima['gl_cpos_to_j_end'] + maxima['jf_insertion_len'] - (len(seq) - cpos)\n padleft = maxima['gl_cpos'] - cpos # left padding: biggest germline cpos minus cpos in this sequence\n padright = maxima['gl_cpos_to_j_end'] - (len(seq) - cpos)\n if padleft < 0 or padright < 0:\n raise Exception('bad padding %d %d for %s' % (padleft, padright, query))\n\n swfo['padded'] = {}\n padfo = swfo['padded'] # shorthand\n assert len(utils.ambiguous_bases) == 1 # could allow more than one, but it's not implemented a.t.m.\n padfo['seq'] = padleft * utils.ambiguous_bases[0] + seq + padright * utils.ambiguous_bases[0]\n if query in self.sw_info['indels']:\n print ' also padding reversed sequence'\n self.sw_info['indels'][query]['reversed_seq'] = padleft * utils.ambiguous_bases[0] + self.sw_info['indels'][query]['reversed_seq'] + padright * utils.ambiguous_bases[0]\n padfo['k_v'] = {'min' : k_v['min'] + padleft, 'max' : k_v['max'] + padleft}\n padfo['cyst_position'] = swfo['cyst_position'] + padleft\n padfo['padleft'] = padleft\n padfo['padright'] = padright\n if debug:\n print ' pad %d %d %s' % (padleft, padright, query)\n print ' %d --> %d (%d-%d --> %d-%d)' % (len(seq), len(padfo['seq']),\n k_v['min'], k_v['max'],\n padfo['k_v']['min'], padfo['k_v']['max'])\n\n if debug:\n for query in self.sw_info['queries']:\n print '%20s %s' % (query, self.sw_info[query]['padded']['seq'])", "def pad_sequences(sequences, maxlen, nb_sequences, dtype='int32', value=-1):\n\n x = (numpy.ones((nb_sequences, maxlen)) * value).astype(dtype)\n for idx, s in enumerate(sequences):\n trunc = s[:maxlen]\n\n x[idx, :len(trunc)] = trunc\n\n return x", "def padding(self, n):\n if n < self._length: # pad with blanks\n k = self._length - n\n pad_str = \" \" * k\n else:\n pad_str = \"\"\n\n return pad_str", "def line_wrap(text, max_length = 80):\n output = []\n while text.__len__() >= max_length:\n split = text.rfind(' ', 0, max_length - 1)\n output.append(text[:split])\n text = text[split + 1:]\n\n return output", "def pad_or_trim(seq, max_len=1000):\n n, m = seq.shape\n \n if n > max_len:\n seq = seq[-max_len:, :]\n elif n < max_len:\n if sparse.issparse(seq):\n pad_csr(seq, (max_len, m))\n else:\n seq = np.r_[seq, np.zeros((max_len - n, m))]\n return seq", "def pad_samples(features, maxlen=50, pad=0):\n padded_features 
= []\n for feature in features:\n if len(feature) >= maxlen:\n padded_feature = feature[:maxlen]\n else:\n padded_feature = feature\n while len(padded_feature) < maxlen:\n padded_feature.append(pad)\n padded_features.append(padded_feature)\n return padded_features", "def pad_sequences_2d(sequences, dtype=torch.long):\n bsz = len(sequences)\n para_lengths = [len(seq) for seq in sequences]\n max_para_len = max(para_lengths)\n sen_lengths = [[len(word_seq) for word_seq in seq] for seq in sequences]\n max_sen_len = max([max(e) for e in sen_lengths])\n\n if isinstance(sequences[0], torch.Tensor):\n extra_dims = sequences[0].shape[2:]\n elif isinstance(sequences[0][0], torch.Tensor):\n extra_dims = sequences[0][0].shape[1:]\n else:\n sequences = [[torch.Tensor(word_seq, dtype=dtype) for word_seq in seq] for seq in sequences]\n extra_dims = ()\n\n padded_seqs = torch.zeros((bsz, max_para_len, max_sen_len) + extra_dims, dtype=dtype)\n mask = torch.zeros(bsz, max_para_len, max_sen_len).float()\n\n for b_i in range(bsz):\n for sen_i, sen_l in enumerate(sen_lengths[b_i]):\n padded_seqs[b_i, sen_i, :sen_l] = sequences[b_i][sen_i]\n mask[b_i, sen_i, :sen_l] = 1\n return padded_seqs, mask # , sen_lengths", "def padAlignment(align, applyPadding=True):\n if type(align) in [dict, np.ndarray, list]:\n align = pd.Series(align)\n\n \"\"\"Replace * and # with - and - \"\"\"\n for ind in align.index:\n if '*' in align[ind]:\n align[ind] = align[ind].replace('*', '-')\n if '#' in align[ind]:\n align[ind] = align[ind].replace('#', '-')\n \"\"\"Pad with gaps if the lengths are all the same\"\"\"\n if applyPadding:\n L = align.map(len).unique()\n if len(L) > 1:\n #print 'Sequences have different lengths (pading with gaps): %s' % L\n L = L.max()\n for ind in align.index:\n if len(align[ind]) < L:\n align[ind] = align[ind].ljust(L, '-')\n else:\n L = L.max()\n return align", "def pad(text, width, pad_character=\" \"):\n\n length = len(text)\n if width < 0 and length < -width:\n return text + (-width - length) * pad_character\n elif width > 0 and length < width:\n return (width - length) * pad_character + text\n else:\n return text", "def sentence_join(self, sentences):\n return \" \".join(sentences)", "def zero_pad_messages(messages, seq_len):\n messages_padded = np.zeros((len(messages), seq_len), dtype=int)\n for i, row in enumerate(messages):\n messages_padded[i, -len(row):] = np.array(row)[:seq_len]\n\n return np.array(messages_padded)", "def pad_sequence_to_length(sequence: List,\n desired_length: int,\n default_value: Callable[[], Any] = lambda: 0,\n padding_on_right: bool = True) -> List:\n # Truncates the sequence to the desired length.\n if padding_on_right:\n padded_sequence = sequence[:desired_length]\n else:\n padded_sequence = sequence[-desired_length:]\n # Continues to pad with default_value() until we reach the desired length.\n for _ in range(desired_length - len(padded_sequence)):\n if padding_on_right:\n padded_sequence.append(default_value())\n else:\n padded_sequence.insert(0, default_value())\n return padded_sequence", "def pad_sequence_to_length(sequence: List,\n desired_length: int,\n default_value: Callable[[], Any] = lambda: 0,\n padding_on_right: bool = True) -> List:\n # Truncates the sequence to the desired length.\n if padding_on_right:\n padded_sequence = sequence[:desired_length]\n else:\n padded_sequence = sequence[-desired_length:]\n # Continues to pad with default_value() until we reach the desired length.\n for _ in range(desired_length - len(padded_sequence)):\n if 
padding_on_right:\n padded_sequence.append(default_value())\n else:\n padded_sequence.insert(0, default_value())\n return padded_sequence", "def generateSentences(self, n, maxLength=5):\n\n if n < 1:\n return\n \n string = ''\n\n while n:\n prevWord = random.choice(self.starters)\n newSentence = prevWord + ' '\n sentenceFormed = False\n\n for _ in range(maxLength):\n keyFound = False\n while not keyFound:\n newStuff = ''\n if not prevWord:\n newSentence = ''\n break\n if prevWord in self.model:\n keyFound = True\n newStuff = random.choice(self.model[prevWord])\n else:\n listOfPrevWord = prevWord.split(' ')[::-1]\n listOfPrevWord.pop()\n prevWord = ' '.join(listOfPrevWord[::-1])\n\n if not newStuff:\n break\n\n newSentence += newStuff\n\n if newSentence and newSentence[-1] in '.?!\\'\\\"':\n sentenceFormed = True\n break\n \n newSentence += ' '\n if len(newSentence) < self.overlap:\n prevWord = newStuff.split(' ')\n else:\n prevWord = newStuff.split(' ')[-self.overlap]\n \n if sentenceFormed:\n n -= 1\n string += newSentence + ' ' \n \n return string", "def get_sentence(self):\r\n \r\n size = self.sentence_sizes.get()\r\n size += int(random.randrange(int(size * 0.8), int(size * 1.5)))\r\n\r\n sentence = \"\"\r\n opener = closer = None\r\n match_chance = self.punctuation_matched_chance\r\n \r\n for i in range(size):\r\n word = self.get_word()\r\n if self.last_word == word:\r\n # Retry to avoid repeats.\r\n word = self.get_word()\r\n self.last_word = word\r\n \r\n if i == 0:\r\n sentence += self.get_word().capitalize()\r\n elif opener:\r\n sentence += \" \" + opener + self.get_word()\r\n opener = None\r\n else:\r\n sentence += \" \" + self.get_word()\r\n \r\n if i != 0 and i != (size - 1):\r\n if random.random() > match_chance:\r\n if closer:\r\n sentence += closer\r\n closer = None\r\n match_chance = self.punctuation_matched_chance\r\n else:\r\n opener, closer = self.punctuation_matched.get()\r\n continue\r\n elif closer:\r\n # Make it increasingly likely to roll a closer\r\n match_chance *= 0.8\r\n if random.random() > self.punctuation_midline_chance:\r\n sentence += self.punctuation_midline.get()\r\n\r\n end_of_line = self.punctuation_endline.get()\r\n if closer:\r\n sentence = sentence.strip() + closer + end_of_line\r\n else:\r\n sentence = sentence.strip() + end_of_line\r\n return sentence, size", "def wrap(text, width):\n retstr = \"\"\n for word in text.split(' '):\n if len(retstr)-retstr.rfind('\\n')-1 + len(word.split('\\n',1)[0]) >= width:\n retstr += ' \\n' + word\n else:\n retstr += ' ' + word\n return retstr", "def pad(s):\n\ttry:\n\t\tfrom Cryptodome.Cipher import AES\n\texcept ImportError:\n\t\tfrom Crypto.Cipher import AES\n\treturn s + b\"\\0\" * (AES.block_size - len(s) % AES.block_size)", "def pad(plain_text):\n number_of_bytes_to_pad = block_size - len(plain_text) % block_size\n ascii_string = chr(number_of_bytes_to_pad)\n padding_str = number_of_bytes_to_pad * ascii_string\n padded_plain_text = plain_text + padding_str\n return padded_plain_text", "def pad_trunc(data, maxlen):\n new_data = []\n\n # Create a vector of 0's the length of our word vectors\n zero_vector = []\n for _ in range(len(data[0][0])):\n zero_vector.append(0.0)\n\n for sample in data:\n\n if len(sample) > maxlen:\n temp = sample[:maxlen]\n elif len(sample) < maxlen:\n temp = sample\n additional_elems = maxlen - len(sample)\n for _ in range(additional_elems):\n temp.append(zero_vector)\n else:\n temp = sample\n new_data.append(temp)\n return new_data", "def input_transpose_max_len(sents, 
pad_token,MAX_LEN):\n\n batch_size = len(sents)\n\n sents_t = []\n for i in range(MAX_LEN):\n sents_t.append([sents[k][i] if len(sents[k]) > i else pad_token for k in range(batch_size)])\n\n return sents_t", "def pad_task(task, max_story_length, max_sentence_length, max_query_length):\n stories = []\n questions = []\n answers = []\n task_ids = []\n for story, query, answer, task_id in task:\n for sentence in story:\n for _ in range(max_sentence_length - len(sentence)):\n sentence.append(0)\n assert len(sentence) == max_sentence_length\n\n for _ in range(max_story_length - len(story)):\n story.append([0 for _ in range(max_sentence_length)])\n\n for _ in range(max_query_length - len(query)):\n query.append(0)\n\n stories.append(story)\n questions.append(query)\n answers.append(answer)\n task_ids.append(task_id)\n\n assert len(story) == max_story_length\n assert len(query) == max_query_length\n\n return stories, questions, answers, task_ids", "def same_len(txt, name_len):\n return '\\n'.join(txt + ([' '] * (name_len - len(txt))))", "def indent_wrap(s, indent=0, wrap=80):\n split = wrap - indent\n chunks = [indent * \" \" + s[i:i + split] for i in range(0, len(s), split)]\n return \"\\n\".join(chunks)", "def pad_left(x, block_size=3, fill=0):\n if len(x) > block_size:\n return x\n else:\n right = np.array(list(str(x)))\n left = np.repeat(str(fill), block_size - right.size )\n return \"\".join(np.concatenate([left, right]))", "def truncate_pad(line, num_steps, padding_token):\n if len(line) > num_steps:\n return line[:num_steps] # Truncate\n return line + [padding_token] * (num_steps - len(line)) # Pad", "def truncate_pad(line, num_steps, padding_token):\n if len(line) > num_steps:\n return line[:num_steps] # Truncate\n return line + [padding_token] * (num_steps - len(line)) # Pad", "def truncate_pad(line, num_steps, padding_token):\n if len(line) > num_steps:\n return line[:num_steps] # Truncate\n return line + [padding_token] * (num_steps - len(line)) # Pad", "def pad_sents(self, sents: List[List[int]], resource: str) -> List[List[int]]:\n assert resource in ['src', 'tgt'], \"wrong resource choice, only 'src' or 'tgt'\"\n \n max_length = max(len(s) for s in sents)\n if resource == 'tgt': max_length += 2\n # sents = sorted(sents, key=lambda s: len(s), reverse=True)\n sents_padded = []\n sents_len = []\n for s in sents:\n if resource == 'tgt':\n s = [self.word2id['<s>']] + s + [self.word2id['</s>']]\n sents_len.append(len(s))\n s_padded = s[:] + [self.pad_id]*(max_length-len(s))\n sents_padded.append(s_padded)\n return sents_padded, sents_len", "def average_length(sentences, padding_word=\"PAD\"):\n global trainset_average_length\n number_of_all = 0\n sum = 0\n averaged_sentences = []\n for i in range(len(sentences)):\n sentence = sentences[i]\n sum = sum + len(sentence)\n number_of_all = number_of_all + 1\n average = int(sum / number_of_all)\n average = 35572\n trainset_average_length = average\n for i in range(len(sentences)):\n sentence = sentences[i]\n if len(sentence) < average:\n num_padding = average - len(sentence)\n new_sentence = sentence + [padding_word] * num_padding\n averaged_sentences.append(new_sentence)\n elif len(sentence) > average:\n new_sentence = sentence[:average]\n averaged_sentences.append(new_sentence)\n else:\n averaged_sentences.append(sentence)\n print('Average Length is: ' + str(average))\n return averaged_sentences", "def pad_trunc(data, maxlen):\n new_data = []\n # Create a vector of 0s the length of our word vectors\n zero_vector = []\n for _ in 
range(len(data[0][0])):\n zero_vector.append(0.0)\n\n for sample in data:\n if len(sample) > maxlen:\n temp = sample[:maxlen]\n elif len(sample) < maxlen:\n temp = list(sample)\n # Append the appropriate number 0 vectors to the list\n additional_elems = maxlen - len(sample)\n for _ in range(additional_elems):\n temp.append(zero_vector)\n else:\n temp = sample\n new_data.append(temp)\n return new_data" ]
[ "0.81174576", "0.80845124", "0.8042058", "0.80319756", "0.7986996", "0.76386243", "0.75553435", "0.7518657", "0.7493447", "0.73408186", "0.7043601", "0.7007085", "0.6957604", "0.69015884", "0.6824467", "0.6684047", "0.66429424", "0.6604899", "0.66040134", "0.66006076", "0.6600036", "0.6585218", "0.65689003", "0.654909", "0.6532531", "0.6503587", "0.6303106", "0.62861246", "0.6226405", "0.62164456", "0.6173573", "0.61593884", "0.6146668", "0.6134344", "0.6093906", "0.6086493", "0.6084916", "0.6062146", "0.6041714", "0.60286206", "0.59932244", "0.5987946", "0.59752244", "0.5962364", "0.59413636", "0.59359473", "0.59198743", "0.58982885", "0.58961385", "0.58362854", "0.5815409", "0.58114195", "0.5792756", "0.5786089", "0.57851946", "0.57576036", "0.5755439", "0.5745642", "0.56972206", "0.5668106", "0.5653116", "0.56440777", "0.5631643", "0.5622321", "0.56173027", "0.5617161", "0.55889297", "0.55769706", "0.5536394", "0.5533161", "0.5518758", "0.55166787", "0.55010813", "0.5495267", "0.5481163", "0.5470116", "0.54654104", "0.54588354", "0.54507965", "0.5449146", "0.5447364", "0.5442361", "0.5442361", "0.543935", "0.5433355", "0.5426331", "0.5397847", "0.53844804", "0.5383336", "0.53809005", "0.53739506", "0.53700703", "0.53561133", "0.53491914", "0.53469163", "0.53469163", "0.53469163", "0.5345591", "0.53433657", "0.5341441" ]
0.7942444
5
This function takes a CSV file as an argument, deduplicates the file, and writes the deduplicated dataset to a CSV file if a path for the output file is provided as the second argument. It returns the deduplicated dataframe. Parameters, type, return values
def dataDedup_csv(infile, outfile=None):
    if fpath.isfile(infile):
        dataset = pd.read_csv(infile, sep=',', dtype='unicode')
        dedup_dataset = dataset.drop_duplicates()
        if outfile!=None:
            dedup_dataset.to_csv(outfile, encoding='utf-8', index=False, header=False)
        return dedup_dataset
    else:
        print("file \"%s\" does not exist... or is not a file..." %(infile))
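A minimal usage sketch for the function above (not part of the original record): the snippet itself carries no imports, so this assumes pandas is available as pd and that fpath aliases Python's os.path; the file names are hypothetical.

# Usage sketch (illustrative only; paths are hypothetical, imports are assumptions).
import os.path as fpath   # the snippet calls fpath.isfile(), so fpath must alias os.path
import pandas as pd

deduped = dataDedup_csv("raw_records.csv", outfile="records_dedup.csv")
if deduped is not None:
    # drop_duplicates() keeps the first occurrence of each repeated row
    print("kept %d unique rows" % len(deduped))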
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strip_duplicates(in_file, out_file, sep_type=\"\", header_rows=0):\n\n util.check_output_dir(out_file)\n\n if header_rows !=0: header=read_header(in_file, num_header_rows=header_rows, sep_type =\"\")\n\n if sep_type==\"\":\n data=pd.read_csv(in_file, skiprows=header_rows, header=None, delim_whitespace=True) \n else:\n data=pd.read_csv(in_file, skiprows=header_rows, header=None, sep=sep_type)\n\n dup=data.duplicated(keep='first')\n dup_False=np.where(dup==False)\n\t\n no_dup=data.loc[dup_False]\n\n len_no_dup=no_dup.shape[0]\n len_dup_False_indx=len(dup_False[0])\n\n try:\n assert len_no_dup == len_dup_False_indx\n except AssertionError:\n print(\"Removal of duplicates and creation of new output failed.\")\n print(\"Length of no duplicated indices does not match the subsampled main dataframe... function failiure :(\")\n\n\t\n if header_rows !=0: \n frames = [header, no_dup]\n no_dup = pd.concat(frames)\n\n if sep_type==\"\":\n no_dup.to_csv(out_file, sep=\"\\t\", header=False, index=False)\n print(\"Duplicates removed - output file: %s\" %(out_file))\n else:\n no_dup.to_csv(out_file, sep=sep_type, header=False, index=False)\n print(\"Duplicates removed - output file: %s\" %(out_file))", "def save_csv(csv_path: str, duplicates: pd.DataFrame) -> None:\n csv_file = os.path.join(csv_path, 'duplicates.csv')\n duplicates.to_csv(csv_file, index=False)", "def remove_duplicates_phase_data():\n print(\"Removing any duplicates...\")\n merged_phases_data = pd.read_csv(results_folder + 'phases/raw/merged_phases.csv', header=0,\n skipinitialspace=True, usecols=output_fields)\n df = pd.DataFrame(merged_phases_data)\n clean_df = df.drop_duplicates()\n clean_df.to_csv(results_folder + 'phases/processed/clean_merged_phases.csv', sep=',', index=False)\n print(\"Duplicates removed!\")", "def main(argv=None):\n args, ret = parse_cmdline(argv)\n if ret != GOOD_RET:\n return ret\n\n deduped = compress_dups(read_csv(args.file, all_conv=float), args.column)\n write_csv(deduped, create_out_fname(args.file, prefix=PREFIX),\n read_csv_header(args.file))\n\n return GOOD_RET # success", "def check_duplicates(in_file, sep_type=\"\", header_rows=0):\n\n if sep_type==\"\":\n data=pd.read_csv(in_file, skiprows=header_rows, header=None, delim_whitespace=True) \n else:\n data=pd.read_csv(in_file, skiprows=header_rows, header=None, sep=sep_type)\n\n dup=data.duplicated(keep='first')\n dup_True=np.where(dup==True)\n len_dup_True_indx=len(dup_True[0])\n\n if len_dup_True_indx == 0:\n print(\"No duplicated rows in %s\" %(in_file))\n else:\n print(\"%i duplicated rows found in %s\" %(len_dup_True_indx, in_file))", "def remove_dupes(infile):\n filename = infile.replace('.csv', '-unique.csv')\n s = set()\n with open(filename, 'w') as outfile:\n for line in open(infile):\n if line not in s:\n outfile.write(line)\n s.add(line)", "def history_clones(file, ht_df):\n if os.path.isfile(file):\n # if the file exists, we merge\n print(file + ' found, merging')\n df_file = pd.read_csv(file)\n\n ht_df['timestamp'] = pd.to_datetime(ht_df['timestamp']).dt.date\n\n df_file = pd.concat([df_file, ht_df])\n df_file['timestamp'] = df_file['timestamp'].astype(str)\n\n df_file.sort_values('timestamp', inplace=True)\n print(df_file.to_string())\n # we can't just drop the first instance: for the first day, we'll loose data.\n # so keep max value per date\n\n #df_file.drop_duplicates(subset=['timestamp'], keep='last', inplace=True)\n df_file = df_file.groupby('timestamp')[['uniques', 'count']].agg(['max']).reset_index()\n\n df_file.columns = 
df_file.columns.droplevel(level=1)\n #print(df_file.to_string())\n #print(df_file.columns)\n df_file.to_csv(file, index=False)\n\n else:\n # otherwise, just dump the df\n print('There is no file to merge, dumping df to ' + file)\n ht_df.to_csv(file, index=False)", "def merge_duplicate_psm_rows(\n csv_file_path=None,\n psm_counter=None,\n psm_defining_colnames=None,\n psm_colnames_to_merge_multiple_values={},\n joinchar=\"<|>\",\n overwrite_file=True,\n):\n rows_to_merge_dict = defaultdict(list)\n\n if overwrite_file:\n tmp_file = csv_file_path + \".tmp\"\n os.rename(csv_file_path, tmp_file)\n out_file = csv_file_path\n else:\n tmp_file = csv_file_path\n out_file = csv_file_path.strip(\".csv\") + \"_merged_duplicates.csv\"\n UNode.print_info(\"Merging rows of the same PSM...\", caller=\"postflight\")\n # print('Merging rows of the same PSM...')\n csv_kwargs = {}\n if sys.platform == \"win32\":\n csv_kwargs[\"lineterminator\"] = \"\\n\"\n else:\n csv_kwargs[\"lineterminator\"] = \"\\r\\n\"\n with open(tmp_file, \"r\") as tmp, open(out_file, \"w\", newline=\"\") as out:\n tmp_reader = csv.DictReader(tmp)\n writer = csv.DictWriter(out, fieldnames=tmp_reader.fieldnames, **csv_kwargs)\n writer.writeheader()\n for row in tmp_reader:\n psm = tuple([row[x] for x in psm_defining_colnames if x in row.keys()])\n # each unique combination of these should only have ONE row!\n # i.e. combination of seq+spec+score\n if psm_counter[psm] == 1:\n # no duplicate = no problem, we can just write the row again\n writer.writerow(row)\n elif psm_counter[psm] > 1:\n # we have to collect all rows of this psm, and merge + write\n # them later!\n rows_to_merge_dict[psm].append(row)\n else:\n raise Exception(\"This should never happen.\")\n # finished parsing the old unmerged unified csv\n for rows_to_merge in rows_to_merge_dict.values():\n writer.writerow(\n merge_rowdicts(\n rows_to_merge,\n psm_colnames_to_merge_multiple_values,\n joinchar=joinchar,\n )\n )\n # remove the old unified csv that contains duplicate rows\n if overwrite_file:\n os.remove(tmp_file)\n UNode.print_info(\"Done.\", caller=\"postflight\")\n return out_file", "def remove_duplicated_lines():\n\n work_folder = os.path.join(CURRENT_FOLDER, \"..\\\\Data\\\\weather_data\")\n unique_lines = []\n # compare line be line\n with open(os.path.join(work_folder, \"tempfile.csv\"), \"w\") as outfile:\n with open(os.path.join(work_folder, \"filtered_merged_history_KMDW.csv\")) as infile:\n for line in infile:\n if line not in unique_lines:\n outfile.write(line)\n unique_lines.append(line)\n # replace files\n shutil.copyfile(os.path.join(work_folder, 'tempfile.csv'), os.path.join(\n work_folder, \"filtered_merged_history_KMDW.csv\"))\n # remove temp file\n os.remove(os.path.join(work_folder, \"tempfile.csv\"))", "def prep_file(filename: str,\n dialect: csvhelper.Dialect,\n key_cols: List[int],\n temp_dir: str,\n out_dir: str,\n already_sorted: bool,\n already_uniq: bool) -> Tuple[str, int]:\n dups_removed = 0\n\n # Sort the file if necessary\n if already_sorted:\n if dialect.has_header:\n abort('Invalid config: already_sorted and has-header')\n sorted_fn = filename\n elif (dialect.quoting == csv.QUOTE_NONE\n and dialect.escapechar is None\n and dialect.doublequote is None\n and dialect.has_header is False):\n sorter = gsorter.CSVSorter(dialect, key_cols, temp_dir, out_dir) # type: ignore\n sorted_fn = sorter.sort_file(filename)\n else:\n sorted_fn = filename + '.sorted'\n sort_key_config = convert_key_offsets_to_sort_key_config(key_cols)\n sorter = 
gsorter.CSVPythonSorter(in_fqfn=filename, # type: ignore\n out_fqfn=sorted_fn,\n sort_keys_config=sort_key_config,\n dialect=dialect,\n dedupe=(not already_uniq),\n keep_header=False)\n sorter.sort_file() # type: ignore\n sorter.close() # type: ignore\n dups_removed = sorter.stats['recs_deduped'] # type: ignore\n already_uniq = True\n\n # Dedupe the file if necessary - only for the CSVSorter:\n if already_uniq:\n final_name = sorted_fn\n else:\n deduper = gdeduper.CSVDeDuper(dialect, key_cols, out_dir)\n final_name, read_cnt, write_cnt = deduper.dedup_file(sorted_fn)\n dups_removed = read_cnt - write_cnt\n if sorted_fn != filename:\n os.remove(sorted_fn)\n\n return final_name, dups_removed", "def create_unique_file(files_to_concat: list) -> pd.DataFrame:\n dfs_to_concat = []\n\n print(f'Number of files: {len(files_to_concat)}')\n\n for file in files_to_concat:\n\n year = int(file[0])\n month = file[1]\n filepath = file[2]\n\n # Use pd.read_csv to solve some problems with files\n # engine: python - This parameter is slower compared to c-engine but handle but handle\n # some problematic characters better\n # sep=\"[\\t;]\" - using python-engine it's possible to use regular expressions to define the sep char, where\n # python identify the char to use with each file.\n # skiprows = 1 - As the columns have different names in many files, I just combine header=None with skiprows=1\n # with this, just data is read.\n actual_df = pd.read_csv(filepath, engine='python', sep=\"[\\t;]\", skiprows=1, header=None, dtype='category')\n\n # File 2017-Dezembro.csv has duplicate columns so an if is necessary here just to solve this problem.\n if month == 'Dezembro' and year == 2017:\n\n del(actual_df[7])\n actual_df.columns = [n for n in range(12)]\n\n # Creating two new columns with month and year for each file.\n actual_df['month'], actual_df['year'] = zip(*[(month, year) for n in range(len(actual_df))])\n\n print(f'Processing file: {filepath}')\n\n dfs_to_concat.append(actual_df)\n\n # Concat all files into unique_df\n unique_df = pd.concat(dfs_to_concat, axis=0, ignore_index=True)\n\n return unique_df", "async def collate_similar_data(input_csv_file_path, output_csv_file_path):\n if not input_csv_file_path or not output_csv_file_path:\n return\n with open(output_csv_file_path, 'w') as file_object:\n csv_writer = csv.writer(file_object, delimiter=',')\n csv_writer.writerow(\n ('Account ID', 'First Name', 'Created On', 'Status',\n 'Status Set On'))\n for csv_row in read_csv_file(input_csv_file_path):\n account_status = (await fetch_account_status(csv_row[0]))\n csv_writer.writerow(csv_row + (\n account_status.get('status', ''),\n datetime.datetime.strftime(\n datetime.datetime.strptime(\n account_status.get('created_on'), '%Y-%m-%d'),\n '%Y-%m-%d') if account_status.get('created_on') else ''))", "def list_all_duplicates(folder: str,\n to_csv: bool = False,\n csv_path: str = './',\n ext: str = None,\n fastscan: bool = False) -> pd.DataFrame:\n duplicate_files = create_table(folder, ext, pre=fastscan)\n duplicate_files = duplicate_files[duplicate_files['hash'].duplicated(keep=False)]\n duplicate_files.sort_values(by='hash', inplace=True)\n\n if to_csv is True:\n save_csv(csv_path, duplicate_files)\n\n return duplicate_files", "def check_errors(csv_file):\n\n logger.info(\"Checking %s.\", csv_file)\n\n errors_found = False\n errors_file = f\"{os.path.splitext(csv_file)[0]}_errors.csv\"\n deduplicated_file = f\"{os.path.splitext(csv_file)[0]}_deduplicated.csv\"\n\n with open(csv_file, 'r', encoding=\"UTF-8\") 
as input_file,\\\n open(deduplicated_file, 'w', encoding=\"UTF-8\") as dedup,\\\n open(errors_file, 'w', encoding=\"UTF-8\") as errors:\n\n reader = csv.reader(input_file, delimiter=',')\n dedup_writer = csv.writer(dedup)\n error_writer = csv.writer(errors)\n line = 1\n entries = set()\n for row in reader:\n\n # Skip empty lines.\n if not ''.join(row).strip():\n continue\n\n # Record any incorrect classifications.\n if not row[1].lower() == \"normal\" and not row[1].lower() == \"anomaly\":\n error_writer.writerow(\n [line, row[0], row[1], \"INVALID_CLASSIFICATION\"])\n errors_found = True\n\n # Write first image entry to dedup file and record duplicates.\n key = row[0]\n if key not in entries:\n dedup_writer.writerow(row)\n entries.add(key)\n else:\n error_writer.writerow([line, row[0], row[1], \"DUPLICATE\"])\n errors_found = True\n line += 1\n\n if errors_found:\n logger.info(\"Errors found check %s.\", errors_file)\n else:\n os.remove(errors_file)\n os.remove(deduplicated_file)\n\n return errors_found", "def filter_unique_ticker(state: State):\n if state.events.extract_company_list + state.events.load_company_list == 200:\n try:\n state.files.combined_exchanges.columns = map(str.lower, state.files.combined_exchanges.columns)\n\n # Following line is dropping duplicates but there's not?\n state.output = state.files.combined_exchanges[[\"symbol\", 'name', 'lastsale', 'marketcap', 'ipoyear', 'sector', 'industry']].drop_duplicates()\n state.output.to_csv(f\"{PATH}/data/combined_exchanges.csv\")\n state.events.transform_company_list = 100\n except Exception as e:\n state.output = None\n LOGGER.warning(f\"Could not transform company data , error: {e}\")\n\n else:\n state.output = pd.read_csv(f\"{PATH}/data/combined_exchanges_sample.csv\")\n LOGGER.warning(f\"Using old company ticker file\")", "def make_clean_csv(panda_df, dest_path_name):\n panda_df.to_csv(dest_path_name)\n return True", "def check_duplicated_data(self, path, target):\n files_in_path = [file for file in self.get_csv_in_path(path)]\n print(\"check duplicated for file {} in path {} , files\".format(target, path))\n if target in files_in_path:\n print('The {} is already exist'.format(target))\n return True\n return False", "def dedup_file(in_fname, out_fname):\n with open(in_fname, 'r') as in_file, open(out_fname, 'w') as out_file:\n lines, n_lines, n_duplicates = get_lines(in_file)\n lines = list(lines)\n random.shuffle(lines)\n out_file.write('\\n'.join(lines))\n logging.info(f'deduplicated {in_fname}, removed {n_duplicates} duplicates out of {n_lines} lines')\n return n_lines, n_duplicates", "def remove_duplicates(file):\n file_tmp = 'tmp'\n with open(file) as f, open(file_tmp, 'w') as o:\n for line in unique_everseen(f):\n o.write(line)\n # rename file_tmp to file\n os.remove(file)\n os.rename(file_tmp, file)", "def _check_duplicate_id_csv(self):\n all_csv_ids = []\n self.msg_args = []\n for csv_file_rel in self.filter_files_ext('csv', relpath=True):\n csv_file = os.path.join(self.module_path, csv_file_rel)\n if os.path.basename(csv_file) == 'ir.model.access.csv':\n all_csv_ids.extend(self.get_field_csv(csv_file))\n duplicated_ids_csv = self.get_duplicated_items(all_csv_ids)\n for duplicated_id_csv in duplicated_ids_csv:\n self.msg_args.append((csv_file_rel, duplicated_id_csv))\n if duplicated_ids_csv:\n return False\n return True", "def rm_dup_individuals(input_prefix, output_dir, base_prefix, prefix='temp_dedups_fids'):\n full_path, pprefix = os.path.split(input_prefix)\n\n # ============= OUTPUT FILES =============\n 
duplicated_samples_file = os.path.join(output_dir, '{}_samples_to_rm{}.csv'.format(prefix,base_prefix))\n no_dups_plink_prefix = os.path.join(output_dir, \"{}_{}\".format(prefix, base_prefix))\n\n # ============= REMOVE DUPLICATE SAMPLES =============\n # read fam file\n fam_df = pd.read_csv(input_prefix+\".fam\", sep=\"\\s+\", names=['FID', 'IID', 'c3', 'c4', 'c5', 'c6'])\n\n assert fam_df[~(fam_df.FID == fam_df.IID)].shape[0] == 0,\\\n \"FID and IID are *not* the same in this file:\\n{}\".format(input_prefix+\".fam\")\n\n\n # identify duplicated FID&IID\n dup_index = fam_df[fam_df.duplicated(subset=['FID', 'IID'], keep='first')].index\n dup_fids = fam_df.iloc[dup_index, :].FID.unique()\n\n # each duplicate FID & IID, except for the first instance, will be have \"_[counter]\" appened\n for this_fid in dup_fids:\n for counter, index_row in enumerate(fam_df.loc[fam_df['FID'] == this_fid].iterrows()):\n index, this_row = index_row\n if counter == 0:\n continue\n else:\n fam_df.loc[index, ['FID', 'IID']] = fam_df.loc[index, [\n 'FID', 'IID']].apply(lambda x: x+\"_{}\".format(counter))\n\n # write duplicated FID and IID to file\n if (fam_df.loc[dup_index, ['FID', 'IID']].shape[0] > 0):\n fam_df.loc[dup_index, ['FID', 'IID']].to_csv(duplicated_samples_file, sep=\" \", header=None, index=None)\n\n # OVERWRITE existing .fam to tagging duplicates\n fam_df.to_csv(input_prefix+\".fam\", sep=\" \", header=None, index=None)\n\n\n # plink to rm duplicates\n if (fam_df.loc[dup_index, ['FID', 'IID']].shape[0] > 0):\n rm_dups_cmd = \"plink --bfile {} --remove {} --make-bed --out {}\".format(\n input_prefix, duplicated_samples_file, no_dups_plink_prefix)\n else:\n rm_dups_cmd = \"plink --bfile {} --make-bed --out {}\".format(input_prefix, no_dups_plink_prefix)\n\n plink_stdout = run_shell_cmd(rm_dups_cmd)\n\n return no_dups_plink_prefix, plink_stdout", "def compress_dups(data, column):\n idx = defaultdict(list)\n for row in data:\n idx[row[column]].append(row)\n\n dedup = []\n\n for idx_row in sorted(idx.items()):\n dedup.append(avg_rows(idx_row[1]))\n return dedup", "def get_concatenated_csv_data(concatenated_filepath, concatenated_filename, device_id, output_create_files_filepath, output_create_files_filename):\n\n # Create the full file name of the concatenated filename.\n concatenated_file = concatenated_filepath + \"/\" + concatenated_filename + \"_concatenated.csv\"\n print(\"Looking for concatenated file name: \", concatenated_file)\n\n # Test if the concatenated file exists and if it does, return it.\n if os.path.isfile(concatenated_file):\n print(\"Concatenated file exists: \", concatenated_file)\n return concatenated_file\n\n # If it does not exist, test if the individual files exist.\n elif not os.path.isfile(concatenated_file):\n print(\"Concatenated file does not exist. Create file: \", concatenated_file)\n file_list = get_data_from_files(concatenated_filepath, concatenated_filename)\n # print(\"File list:\", file_list)\n\n # If the individual files exist, create the concatenated file.\n if len(file_list) > 0:\n print(\"Individual csv files exist. 
Creating the concatenated file.\")\n concatenated_file = create_concatenated_csvfile(concatenated_filepath, concatenated_filename)\n return concatenated_file\n\n # If the individual files do not exist, get the data from the database, create the files then concatenate them.\n else:\n database_query = \"select * from ship_data_gpggagpsfix where device_id=\" + int(\n device_id) + \" order by date_time;\"\n # print(database_query)\n password = input()\n\n db_connection = MySQLdb.connect(host='localhost', user='ace', passwd=password, db='ace2016', port=3306);\n\n track_df = get_data_from_database(database_query, db_connection)\n track_df = string_to_datetime(track_df)\n\n # Output the data into daily files (as they do not already exist).\n output_daily_files(track_df, output_create_files_filepath, output_create_files_filename)\n\n concatenated_file = create_concatenated_csvfile(concatenated_filepath, concatenated_filename)\n return concatenated_file", "def add_companies_to_csv(companies, filename):\n\n df_add = create_company_df(companies)\n df_old = pd.read_csv(filename)\n frames = [df_old, df_add]\n df = pd.concat(frames)\n df = df.drop_duplicates()\n\n df.to_csv(filename, index=False)", "def merge_csv_initial(output_filename, path):\n\n prefix = ['ParticipantID',\n 'igtb.datatime',\n 'igtb.timezone']\n\n names = ['irb',\n 'itp',\n 'ocb',\n 'inter.deviance',\n 'org.deviance',\n 'shipley.abs',\n 'shipley.vocab',\n 'neuroticism',\n 'conscientiousness',\n 'extraversion',\n 'agreeableness',\n 'openness',\n 'pos.affect',\n 'neg.affect',\n 'stai.trait',\n 'audit',\n 'gats.quantity',\n 'ipaq',\n 'psqi',\n 'gats.status']\n\n\n \n\n #b = np.loadtxt(path + names[0] + '.csv', delimiter=\",\", skiprows=1, usecols=(0, 1, 2), dtype=object)\n #a = np.array(b, dtype=object)\n\n for i,n in enumerate(names):\n file = path + n + '.csv'\n if(i==0):\n df = pd.read_csv(file, sep=',', index_col=0,usecols=[0,1,2,3]) \n df_all = df\n else:\n df = pd.read_csv(file, sep=',', index_col=0,usecols=[0,3]) \n df_all=pd.concat([df_all,df],axis=1)\n \n df_all=df_all.reset_index() \n a = df_all.as_matrix()\n\n # column_format = '%20s %10s %10s %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f'\n # column_format = '%20s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s'\n column_format = '%20s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s'\n names_string = ','.join(prefix + names)\n\n print(a.shape)\n\n np.savetxt(output_filename, a, delimiter=\",\", fmt=column_format, comments='', header=names_string)\n\n return output_filename", "def main(in_path, keep_path, out_path):\n\t# First open the input csv\n\tcsv_hndl = lambda x: np.array([np.array(r) for r in x])\n\tdata, headers = read_csv(in_path, csv_hndl, use_headers=True, delimiter=',')\n\n\t# Read headers to keep\n\tkeeps = []\n\n\t# Regex for ignoring comments\n\tcmnt_re = re.compile(\"^#\")\n\n\t# Open and read the file\n\twith open(keep_path) as f_obj:\n\t\tfor line in f_obj:\n\t\t\tline = line.strip()\n\t\t\t# If line is commented out, ignore\n\t\t\tif cmnt_re.match(line):\n\t\t\t\tcontinue\n\t\t\t# Otherwise add to list of keeps\n\t\t\tkeeps.append(line)\n\n\t# Prune the csv\n\tnew_data, new_headers = prune_csv(data,headers,keeps)\n\n\t# Write to output csv file\n\twrite_csv(\n\t\tout_path, \n\t\tnew_data, \n\t\tnew_headers, \n\t\tdelimiter=',', \n\t\tquotechar='\"',\n\t\tquoting=csv.QUOTE_MINIMAL\n\t)", "def 
append_to_csv(df, csvFilePath, sep=\",\", supersede=False):\n\n if (not os.path.isfile(csvFilePath)) or supersede==True:\n df.to_csv(csvFilePath, index=False, sep=sep)\n\n else:\n d_od=df.columns\n f_od=pd.read_csv(csvFilePath,nrows=0,sep=sep).columns\n if np.setxor1d(d_od,f_od).size:\n raise Exception(\"Columns do not match: Dataframe columns are: \",\n d_od, \". CSV file columns are: \", f_od, \".\")\n\n else:\n df[f_od].to_csv(csvFilePath, mode='a', index=False, sep=sep, header=False)", "def generate_filtered_csv_file(file_path, rows_id):\n\n data = pandas.read_csv(file_path)\n\n df = pandas.DataFrame(data)\n\n filtered_data = df.loc[set(rows_id)]\n\n new_file_path = new_path_generator(file_path)\n\n filtered_data.to_csv(new_file_path, index=False, header=True)\n\n LOGGER.info('New file path: %s', new_file_path)\n\n return new_file_path", "def isolate_subreddit(csv_location, subreddit):\r\n\r\n individual_subreddit_csvs = csv_location + \"_\" + subreddit + '.*.csv'\r\n\r\n df = dd.read_csv(csv_location + \".csv\", header=0, sep='\\t')\r\n sub_df = df.loc[df['subreddit'] == subreddit]\r\n\r\n sub_df.to_csv(individual_subreddit_csvs)\r\n filenames = glob(individual_subreddit_csvs)\r\n with open(csv_location + \"_\" + subreddit + '.csv', 'w') as out:\r\n for fn in filenames:\r\n with open(fn) as f:\r\n out.write(f.read())\r\n os.remove(fn)", "def pre_process_multispace(filepath, delimiter=\" \"):\n newpath = filepath+\".rev.csv\"\n with open(filepath, \"r\") as src_csv_file:\n with open(newpath, \"w\") as dst_csv_file:\n for src_line in src_csv_file:\n dst_csv_file.write(delimiter.join(src_line.split())+\"\\n\")", "def drop_duplicate_rows(self):\n if self._pandas_flag:\n self.data_frame = self.data_frame.drop_duplicates()\n else:\n self.data_frame = self.data_frame.dropDuplicates()", "def remove_duplicates(\n self,\n output_file: Path = None,\n export_duplicates: bool = False,\n point_to_new_file: bool = True,\n ) -> None:\n if output_file is None:\n output_file = (\n Path(self._input_file.parent)\n / f\"{self._input_file.stem}_noduplicates{self._input_file.suffix}\"\n )\n else:\n output_file = Path(output_file)\n wrappers.run_seqkit_nodup(\n input_fasta=self._input_file,\n output_fasta=output_file,\n export_duplicates=export_duplicates,\n )\n if point_to_new_file:\n self.file_path = output_file", "def combine_files(file_name):\n\n\tif file_name == \"train\":\n\n\t\tif os.path.isfile(\"./Data/Level1_model_files/Train/all_level1_train.csv\"):\n\t\t\tos.remove(\"./Data/Level1_model_files/Train/all_level1_train.csv\")\n\n\t\tlist_files = glob(\"./Data/Level1_model_files/Train/*.csv*\")\n\t\tlist_df = []\n\t\tfor f in list_files :\n\t\t\tlist_df.append(pd.read_csv(f))\n\n\t\tfor i in range(1,len(list_df)):\n\t\t\tlist_df[i] = list_df[i].drop([\"Response\", \"Id\"],1)\n\n\t\t# Concat\n\t\tdf_out = pd.concat(list_df, axis=1)\n\t\t# Order columns\n\t\tlist_col = df_out.columns.values.tolist()\n\t\tlist_col = sorted(list_col)\n\t\tlist_col.remove(\"Response\")\n\t\tlist_col.remove(\"Id\")\n\t\tlist_col = [\"Id\"] + list_col + [\"Response\"]\n\t\tdf_out = df_out[list_col]\n\t\tdf_out.to_csv(\"./Data/Level1_model_files/Train/all_level1_train.csv\", index = False)\n\n\telif file_name == \"test\":\n\n\t\tif os.path.isfile(\"./Data/Level1_model_files/Test/all_level1_test.csv\"):\n\t\t\tos.remove(\"./Data/Level1_model_files/Test/all_level1_test.csv\")\n\n\t\tlist_files = glob(\"./Data/Level1_model_files/Test/*.csv*\")\n\t\tlist_df = []\n\t\tfor f in list_files 
:\n\t\t\tlist_df.append(pd.read_csv(f))\n\n\t\tfor i in range(1,len(list_df)):\n\t\t\tlist_df[i] = list_df[i].drop(\"Id\",1)\n\n\t\t# Concat\n\t\tdf_out = pd.concat(list_df, axis=1)\n\t\t# Order columns\n\t\tlist_col = df_out.columns.values.tolist()\n\t\tlist_col = sorted(list_col)\n\t\tlist_col.remove(\"Id\")\n\t\tlist_col = [\"Id\"] + list_col \n\t\tdf_out = df_out[list_col]\n\t\tdf_out.to_csv(\"./Data/Level1_model_files/Test/all_level1_test.csv\", index = False)", "def transform_febrl_dataset_with_dupes(\n febrl_file: pathlib.Path,\n) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:\n return _convert_febrl_dataset(febrl_file, contains_dupes=True)", "def csvs_scattered_to_grouped(path_dir, inlist, outlist, gcols,\n sort=1, scols=None, catalog=\"\", supersede=False):\n\n filelist=[os.path.join(path_dir,i) for i in inlist]\n n_split=len(outlist)\n\n pdfs=pd.read_csv(filelist[0],usecols=gcols)\n pdfs.drop_duplicates(inplace=True)\n\n print(\"csvs_scattered_to_grouped: Collecting items for group.\\n\")\n for i in range(1,len(filelist)):\n pdfs=pdfs.append(pd.read_csv(filelist[i],usecols=gcols),ignore_index=True)\n pdfs.drop_duplicates(inplace=True)\n\n if sort==1:\n pdfs.sort_values(gcols,inplace=True, ascending=True)\n elif sort==-1:\n pdfs.sort_values(gcols,inplace=True, ascending=False)\n\n aa_ed=np.array_split(pdfs, n_split)\n\n if supersede:\n for i in outlist:\n if os.path.isfile(os.path.join(path_dir,i)):\n os.remove(os.path.join(path_dir,i))\n if os.path.isfile(os.path.join(path_dir,str(catalog))):\n os.remove(os.path.join(path_dir,str(catalog)))\n\n print(\"csvs_scattered_to_grouped: Start processing files:\\n\")\n for i in range(0,len(filelist)):\n fi=pd.read_csv(filelist[i],usecols=scols)\n for j,ja in enumerate(aa_ed):\n wrtj=pd.merge(ja, fi, how='inner', on=gcols)\n append_to_csv(wrtj, os.path.join(path_dir,outlist[j]))\n print('csvs_scattered_to_grouped: '+str(i)+' file(s) finished.')\n\n if catalog:\n for i, d in enumerate(aa_ed):\n d['_@_FILE_']=outlist[i]\n append_to_csv(d, os.path.join(path_dir,str(catalog)))\n print('csvs_scattered_to_grouped: Catalog file created.')", "def removeDuplicateUrl(inputfile, outputfile):\n\t\n\tlines_seen = set()\n\toutfile = open(outputfile, \"w\")\n\tfor line in open(inputfile, \"r\"):\n \t\tif line not in lines_seen:\n\t\t\toutfileput.write(line)\n\t\t\tlines_seen.add(line)\n\n\toutputfile.close()", "def dedup_by_umi(input_bam, dedupped_bam, logfile):\n args = \"dedup -I {inbam} -S {outbam} -L {log} \\\n --method unique \\\n\t \".format(inbam=input_bam, outbam=dedupped_bam, log=logfile)\n run_cmd(umitools, args, dockerize=dockerize)\n\n\n\n #888888888888888888888888888888888888888888\n #\n # C o u n t i n g\n #\n #88888888888888888888888888888888888888888888", "def find_duplicates(file: str, folder: str) -> pd.DataFrame:\n file = format_path(file)\n folder = format_path(folder)\n\n file_hash = hashtable(file)\n\n duplicate_files = list_all_duplicates(folder)\n\n if len(file_hash) == 1:\n file_hash = file_hash[0]\n\n return duplicate_files[duplicate_files['hash'] == file_hash]", "def save_csv(csv_fn, output_dir, df_to_save):\n\n # import packages\n import os\n import pandas as pd\n\n\n if os.path.isfile(output_dir + '/' + csv_fn):\n print('Data already saved and will not be saved again')\n else:\n df_to_save.to_csv(output_dir + '/' + csv_fn, index = False)\n\n return None", "def load_and_clean(self,in_path):\n in_path = Path(in_path)\n try:\n df = pd.read_csv(in_path, index_col = 0, parse_dates = True, infer_datetime_format = True)\n 
except:\n print(\"Could not read csv file. Please check the path\")\n finally:\n #attempt to clean df\n df.dropna(inplace = True)\n df.drop_duplicates(inplace = True)\n df.sort_index()\n return df", "def sandia2parquet(csvPaths, outputPath):\n print(\"in sandia2parquet function\")\n df = pd.concat(pd.read_csv(p, parse_dates=[[0, 1]], index_col=0) for p in csvPaths)\n print(\"after data frame\")\n df.drop_duplicates(inplace=True)\n print(\"drop duplicates\")\n df.sort_index(inplace=True) # ensure datetime is increasing\n print(\"sort index\")\n df.to_parquet(outputPath)\n print(\"parquet made\")\n return outputPath", "def merge_csv_daily(output_filename, path):\n\n # import csv files from folder\n allFiles = glob.glob(path + \"*.csv\")\n\n with open(output_filename, 'wb+') as outfile:\n for i, fname in enumerate(allFiles):\n with open(fname, 'rb') as infile:\n if i != 0:\n infile.readline() # Throw away header on all but first file\n # Block copy rest of file from input to output without parsing\n shutil.copyfileobj(infile, outfile)\n # print(fname + \" has been imported.\")\n\n # adding MissingObs column back:\n df = pd.read_csv(output_filename, header=0, sep=',', index_col=[0,1], parse_dates=False)\n df.insert(loc=3, column='MissingObs', value=np.zeros((df.shape[0], )))\n df.to_csv(output_filename, sep=',')\n\n return output_filename", "def uniquified(self, d):\n print(\"the value befour conversion\",d)\n df_unique = d.drop_duplicates()\n\n print(\"after conversion\",df_unique)\n\n\n return df_unique", "def data_transform(filename):\n gap = 1\n dirpath = tempfile.mkdtemp()\n pd_list = []\n file_df = pd.read_csv(filename, header = 0)\n for line in range(len(file_df)):\n if line % gap == 0:\n print(line,len(file_df))\n rna_uuid = file_df.iloc[line][\"rna_seq_uuid\"]\n case_uuid = file_df.iloc[line][\"case_uuid\"]\n try:\n df = pd.read_csv(download_rna_seq([rna_uuid], dirpath),sep=\"\\t\",names = ['rna_id','value'])\n df = df.transpose()\n df.columns = df.iloc[0]\n df = df.drop(df.index[0])\n df[\"case_uuid\"] = str(case_uuid)\n pd_list.append(df.transpose())\n except:\n continue\n\n final_df = pd.concat(pd_list, axis=1, sort=False)\n final_df = final_df.transpose()\n\n return final_df", "def output_dupimgs(duplicate_img_fd, duplicate_images_urls):\n cs = csv.writer(duplicate_img_fd)\n cs.writerow([\"URL\", \"md5\"])\n dp_imgs = defaultdict(lambda: [])\n for (h, u) in duplicate_images_urls:\n dp_imgs[h].append(u)\n\n for h, urls in dp_imgs.items():\n if len(urls) > 1:\n for u in urls:\n cs.writerow([u, h])", "def testDuplicateFiles(self):\n\n INPUT = \\\n\"\"\"MODULE windows x86 111111111111111111111111111111111 module1.pdb\nINFO CODE_ID FFFFFFFF module1.exe\nFILE 1 foo/../file1_1.cc\nFILE 2 bar/../file1_1.cc\nFILE 3 baz/../file1_1.cc\nFUNC 1000 c 0 Function1_1\n1000 8 45 2\n1008 4 46 3\n100c 4 44 1\n\"\"\"\n EXPECTED_OUTPUT = \\\n\"\"\"MODULE windows x86 111111111111111111111111111111111 module1.pdb\nINFO CODE_ID FFFFFFFF module1.exe\nFILE 1 file1_1.cc\nFUNC 1000 c 0 Function1_1\n1000 8 45 1\n1008 4 46 1\n100c 4 44 1\n\"\"\"\n self.assertParsed(INPUT, [], EXPECTED_OUTPUT)", "def sync_csv(arg):\n files = os.listdir(arg.input_dir)\n file_map = dict()\n for f in files:\n label = os.path.splitext(f)[0].split('_')\n if len(label) < 2:\n continue\n if file_map.get(label[0], None) is None:\n file_map[label[0]] = dict()\n if label[1] == 'grid':\n file_map[label[0]]['grid'] = f\n else:\n if file_map[label[0]].get('data', None) is None:\n file_map[label[0]]['data'] = dict()\n if 
file_map[label[0]]['data'].get(label[1], None) is None:\n file_map[label[0]]['data'][label[1]] = []\n file_map[label[0]]['data'][label[1]].append(f)\n tar_name = 't{}'.format(datetime.now().strftime('%Y%m%d'))\n tar_path = os.path.join(arg.output_dir, 'tar', tar_name)\n if not os.path.exists(tar_path):\n os.mkdir(tar_path)\n i = 0\n n = len(file_map)\n for city, v in file_map.items():\n i = i + 1\n print('------ handle city [{}/{}]: {} -------'.format(i, n, city))\n city_csv_path = os.path.join(arg.output_dir, 'csv', city)\n city_sql_path = os.path.join(arg.output_dir, 'sql', city)\n if not os.path.exists(city_csv_path):\n os.mkdir(city_csv_path)\n if not os.path.exists(city_sql_path):\n os.mkdir(city_sql_path)\n grid = v.get('grid', None)\n if grid:\n grid = os.path.splitext(grid)[0]\n print('** handling grid ...')\n tar_sql = os.path.join(tar_path, '{}.sql.gz'.format(grid))\n if os.path.exists(tar_sql):\n print('****** {} exist!'.format(tar_sql))\n else:\n shutil.copyfile(os.path.join(arg.input_dir, '{}.csv'.format(grid)),\n os.path.join(city_csv_path, '{}.csv'.format(grid)))\n trans_grid(city, city_csv_path, city_sql_path)\n print('****** GZIP grid sql')\n with open(os.path.join(city_sql_path, '{}.sql'.format(grid))) as fi, gzip.open(\n os.path.join(tar_path, '{}.sql.gz'.format(grid)), 'wb') as fo:\n fo.write(fi.read().encode())\n data = v.get('data', None)\n if data:\n print('** handling data ...')\n for week, data_files in data.items():\n print('**** week: {}'.format(week))\n tar_detail = os.path.join(tar_path, '{}_{}_detail.sql.tar.gz'.format(city, week))\n if os.path.exists(tar_detail):\n print('****** {} exist!'.format(tar_detail))\n else:\n for data_file in data_files:\n shutil.copyfile(os.path.join(arg.input_dir, data_file), os.path.join(city_csv_path, data_file))\n create_detail(city, week, 30000, city_csv_path, city_sql_path)\n print('****** TAR detail sql')\n with tarfile.open(tar_detail, 'w:gz') as f:\n for city_week_detail in os.listdir(city_sql_path):\n if city_week_detail.startswith('{}_{}_detail'.format(city, week)):\n f.add(os.path.join(city_sql_path, city_week_detail), arcname=city_week_detail)\n print('****** remove csv and sql file...')\n for data_file in data_files:\n os.remove(os.path.join(city_csv_path, data_file))\n sql_files = os.path.join(city_sql_path, '{}_{}_detail*sql'.format(city, week))\n for sql_file in glob.glob(sql_files):\n os.remove(sql_file)", "def remove_duplicates(file, number_of_fastas, path, output_name):\n\n path_to_pbds = path + 'Modeling/cleaned_template_pdbs/'\n path_to_fastas = path + 'Modeling/cleaned_template_fastas/'\n path_to_alignnment = path + 'Modeling/fasta_alns_and_identities/' + file\n fastas = parse_multifasta_file(path_to_alignnment, number_of_fastas)\n uniq_fastas = []\n with open(output_name, \"w\") as f:\n for i in range(number_of_fastas):\n name, seq = next(fastas)\n if seq not in uniq_fastas:\n uniq_fastas.append(seq)\n f.write('>' + name + '\\n')\n f.write(seq + '\\n')\n else:\n os.remove(path_to_pbds + name + '.pdb')\n os.remove(path_to_fastas + name + '.fasta')\n shutil.move(output_name, path + 'Modeling/fasta_alns_and_identities/')\n return len(uniq_fastas)", "def concat2csv(in_file, out_file):\n\t\n\tf=pd.read_csv(in_file, delim_whitespace=True, skiprows=0)\n\tnp.savetxt(out_file, f, delimiter=',')", "def csv_out(df, filepath='./', filename=None, overwrite=False):\n \n if filename == None:\n # Create generated filename\n filename = ''\n if 'commonname' in list(df.columns):\n filename += 
(df.iloc[0].commonname).lower().replace(' ','')\n else:\n filename += str(datetime.now())\n else:\n # TODO: Check if filename is good\n pass\n\n if overwrite == False:\n # Check if filename already exists\n filenumber = 0\n while path.exists(filepath + filename + str(filenumber)):\n filenumber += 1\n filename += f\"_{filenumber}\"\n \n df.to_csv(filepath + filename, index=False)", "def writeCSV(csvPath, usedmpicommands, first_table_values,second_table_values,third_table_values, df):\n\n print(\"Saving CSV files in directory '\" + os.path.realpath(csvPath) +\"'\")\n\n #routine Summary by rank metrics table\n metric_csv_table = df.to_csv(sep=';')\n with open(os.path.join(csvPath,'routineSummaryByRank_metric_table.csv'), 'w') as outfileMetricTable:\n outfileMetricTable.write(metric_csv_table)\n outfileMetricTable.close()\n\n #routine Summary by rank data table (just the data from the instrumenation file in csv format)\n with open(os.path.join(csvPath,'routineSummaryByRank_summary.csv'), 'w') as outfileMPICommands:\n wr = csv.writer(outfileMPICommands, delimiter=';')\n wr.writerows(usedmpicommands)\n outfileMPICommands.close()\n\n #application Summary by rank data (first table)\n #Columns: \"Rank\",\"Proc CPU Time\",\"User Portion\", \"User Portion in Percent\", \"System Portion\", \"System Portion in Percent\"\n with open(os.path.join(csvPath,'applicationSummaryByRank_1st_table.csv'), 'w') as outfile_first_table:\n wr = csv.writer(outfile_first_table, delimiter=';')\n wr.writerow([\"Rank\",\"Proc CPU Time\",\"User Portion\", \"User Portion in Percent\", \"System Portion\", \"System Portion in Percent\"])\n wr.writerows(first_table_values)\n outfile_first_table.close()\n \n #application Summary by rank data (second table) \n #Columns: \"Rank\",\"Proc Wall Time\",\"User\" , \"User in Percent\",\"MPI\", \"MPI in Percent\"\n with open(os.path.join(csvPath,'applicationSummaryByRank_2st_table.csv'), 'w') as outfile_second_table:\n wr = csv.writer(outfile_second_table, delimiter=';')\n wr.writerow([\"Rank\",\"Proc Wall Time\",\"User\" , \"User in Percent\",\"MPI\", \"MPI in Percent\"])\n wr.writerows(second_table_values)\n outfile_second_table.close()\n\n #application Summary by rank data (third table)\n #Columns: \"Rank\",\"Proc MPI Time\",\"Overhead\", \"Overhead in Percent\",\"Blocking\", \"Blocking in Percent\"\n with open(os.path.join(csvPath,'applicationSummaryByRank_3rd_table.csv'), 'w') as outfile_third_table:\n wr = csv.writer(outfile_third_table, delimiter=';')\n wr.writerow([\"Rank\",\"Proc MPI Time\",\"Overhead\", \"Overhead in Percent\",\"Blocking\", \"Blocking in Percent\"])\n wr.writerows(third_table_values)\n outfile_third_table.close()\n\n #In case, you are wondering, where the last part of the instrumentation file is (message Summary by rank),\n #it is currently not saved as a csv file. 
This is because:\n #\n #1st: In the platform_mpi instrumentation file, the data is somehow visualized beautifully\n #2nd: It is very hard to save the data in a 2-dimensional csv file format\n #Therefore we decided, not to export this data in a csv file format", "def group_tweets(target_dir:str, source_file_name:str, grouped_file_name:str) -> None:\n\n df = pd.read_csv(target_dir + source_file_name)\n df.dropna(inplace=True)\n df = df.groupby('Handle').agg(lambda x: \" \".join(list(set(x.tolist()))))\n df.to_csv(target_dir + grouped_file_name)", "def write_csv(data: set, file_name: str, column_name: str):\n tmp = pd.DataFrame(data, columns=[column_name])\n tmp.to_csv(file_name, index=False, header=False)", "def dataset_1():\n csv = pd.read_csv('resources/dataset_1.csv') # Pandas loads the CSV file as a DataFrame object\n csv.fillna('', inplace=True) # Pandas fills empty celles with NaN. We replace every Nan value with an emtpy string.\n csv.num_rue = csv.num_rue.apply(str) # Cast street numbers to strings\n # Create a new column named 'address' which concatenates the columns ['num_rue', 'cpltnum_ru', 'type_rue', 'article_ru', 'nom_rue']\n # csv[['num_rue', 'cpltnum_ru', 'type_rue', 'article_ru', 'nom_rue']] select a subset of the table 'csv'.\n # .agg(' '.join, axis=1) is equivalent to merge the selected cells of every lines as 'num_rue' + ' ' + 'cpltnum_ru' + ' ' + 'type_rue' + ' ' + 'article_ru' + ' ' + 'nom_rue'\n csv['address'] = csv[['num_rue', 'cpltnum_ru', 'type_rue', 'article_ru', 'nom_rue']].agg(' '.join, axis=1)\n return csv", "def analysis_2_result(units_df,output_folder_path):\n two_wheeler_df = units_df\\\n .filter(col(\"VEH_BODY_STYL_ID\").isin([\"POLICE MOTORCYCLE\", \"MOTORCYCLE\"]))\\\n .distinct()\\\n .agg(count(\"VEH_BODY_STYL_ID\").alias(\"TWO_WHEELER_COUNT\"))\n # distinct is calculated as there are entries with duplicate details\n # : with_duplicate_count = 784 2 wheelers\n # : without_duplicates_count = 773 2 wheelers\n print(\"Analysis 2: \\nTotal number of two wheelers are booked for crashes is :\")\n two_wheeler_df.show() #Displaying result DF\n write_df_to_csv(two_wheeler_df,output_folder_path+\"analysis_2_result\") #Writing to csv file", "def to_csv(self, \n last_match_id, \n first_match_id = 0, \n file_count = 20, \n start_file = 0, \n matches_per_file = 20000):\n for i in range(start_file, start_file + file_count):\n print(i)\n last_match_id_current = last_match_id - i * matches_per_file\n file_name = 'rawdata_' + str(i) + '.csv'\n currunt_dataframe = self.mine_data(file_name = file_name,\n first_match_id = first_match_id,\n last_match_id = last_match_id_current,\n stop_at = matches_per_file)\n currunt_dataframe.to_csv('rawdata_' + str(i) + '.csv')", "def hxldedup_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):\n parser = make_args('Remove duplicate rows from a HXL dataset.')\n parser.add_argument(\n '-t',\n '--tags',\n help='Comma-separated list of column tags to use for deduplication (by default, use all values).',\n metavar='tag,tag...',\n type=hxl.model.TagPattern.parse_list\n )\n add_queries_arg(parser, 'Leave rows alone if they don\\'t match at least one query.')\n\n args = parser.parse_args(args)\n\n do_common_args(args)\n\n with make_source(args, stdin) as source, make_output(args, stdout) as output:\n filter = hxl.filters.DeduplicationFilter(source, args.tags, args.query)\n hxl.input.write_hxl(output.output, filter, show_tags=not args.strip_tags)\n\n return EXIT_OK", "def clean_file(df):\n df_clean = df.drop_duplicates()\n df_no_zeros = 
df_clean[df_clean[2] != 0]\n df_sorted = df_no_zeros.sort()\n\n return df_sorted", "def dump_csv():\n df = helper.load_dataframe('asintosku').reset_index()\n df['min'] = None\n df['max'] = None\n df.asin = df.asin + np.where(\n df.isprime == 0, '_seller', '_prime')\n del df['isprime']\n dfold = load_csv()\n merged = dfold.append(df, ignore_index=True, sort=True).sort_values(\n 'min', ascending=False).drop_duplicates(['seller_sku'])\n merged[['asin', 'mean', 'min', 'max', 'seller_sku']].to_csv(\n datafolder+filename, index=False)", "def write_tocsv(file_name, dataframe) :\n print(\"\\nSaved result to {}\\n\".format(file_name))\n dataframe.to_csv(file_name, mode='a', header=False,index=False)", "def clean_duplicated_identifiers(rows):\n\n logger.info('Cleaning duplicates')\n unique_identifiers = []\n c = 0\n for row in rows:\n c += 1\n idf = row['identifier']\n logger.info(f'Searching duplicates {c} {idf}')\n if idf not in unique_identifiers:\n unique_identifiers.append(idf)\n yield row\n else:\n row['is_duplicate'] = True\n logger.info(f'{idf} is duplicated')\n yield row", "def drop_duplicate_rows(df):\n\n\t# No. of duplicated rows\n\tndup_rows = get_duplicate_rows(df)\n\n\tprint('There are {} duplicated rows in the dataset.'.format(ndup_rows))\n\tif (ndup_rows > 0):\n\t\treturn df.drop_duplicates().reset_index(inplace=True, drop=True)\n\t\tprint('Dropped {} rows from the dataset.'.format(ndup_rows))", "def table_to_csv(output_table, cat_column, method, out_csv_names, debug):\n p_df = df_to_pandas(output_table)\n no_of_prod = len(p_df)\n head_df = pd.DataFrame()\n head_df[\"Cluster Name\"] = p_df.reset_index()[cat_column]\n head_df_list = head_df[\"Cluster Name\"].tolist()\n try:\n cluster_matrix = hierarical_clustering(p_df, method)\n except Exception as e:\n raise Exception(\"Distance matrix has some issue:\"+str(e))\n # head_df.sort(\"Cluster Name\", inplace=True) # original\n head_df = head_df.sort_values([\"Cluster Name\"]) # changed by mukul\n head_df[\"Cluster Number\"] = range(1, no_of_prod + 1)\n head_df = change_column_order(head_df, \"Cluster Number\", 0)\n p_df = pd.DataFrame(cluster_matrix, columns=[\"Idj1\", \"Idj2\", \"SemipartialRSq\", \"priority\"])\n p_df[\"NumberOfClusters\"] = range(len(p_df),0,-1)\n p_df = format_column(p_df, \"Idj1\", no_of_prod, \"NumberOfClusters\")\n p_df = format_column(p_df, \"Idj2\", no_of_prod, \"NumberOfClusters\") \n p_df.drop(\"priority\", axis=1, inplace=True)\n p_df = change_column_order(p_df, \"NumberOfClusters\", 0)\n if not debug:\n p_df.to_excel(out_csv_names[0], index=False)\n head_df.to_excel(out_csv_names[1], index=False)\n return head_df, p_df, head_df_list, cluster_matrix", "def compare_folders(reference_folder: str, compare_folder: str,\n to_csv: bool = False, csv_path: str = './', ext: str = None) -> pd.DataFrame:\n df_reference = create_table(reference_folder, ext)\n df_compare = create_table(compare_folder, ext)\n\n ind_duplicates = [x == df_reference['hash'] for x in df_compare['hash'].values]\n duplicate_files = df_compare.iloc[ind_duplicates]\n\n duplicate_files.drop_duplicates(subset='file', inplace=True)\n\n if to_csv is True:\n save_csv(csv_path, duplicate_files)\n\n return duplicate_files", "def screenshot_csv(csv_in_name, csv_out_name, pics_out_path, screenshot_method, timeout_duration, lazy, be_lazy):\n\n with open(csv_in_name, 'r') as csv_file_in:\n csv_reader = csv.reader(csv_file_in)\n with open(csv_out_name, 'w+') as csv_file_out:\n csv_writer = csv.writer(csv_file_out, delimiter=',', quoting=csv.QUOTE_ALL)\n 
csv_writer.writerow([\"archive_id\", \"url_id\", \"date\", \"succeed_code\", \"archive_url\"])\n\n count = 0\n compare = '0'\n for line in csv_reader:\n if count == 0: # skip the header\n count += 1\n continue\n\n archive_id = str(line[0])\n url_id = line[1]\n date = line[2]\n url = line[3]\n\n if url == \"\":\n continue\n\n if be_lazy is True: # makes running faster by not doing hundreds of archive sites\n if url_id != compare:\n count = 0\n compare = url_id\n else:\n count += 1\n if count > lazy:\n continue\n\n print(\"\\nurl #{0} {1}\".format(url_id, url))\n logging.info(\"url #{0} {1}\".format(url_id, url))\n\n succeed = take_screenshot(archive_id, url_id, date, url, pics_out_path, screenshot_method,\n timeout_duration)\n\n csv_writer.writerow([archive_id, url_id, date, succeed, url])", "def create_dataset(input_file_path, output_file_path):\n col_index_map = {'user_id': 0, 'session_id': 1, 'timestamp': 2, 'step': 3, 'action_type': 4, 'reference': 5,\n 'platform': 6, 'city': 7, 'device': 8,\n 'current_filters': 9, 'impressions': 10, 'prices': 11}\n flat_dict = dict()\n with open(input_file_path, 'r') as csvFile:\n reader = csv.reader(csvFile)\n header = next(reader)\n col_names = [col_name for col_name in col_index_map.keys()]\n col_names.pop(0)\n index = 0\n for row in tqdm(reader):\n if len(flat_dict) > 40000:\n index += 1\n with open(output_file_path + \"_\" + str(index) + \".json\", \"w\") as file:\n json.dump(flat_dict, file)\n print(\" JSON : \", index)\n flat_dict = dict()\n col_values = [row[col_index_map[c_n]] for c_n in col_names]\n dict_for_each_row = dict(zip(col_names, col_values))\n to_list = dict_for_each_row['impressions']\n dict_for_each_row['impressions'] = to_list.split('|')\n to_list = dict_for_each_row['prices']\n dict_for_each_row['prices'] = to_list.split('|')\n user_id = row[col_index_map['user_id']]\n if user_id in flat_dict:\n flat_dict[user_id].append(dict_for_each_row)\n else:\n flat_dict[user_id] = [dict_for_each_row]\n\n print(\"Output is Saved\")", "def write_csv(data_frame, file_name):\n data_frame.coalesce(1).write \\\n .option('header', True).mode('overwrite') \\\n .save(f'outputs/{file_name}', format('csv'))", "def csv_to_ins_file(\n csv_filename,\n ins_filename=None,\n only_cols=None,\n only_rows=None,\n marker=\"~\",\n includes_header=True,\n includes_index=True,\n prefix=\"\",\n head_lines_len=0,\n sep=\",\",\n gpname=False,\n):\n # process the csv_filename in case it is a dataframe\n if isinstance(csv_filename, str):\n df = pd.read_csv(csv_filename, index_col=0)\n df.columns = df.columns.map(str.lower)\n df.index = df.index.map(lambda x: str(x).lower())\n else:\n df = csv_filename\n\n # process only_cols\n if only_cols is None:\n only_cols = set(df.columns.map(lambda x: x.lower().strip()).tolist())\n else:\n if isinstance(only_cols, str): # incase it is a single name\n only_cols = [only_cols]\n only_cols = set(only_cols)\n only_cols = {c.lower() if isinstance(c, str) else c for c in only_cols}\n\n if only_rows is None:\n only_rows = set(df.index.map(lambda x: x.lower().strip()).tolist())\n else:\n if isinstance(only_rows, str): # incase it is a single name\n only_rows = [only_rows]\n only_rows = set(only_rows)\n only_rows = {r.lower() if isinstance(r, str) else r for r in only_rows}\n\n # process the row labels, handling duplicates\n rlabels = []\n row_visit = {}\n only_rlabels = []\n for rname_org in df.index:\n rname = str(rname_org).strip().lower()\n if rname in row_visit:\n rsuffix = \"_\" + str(int(row_visit[rname] + 1))\n row_visit[rname] += 
1\n else:\n row_visit[rname] = 1\n rsuffix = \"\"\n rlabel = rname + rsuffix\n rlabels.append(rlabel)\n if rname in only_rows or rname_org in only_rows:\n only_rlabels.append(rlabel)\n only_rlabels = set(only_rlabels)\n\n # process the col labels, handling duplicates\n clabels = []\n col_visit = {}\n only_clabels = []\n for cname_org in df.columns:\n cname = str(cname_org).strip().lower()\n if cname in col_visit:\n csuffix = \"_\" + str(int(col_visit[cname] + 1))\n col_visit[cname] += 1\n else:\n col_visit[cname] = 1\n csuffix = \"\"\n clabel = cname + csuffix\n clabels.append(clabel)\n if cname in only_cols or cname_org in only_cols:\n only_clabels.append(clabel)\n only_clabels = set(only_clabels)\n if len(only_clabels) == 0:\n print(\"only_cols:\", only_cols)\n raise Exception(\"csv_to_ins_file(): only_clabels is empty\")\n\n if ins_filename is None:\n if not isinstance(csv_filename, str):\n raise Exception(\"ins_filename is None but csv_filename is not string\")\n ins_filename = csv_filename + \".ins\"\n row_visit, col_visit = {}, {}\n onames = []\n ovals = []\n ognames = []\n only_clabels_len = len(only_clabels)\n clabels_len = len(clabels)\n prefix_is_str = isinstance(prefix, str)\n vals = df.values.copy() # wasteful but way faster\n with open(ins_filename, \"w\") as f:\n f.write(f\"pif {marker}\\n\")\n [f.write(\"l1\\n\") for _ in range(head_lines_len)]\n if includes_header:\n f.write(\"l1\\n\") # skip the row (index) label\n for i, rlabel in enumerate(rlabels): # loop over rows\n f.write(\"l1\")\n if rlabel not in only_rlabels:\n f.write(\"\\n\")\n continue\n c_count = 0\n line = \"\"\n for j, clabel in enumerate(clabels): # loop over columns\n\n if j == 0:\n # if first col and input file has an index need additional spacer\n if includes_index:\n if sep == \",\":\n # f.write(f\" {marker},{marker}\")\n line += f\" {marker},{marker}\"\n else:\n # f.write(\" !dum!\")\n line += \" !dum! \"\n\n if c_count < only_clabels_len:\n if clabel in only_clabels: # and rlabel in only_rlabels:\n oname = \"\"\n # define obs names\n if not prefix_is_str:\n nprefix = prefix[c_count]\n else:\n nprefix = prefix\n if len(nprefix) > 0:\n nname = f\"{nprefix}_usecol:{clabel}\"\n else:\n nname = f\"usecol:{clabel}\"\n oname = f\"{nname}_{rlabel}\"\n onames.append(oname) # append list of obs\n ovals.append(vals[i, j]) # store current obs val\n # defin group name\n if gpname is False or gpname[c_count] is False:\n # keeping consistent behaviour\n ngpname = None # nname\n elif gpname is True or gpname[c_count] is True:\n ngpname = nname # set to base of obs name\n else: # a group name has been specified\n if not isinstance(gpname, str):\n ngpname = gpname[c_count]\n else:\n ngpname = gpname\n ognames.append(ngpname) # add to list of group names\n # start defining string to write in ins\n oname = f\" !{oname}!\"\n line += f\" {oname} \"\n if j < len(clabels) - 1:\n if sep == \",\":\n line += f\" {marker},{marker} \"\n # else:\n # line += \" !dum! \"\n c_count += 1\n elif (\n j < len(clabels) - 1\n ): # this isnt a row-col to observationalize (nice word!)\n if sep == \",\":\n line += f\" {marker},{marker} \"\n else:\n line += \" !dum! 
\"\n f.write(line + \"\\n\")\n odf = pd.DataFrame(\n {\"obsnme\": onames, \"obsval\": ovals, \"obgnme\": ognames}, index=onames\n ).dropna(\n axis=1\n ) # dropna to keep consistent after adding obgnme\n return odf", "def concat_vsource_sink_csv(csv_fn1,csv_fn2,merged_source_sink_in,\n csv_type,csv_merged,freq='infer',how='left'):\n # merged_source_sink_in: the merged source_sink.in or source_sink.yaml file \n # where the data sources are from csv_fn1, csv_fn2. \n if merged_source_sink_in.endswith('yaml'):\n df_sources,df_sinks = read_source_sink_yaml(merged_source_sink_in)\n elif merged_source_sink_in.endswith('in'):\n df_sources,df_sinks = read_source_sink_in(merged_source_sink_in)\n else:\n raise NotImplementedError(\n 'merged_source_sink_in can either be .yaml or .in file')\n if csv_type == 'sources':\n sites = df_sources.index\n elif csv_type == 'sink':\n sites = df_sinks.index\n else:\n raise NotImplementedError('csv_type can either be sources or sinks')\n th1 = read_source_sink_csv(csv_fn1)\n th2 = read_source_sink_csv(csv_fn2)\n if freq=='infer':\n if th1.index.freq!=th2.index.freq:\n print(\"th1 and th2 has different frequency\")\n else:\n th1 = th1.asfreq(freq)\n th2 = th2.asfreq(freq)\n th_merged = th1.join(th2,how=how,rsuffix='r').drop(columns=['datetimer'])\n th_merged = th_merged.fillna(-9999.0)\n cols = np.append(['datetime'],sites)\n th_merged = th_merged[cols] #rearrange the array to have the same order as defined in merged_source_sink_in\n th_merged['datetime'] = np.datetime_as_string(th_merged.index.values,'h')\n write_source_sink_csv(th_merged,csv_merged)", "def write_df_to_csv(output_df,file_path):\n output_df\\\n .coalesce(1)\\\n .write\\\n .format(\"csv\")\\\n .option(\"header\",\"true\")\\\n .mode(\"overwrite\")\\\n .save(file_path)", "def pre_process_reviews(csv, outputname):\n df = pd.read_csv(csv)\n df = df.drop(\"Unnamed: 0\", axis='columns')\n df.to_csv(outputname, index=False)", "def prepare_dataset(dataset, manifest):\n\n ## importation\n import pandas as pd\n\n ## craft output_filename\n output_filename = dataset.replace(\".csv\", \"_labeled.csv\")\n\n ## load dataset\n df_data = pd.read_csv(dataset)\n df_data = df_data.set_index('ID')\n\n ## load manifest\n df_cluster = pd.read_csv(manifest)\n\n ## merge\n result = df_data.join(df_cluster.set_index('ID'))\n\n ## drop columns conatining NA\n result = result.dropna(axis='columns')\n\n ## save dataset\n result.to_csv(output_filename)", "def big_dedup_file(in_fname, out_fname, n_bins):\n filehandles = []\n for i in range(n_bins):\n filehandles.append(open(f'temp{i}.txt', 'w'))\n handle_iter = itertools.cycle(filehandles)\n with open(in_fname, 'r') as in_file:\n for line in in_file:\n next(handle_iter).write(line)\n for filehandle in filehandles:\n filehandle.close()\n\n with open(out_fname, 'w') as out_file:\n for i in range(n_bins):\n with open(f'temp{i}.txt', 'r') as tempfile:\n # deduplicate\n lines = list(set(tempfile.read().split('\\n')))\n random.shuffle(lines)\n out_file.write('\\n'.join(lines))\n logging.info(f'pseudodeduplicated {in_fname}, {out_fname} is also pseudorandomized')", "def del_results_csv(request):\n if request.method == \"POST\":\n try:\n sources = set()\n dataset = request.FILES['dataset']\n handle_uploaded_file(dataset, 'temp/del_rels_csv.csv')\n df = pd.read_csv('temp/del_rels_csv.csv')\n for i, row in df.iterrows():\n rel_id = row['rel_id']\n objs = ExtractedRelation.objects.filter(rel_id=rel_id)\n for o in objs:\n sources.add(o.source)\n objs.delete()\n for s in sources:\n if 
len(ExtractedRelation.objects.filter(source=s)) == 0:\n Source.objects.filter(source_id=s.source_id).delete()\n except Exception as e:\n print(str(e))\n tb = traceback.format_exc()\n print(tb)\n \n return HttpResponse(\n json.dumps({\"status\": \"error\"}),\n content_type=\"application/json\"\n )\n \n return HttpResponse(\n json.dumps({\"status\": \"success\"}),\n content_type=\"application/json\"\n )", "def __write_dupe_file(self, filename):\n sortedList = sorted(self.dupeList, key=lambda file: file[0])\n with open(filename, mode='w') as outfile:\n for size, md5, filename, ino in sortedList:\n outfile.write(\"%s %s %s %s\\n\" % (size, md5, ino, filename))", "def drop_internal_overlap(fname):\n smi_list = []\n with open (fname) as f:\n for line in f:\n smi_list.append(line.rstrip())\n can_smi_list = can_smile(smi_list)\n unique_lst = set(can_smi_list)\n unique_lst = list(unique_lst)\n outf = pd.DataFrame()\n outf['Cano_SMILES'] = pd.Series(data=unique_lst)\n outf.to_csv('Unique_'+fname, index=False)", "def export_uniq_ads(ads, out_folder, rel_folder):\n try :\n os.makedirs(out_folder)\n os.makedirs(os.path.join(out_folder, rel_folder))\n except OSError:\n LOG.debug('Creating output folder')\n\n fwtr = open(os.path.join(out_folder, 'uniq_ads.csv'), 'w')\n # Relative location = Location of the ad within this current session\n # Global location, added when an ad is matched with existing ads in DB\n fwtr.write('#UID, Ad-Company, Ad-Filetype, Height, Width, Rel-Location, src\\n')\n \n for bug in ads.keys():\n height, width = bug.get_dimension()\n filepath = bug.get_filepath()\n name = bug.get_name()\n src = bug.get_src()\n filetype = bug.get_filetype()\n new_uuidname = '%s.%s' % (uuid1(), filetype)\n bug.set_uuid(new_uuidname)\n new_filepath = os.path.join(out_folder, new_uuidname)\n rel_filepath = os.path.join(rel_folder, new_uuidname)\n copy2(filepath, new_filepath)\n fwtr.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\\n'.format(new_uuidname,\n name, filetype, height, width, rel_filepath, src))\n fwtr.close()\n return ads", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n df = create_data_frame(input_filepath)\n process_columns(df)\n logger.info(df.head())\n df.to_csv(output_filepath, index=False)", "def output_daily_files(dataframe, path, filename):\n\n days = dataframe.groupby('date_time_day')\n dataframe.groupby('date_time_day').size().reset_index(name='data points per day')\n\n for day in days.groups:\n print(day.date())\n output_path = path + filename + \"_\" + str(day.date()) + '.csv'\n print(\"Creating intermediate flagged data file: \", output_path)\n days.get_group(day).to_csv(output_path, index=False)", "def seed_from_csv_diff(original_file_path, new_file_path, model, **kwargs):\n\n original_diff_set = set()\n new_diff_set = set()\n new_file = open(new_file_path, 'r')\n headers = new_file.readline().replace('\\n', '').split(',')\n new_reader = model.update_set_filter(csv.reader(new_file), headers)\n\n original_file = open(original_file_path, 'r')\n original_reader = csv.reader(original_file)\n next(original_reader, None)\n logger.debug(\" * Beginning CSV diff process.\")\n\n for row in new_reader:\n new_diff_set.add(json.dumps(row))\n\n for row in original_reader:\n original_diff_set.add(json.dumps(row))\n\n diff = new_diff_set - original_diff_set\n temp_file_path = os.path.join(settings.MEDIA_TEMP_ROOT, str(\n 'set_diff' + str(random.randint(1, 10000000))) + '.mock' if settings.TESTING else 
'.csv')\n with open(temp_file_path, 'w') as temp_file:\n writer = csv.writer(temp_file, delimiter=',')\n writer.writerow(headers)\n for row in diff:\n writer.writerow(json.loads(row))\n\n diff_gen = from_csv_file_to_gen(temp_file_path, kwargs['update'])\n logger.debug(\" * Csv diff completed, beginning batch upsert.\")\n batch_upsert_from_gen(model, diff_gen, settings.BATCH_SIZE, **kwargs)\n if os.path.isfile(temp_file_path):\n os.remove(temp_file_path)\n if 'callback' in kwargs and kwargs['callback']:\n kwargs['callback']()", "def read_csv_input_file(self,\n file_name: str,\n usecols: list = None,\n names: list = None,\n remove_spurious_urls=False,\n unique_key=None\n ):\n\n # split the extension two time so we can also deal with a double extension bla.csv.zip\n file_base, file_ext = os.path.splitext(file_name)\n file_base2, file_ext2 = os.path.splitext(file_base)\n\n # build the cache file including the cache_directory\n cache_file = Path(CACHE_DIRECTORY) / (file_base2 + \".pkl\")\n\n if os.path.exists(cache_file):\n # add the type so we can recognise it is a data frame\n self.logger.info(\"Reading from cache {}\".format(cache_file))\n df: pd.DataFrame = pd.read_pickle(cache_file)\n df.reset_index(inplace=True)\n elif \".csv\" in (file_ext, file_ext2):\n self.logger.info(\"Reading from file {}\".format(file_name))\n df = pd.read_csv(file_name,\n header=None,\n usecols=usecols,\n names=names\n )\n\n if remove_spurious_urls:\n self.logger.info(\"Removing spurious urls\")\n df = self.remove_spurious_urls(df)\n\n df = self.clip_kvk_range(df, unique_key=unique_key, kvk_range=self.kvk_range_read)\n\n self.logger.info(\"Writing data to cache {}\".format(cache_file))\n df.to_pickle(cache_file)\n else:\n raise AssertionError(\"Can only read h5 or csv files\")\n\n try:\n df.drop(\"index\", axis=0, inplace=True)\n except KeyError:\n self.logger.debug(\"No index to drop\")\n else:\n self.logger.debug(\"Dropped index\")\n\n return df", "def drop_dups(df,col_names=None):\n return df.dropDuplicates()", "def dropRedundantEcotypes(self, input_fname, ecotypeid2tg_ecotypeid):\n\t\tsys.stderr.write(\"Dropping redundant ecotypes ...\\n\")\n\t\treader = csv.reader(open(input_fname), delimiter=figureOutDelimiter(input_fname))\n\t\tcol_name2col_index = getColName2IndexFromHeader(reader.next())\n\t\tecotypeid_idx = col_name2col_index['ecotypeid']\n\t\thaplo_name_idx = col_name2col_index['haplogroup']\n\t\tnativename_idx = col_name2col_index['nativename']\n\t\ttg_ecotypeid2row = {}\n\t\tno_of_duplicates = 0\n\t\tno_of_duplicates_with_different_haplogroups = 0\n\t\tcounter = 0\n\t\tfor row in reader:\n\t\t\tecotypeid = int(row[ecotypeid_idx])\n\t\t\thaplo_name = row[haplo_name_idx]\n\t\t\tnativename = row[nativename_idx]\n\t\t\tif ecotypeid in ecotypeid2tg_ecotypeid:\n\t\t\t\ttg_ecotypeid = ecotypeid2tg_ecotypeid[ecotypeid]\n\t\t\t\tif tg_ecotypeid not in tg_ecotypeid2row:\n\t\t\t\t\ttg_ecotypeid2row[tg_ecotypeid] = row\n\t\t\t\telse:\n\t\t\t\t\tno_of_duplicates += 1\n\t\t\t\t\told_row = tg_ecotypeid2row[tg_ecotypeid]\n\t\t\t\t\told_ecotypeid = int(old_row[ecotypeid_idx])\n\t\t\t\t\told_haplo_name = old_row[haplo_name_idx]\n\t\t\t\t\told_nativename = row[nativename_idx]\n\t\t\t\t\tif old_haplo_name!=haplo_name:\n\t\t\t\t\t\tsys.stderr.write(\"ecotype %s(%s) in haplotype group %s, while duplicate %s(%s) in haplotype group %s.\\n\"%\\\n\t\t\t\t\t\t\t\t\t\t (ecotypeid, nativename, haplo_name, old_ecotypeid, old_nativename, old_haplo_name))\n\t\t\t\t\t\tno_of_duplicates_with_different_haplogroups += 1\n\t\t\t\t\tif 
ecotypeid==tg_ecotypeid:\t#replace if the new ecotypeid matching the tg_ecotypeid whether the haplotype group is same or not.\n\t\t\t\t\t\ttg_ecotypeid2row[tg_ecotypeid] = row\n\t\t\telse:\n\t\t\t\tsys.stderr.write(\"Warning: ecotype %s not in ecotypeid2tg_ecotypeid.\\n\"%(ecotypeid))\n\t\t\tcounter += 1\n\t\tsys.stderr.write(\"no_of_duplicates: %s, out of which %s encompass different haplotype groups. %s accessions in total. Done.\\n\"%\\\n\t\t\t\t\t\t (no_of_duplicates, no_of_duplicates_with_different_haplogroups, counter))\n\t\treturn tg_ecotypeid2row", "def dropDuplicateStars(df):\n df = df.drop_duplicates(subset =\"host_name\", keep = 'first')\n return df", "def collect_csv(source_dir, dest_dir):\n source_dir = Path(source_dir)\n dest_dir = Path(dest_dir)\n for csvfile in source_dir.rglob(\"*.csv\"):\n species = normalized_species(csvfile)\n species_dir = dest_dir / species\n species_dir.mkdir(exist_ok=True, parents=True)\n date_time = normalized_datetime(csvfile)\n print(f\"Renaming {csvfile} to {species_dir / (date_time + '.csv')}\")\n csvfile.rename(species_dir / (date_time + \".csv\"))", "def clean_data(input_file, output_file):\n # Create data frame\n data = pd.read_csv(input_file, sep = \";\")\n \n # Remove unnecessary features from data frame\n data = data.drop([\"Name\",\"Ticket\",\"Cabin\"], axis=1)\n \n # Remove NaN values from remaining features\n data = data.dropna()\n \n # Save ready-to-use file\n data.to_csv(output_file, index=False)", "def concat_file(filename):\n csv_paths = read_csv(filename)\n\n data_len = 0\n df_total = None\n for csv_name, csv_path in tqdm(csv_paths):\n print(csv_name)\n df = dt.fread(csv_path).to_pandas()\n data_len += df.shape[0]\n\n process_df = filter_law(df)\n\n if df_total is None:\n df_total = process_df.copy()\n else:\n df_total = pd.concat([df_total, process_df], ignore_index=True)\n\n print(\"Total data count: {}\".format(data_len))\n df_total.to_csv('eda_concat.csv')", "def reformate_park_csv(list_num_park=[1, 2, 3],\n list_date_park=['2015', '2016'],\n sep=';'):\n\n # Reading parkX_20XX.csv ...\n df = create_df_park_data(list_num_park, list_date_park)\n\n # Dropping Useless columns for speed up\n df.drop(park_col_type['drop'], axis=1, inplace=True)\n\n # Converting in datetime types and keeping in GMT+01:\n print(\"Converting 'Date' column in datetime type ...\")\n df['Date'] = pd.to_datetime(df['Date'], format=\"%d/%m/%Y %H:%M\")\n\n # we create an ident for each hour \"Date_hour_int\"\n print('Constructing id for each date & hour ...')\n df[\"Date_hour_int\"] = df[\"Date\"].dt.year*10**6 + df[\"Date\"].dt.month*10**4\\\n + df[\"Date\"].dt.day*10**2 + df[\"Date\"].dt.hour\n\n # we create a dataframe with \"production_mean_hour\" value for each\n # Eolienne*date_hour_int\n print(\"Computing 'Production_mean_hour' ...\")\n df_product_mean = df[df[\"Fonctionnement\"] == 1]\\\n .groupby([\"Eolienne\", \"Date_hour_int\"])[\"Production\"]\\\n .mean().reset_index().rename(columns={\"Production\": \"Production_mean_hour\"})\n\n # we add this value in the initial dataset \"df\"\n df = pd.merge(df, df_product_mean,\n on=[\"Eolienne\", \"Date_hour_int\"], how=\"left\")\n df = df[park_col_type['keep']]\n\n # output csv files per turbine :\n for num_turb in range(1, 12):\n fname_out = data_reformated_dir + 'turb_' + str(num_turb) + '.csv'\n print('Storing ' + fname_out + ' ...')\n df_tmp = df.loc[df['Eolienne'] == 'Turb'+str(num_turb)]\n df_tmp.to_csv(fname_out, sep=sep, index=False)", "def parse_picard_markduplicate_metrics(sample, file):\n df = 
pd.read_csv(file, sep='\\t', comment='#')\n df['sample'] = sample\n return df.set_index('sample')", "def save_combined_clean_data(self):\n df = []\n for data in self.clean_data:\n df.append(data.df)\n df = pd.concat(df, axis=0, join='outer', ignore_index=False, keys=None,\n levels=None, names=None, verify_integrity=False, copy=True)\n file_name = \"../data/clean_data/\" + \"combined_clean_data + \" + '.csv'\n df.to_csv(file_name, sep=\";\", index=False)\n\n return(df)", "def add_uid2(filename, columns, outfilename):\n timer = time.time()\n print(f\"read file: {filename}\")\n df = pd.read_csv(filename)\n print(f\"timer - read file: {time.time() - timer}\")\n\n timer = time.time()\n print(f\"add uid to columns {columns}\")\n\n uid_arr = df[columns[1:]].apply(lambda x: [x[0], x[1]], axis=1)\n uid_arr_sorted = uid_arr.apply(lambda x: np.sort(x))\n df['uid'] = uid_arr_sorted.apply('_'.join)\n print(f\"timer - add uid using method 2: {time.time() - timer}\")\n\n timer = time.time()\n print(f\"save to file: {outfilename}\")\n df.to_csv(outfilename)\n print(f\"timer - savefile: {time.time() - timer}\")\n\n return outfilename", "def deduplicate_nhd(in_feature_class_or_table, out_feature_class_or_table ='', unique_id ='Permanent_Identifier'):\n # SETUP\n if out_feature_class_or_table:\n arcpy.AddMessage(\"Copying initial features to output...\")\n if arcpy.Describe(in_feature_class_or_table).dataType == \"FeatureClass\":\n arcpy.CopyFeatures_management(in_feature_class_or_table, out_feature_class_or_table)\n if arcpy.Describe(in_feature_class_or_table).dataType == \"Table\":\n arcpy.CopyRows_management(in_feature_class_or_table, out_feature_class_or_table)\n else:\n out_feature_class_or_table = in_feature_class_or_table\n\n # EXECUTE\n # Delete full identicals first--these come from overlaps in staged subregion data\n before_count = int(arcpy.GetCount_management(out_feature_class_or_table).getOutput(0))\n arcpy.AddMessage(\"Deleting full identicals...\")\n # Check for full identicals on original *attribute fields*, excluding the one we specifically created to make them distinct\n # Also excluding object ID since that is obviously distinct\n excluded_fields = ['Shape', 'Shape_Length', 'Shape_Area', 'OBJECTID', 'nhd_merge_id']\n check_fields = [f.name for f in arcpy.ListFields(out_feature_class_or_table) if f.name not in excluded_fields]\n arcpy.DeleteIdentical_management(out_feature_class_or_table, check_fields)\n after_full_count = int(arcpy.GetCount_management(out_feature_class_or_table).getOutput(0))\n arcpy.AddMessage(\"{0} features were removed because they were full identicals to remaining features.\".format(before_count - after_full_count))\n\n # Delete duplicated IDs by taking the most recent FDate--these come from NHD editing process somehow\n arcpy.AddMessage(\"Deleting older features with duplicated identifiers...\")\n\n # Get a list of distinct IDs that have duplicates\n arcpy.Frequency_analysis(out_feature_class_or_table, \"in_memory/freqtable\", unique_id)\n arcpy.TableSelect_analysis(\"in_memory/freqtable\", \"in_memory/dupeslist\", '''\"FREQUENCY\" > 1''')\n count_dupes = int(arcpy.GetCount_management(\"in_memory/dupeslist\").getOutput(0))\n\n #If there are any duplicates, remove them by keeping the one with the latest FDate\n if count_dupes > 0:\n dupe_ids = [row[0] for row in arcpy.da.SearchCursor(\"in_memory/dupeslist\", (unique_id))]\n dupe_filter = ''' \"{}\" = '{{}}' '''.format(unique_id)\n for id in dupe_ids:\n dates = [row[0] for row in 
arcpy.da.SearchCursor(out_feature_class_or_table, [\"FDate\"], dupe_filter.format(id))]\n with arcpy.da.UpdateCursor(out_feature_class_or_table, [unique_id, \"FDate\"], dupe_filter.format(id)) as cursor:\n for row in cursor:\n if row[1] == max(dates):\n pass\n else:\n cursor.deleteRow()\n after_both_count = int(arcpy.GetCount_management(out_feature_class_or_table).getOutput(0))\n arcpy.AddMessage(\"{0} features were removed because they were less recently edited than another feature with the same identifier.\".format(after_full_count - after_both_count))\n\n arcpy.AddIndex_management(out_feature_class_or_table, \"nhd_merge_id\", \"IDX_nhd_merge_id\")\n arcpy.Delete_management(\"in_memory/freqtable\")\n arcpy.Delete_management(\"in_memory/dupeslist\")", "def process(fileglob):\n\n filepaths = glob.glob(fileglob)\n\n for filepath in filepaths:\n datum_list = []\n aggregated_data = {'user_id': None, 'n': 0, 'sum': 0, 'min': 0, 'max': 0}\n\n for parsed_row in extract_csv_data(filepath):\n\n if aggregated_data['user_id'] is None:\n aggregated_data['user_id'] = parsed_row['user_id']\n\n if aggregated_data['user_id'] != parsed_row['user_id']:\n # We want earliest 'date' datum first.\n sorted_datum = sorted(datum_list, key=lambda k: k['date'])\n\n for datum in sorted_datum:\n aggregated_data = update_aggregated_data(aggregated_data, datum)\n\n aggregated_data = finalize_aggregated_data(aggregated_data)\n\n # Dump current stack of user info to output file.\n dump_aggregated_data(aggregated_data, output_filepath(filepath))\n\n # Re-initialize\n datum_list = []\n aggregated_data = {'user_id': parsed_row['user_id'], 'n': 0, 'sum': 0, 'min': 0, 'max': 0}\n\n \"\"\"\n We are still on same user_id so just append to datum_list.\n \"\"\"\n datum_list.append(parsed_row)\n\n\n \"\"\"\n At end of csv file, roll-up and dump last chunk of user_data.\n \"\"\"\n\n sorted_datum = sorted(datum_list, key=lambda k: k['date'])\n\n for datum in sorted_datum:\n aggregated_data = update_aggregated_data(aggregated_data, datum)\n\n aggregated_data = finalize_aggregated_data(aggregated_data)\n\n dump_aggregated_data(aggregated_data, output_filepath(filepath))", "def create_csvfile(options_path, recept_info,df):\n df_csv = df.copy()\n df_csv.index.names = ['Interacting positions']\n\n #Change dynX by full name of receptor, adding the dynid if there is more than a simulation for that receptor\n df_csv.columns = df_csv.columns.map(lambda x: recept_info[x][13])\n\n #Sorting by ballesteros Id's (helixloop column) and clustering order\n df_csv['Interacting positions'] = df_csv.index\n df_csv['helixloop'] = df_csv['Interacting positions'].apply(lambda x: re.sub(r'^(\\d)x',r'\\g<1>0x',x)) \n df_csv = df_csv.sort_values([\"helixloop\"])\n\n #Change jumplines by 'x' to avoid formatting problems\n def new_index(cell):\n cell = cell.replace('\\n\\n', ' ')\n cell = cell.replace('\\n', 'x')\n cell = cell.replace('xx', 'x')\n return cell\n\n df_csv['Interacting positions'] = df_csv['Interacting positions'].apply(lambda x: new_index(x))\n df_csv.index = df_csv['Interacting positions']\n\n #Drop columns\n df_csv.drop(columns = ['helixloop','Interacting positions'], inplace = True)\n\n #Store dataframe as csv\n df_csv.to_csv(path_or_buf = options_path+\"dataframe.csv\", float_format='%.1f')", "def copy_from_file(conn, df, table):\n # Save the dataframe to disk\n tmp_df = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), \"tmp_dataframe.csv\")\n df.to_csv(tmp_df, index=False, header=False)\n f = open(tmp_df, 'r')\n 
cursor = conn.cursor()\n try:\n cursor.copy_from(file=f, table=table, sep=\",\", columns=('event_number', 'priority', 'address', 'is_incident',\n 'geometry_wkt', 'timestamp', 'disposition'))\n cursor.execute(\n f\"DELETE FROM {table} A USING {table} B WHERE A.ctid < B.ctid AND A.event_number = B.event_number AND A.priority = B.priority AND A.address = B.address AND A.is_incident = B.is_incident AND A.geometry_wkt = B.geometry_wkt AND A.timestamp = B.timestamp AND A.disposition = B.disposition\")\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n os.remove(tmp_df)\n logger.error(\"Error: %s\" % error)\n conn.rollback()\n cursor.close()\n return 1\n logger.info(\"copy_from_file() done\")\n cursor.close()\n os.remove(tmp_df)", "def add_time_created_permalink_karma_submission_id(input_file_name, output_file_name):\n\n df = pd.read_csv(input_file_name)\n df = df.drop(['Unnamed: 0'], axis=1)\n\n df['created_utc'], df['permalink'], df['score'], df['link_id'] = df['comment_id'].apply(get_specific_comment_info)\n\n df.to_csv(output_file_name)", "def add_duplicate_schema(csv: pd.DataFrame, state: Random, verbose:bool) -> pd.DataFrame:\n schema_number = state.randint(0, max(1, len(csv)//2))\n for _ in tqdm(range(schema_number), desc=\"Adding duplicated schemas\", disable=not verbose):\n csv = add_row(csv, csv.columns, state=state)\n return csv", "def merge_cat(UT):\n csv_path = Path(\"./catalog\"+UT+\".csv\")\n if csv_path.exists() != 1:\n Popen('rm -rf merged'+UT+'.log', shell=True)\n Popen('touch merged'+UT+'.log', shell=True)\n all_files = glob.glob(\"./results/20*/\"+UT+\"/*\")\n print('merging table: {} (1/{})'.format(all_files[0],len(all_files)))\n tab = pd.read_csv(all_files[0])\n cat = tab.copy()\n merged = open('merged'+UT+'.log','a+')\n merged.write(all_files[0]+'\\n')\n try:\n for i, file in enumerate(all_files[1:]):\n print('merging table: {} ({}/{})'.format(file,i+2,len(all_files)))\n tab = pd.read_csv(file)\n cat = pd.merge(cat, tab, how='outer')\n merged.write(file+'\\n')\n cat.to_csv('catalog'+UT+'.csv', index=False, header=True)\n merged.close()\n except:\n cat.to_csv('catalog'+UT+'.csv', index=False, header=True)\n merged.close()\n else:\n cat = pd.read_csv('catalog'+UT+'.csv')\n all_files = glob.glob(\"./results/20*/\"+UT+\"/*\")\n merged = list(pd.read_table('merged'+UT+'.log', header=None).values)\n merged = [i[0] for i in merged]\n if set(all_files) == set(merged):\n print('GOOD NEWS: No new table is needed to be merged.')\n else:\n non_processed = list(set(all_files) - set(merged))\n merged = open('merged'+UT+'.log','a+')\n try:\n for i, new_img in enumerate(non_processed):\n print('merging table: {} ({}/{})'.format(new_img,i+1,len(non_processed)))\n tab = pd.read_csv(new_img)\n cat = pd.merge(cat, tab, how='outer')\n merged.write(new_img+'\\n')\n cat.to_csv('catalog'+UT+'.csv', index=False, header=True)\n merged.close()\n except:\n cat.to_csv('catalog'+UT+'.csv', index=False, header=True)\n merged.close()\n cat = pd.read_csv('catalog'+UT+'.csv')\n m = Table(cat.values, names=cat.columns)\n hdu = fits.table_to_hdu(m)\n hdulist = fits.HDUList([fits.PrimaryHDU(), hdu])\n hdulist.writeto('catalog'+UT+'.fits', overwrite=True)", "def import_csv(item):\n (f_csv, f_csv_out, target_column, merge_columns) = item\n has_checked_keys = False\n\n if not merge_columns:\n raise ValueError(\"merge_columns must not be empty\")\n\n with open(f_csv_out, \"w\") as FOUT:\n CSV_HANDLE = None\n total_rows = 0\n\n for row in csv_iterator(f_csv):\n\n output = {\"_ref\": 
next(_ref_counter)}\n\n if not has_checked_keys:\n for key in merge_columns:\n if key not in row.keys():\n msg = \"Column **{}** not in csv file {}\"\n raise KeyError(msg.format(key, f_csv))\n has_checked_keys = True\n\n if target_column in row.keys():\n msg = \"Generated column **{}** already in csv file {}\"\n raise KeyError(msg.format(target_column, f_csv))\n\n text = []\n for key in merge_columns:\n val = row[key].strip()\n if not val:\n continue\n if val[-1] not in \".?!,\":\n val += \".\"\n text.append(val)\n\n output[target_column] = \"\\n\".join(text).strip()\n\n if CSV_HANDLE is None:\n CSV_HANDLE = csv.DictWriter(FOUT, sorted(output.keys()))\n CSV_HANDLE.writeheader()\n\n CSV_HANDLE.writerow(output)\n total_rows += 1\n\n logger.info(\"Imported {}, {} entries\".format(f_csv, total_rows))", "def save_csvFile(df,file_location,file_name,sep,encoding):\n try:\n date=datetime.datetime.now().replace(microsecond=0)\n fullpath=file_location + file_name\n df.to_csv(fullpath, sep=sep, encoding=encoding, index=False, header=True)\n except IOError:\n print('Error saving the file: ' , file_name)\n sys.exit(1)" ]
[ "0.7729395", "0.6743207", "0.6410341", "0.6338948", "0.6320352", "0.6265429", "0.611104", "0.60951626", "0.6093322", "0.5940891", "0.5861167", "0.57967466", "0.57279533", "0.5659171", "0.5623228", "0.5598873", "0.5594488", "0.5593631", "0.5475531", "0.54566574", "0.5449391", "0.5412171", "0.5388504", "0.536355", "0.5355186", "0.53284985", "0.5304311", "0.5293666", "0.5266408", "0.525995", "0.52418983", "0.52347827", "0.52330333", "0.5225257", "0.5208558", "0.5196698", "0.51913095", "0.5176771", "0.51699793", "0.51698303", "0.5160191", "0.5155832", "0.51380575", "0.5121941", "0.5071875", "0.506037", "0.5058864", "0.50529313", "0.5041993", "0.50331116", "0.5031578", "0.5030172", "0.5026374", "0.50075734", "0.5006634", "0.50023615", "0.49797338", "0.49766764", "0.4971096", "0.4970824", "0.4970545", "0.4948291", "0.49454695", "0.4944642", "0.49279374", "0.49274477", "0.49250418", "0.49249813", "0.4921617", "0.49197382", "0.49159074", "0.49121693", "0.4910438", "0.49091697", "0.49045813", "0.49044353", "0.4895529", "0.48897803", "0.48890948", "0.48888794", "0.48882064", "0.48863283", "0.4884959", "0.4883067", "0.48773384", "0.4873509", "0.48727462", "0.48718798", "0.48653492", "0.48551318", "0.48539716", "0.48512053", "0.48501003", "0.48405498", "0.48351038", "0.4834453", "0.48332363", "0.48326746", "0.48292562", "0.48279446" ]
0.78255796
0
This function checks the size of a dataframe and splits it into parts containing approximately 1 million records each (the default number of records per resulting dataframe). It also provides the option of writing the split dataframes to disk. Parameters, type, return values
import numpy as np

def dataFrameSplit(df, norec=1000000, outfile=None):
 # calculation of the no. of rows of the dataframe
 df_rsz = len(df.index)
 if df_rsz > norec:
 no_splits = np.ceil(df_rsz/norec)
 dfarr = np.array_split(df, no_splits)
 # printing to disk when an output file is requested
 if outfile is not None:
 for i, arr in enumerate(dfarr):
 arr.to_csv("D:\\ddf"+str(i+1)+".csv", encoding='utf-8', index=False, header=False)
 return dfarr
 else:
 print("The dataframe doesn't have sufficient records")
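A minimal usage sketch for the function above (the dataframe contents and the printed sizes are illustrative assumptions, not part of the original record; it assumes pandas and numpy are installed):

import pandas as pd

# build a dataframe with 2.5 million rows so it is split into 3 parts
df = pd.DataFrame({"value": range(2500000)})

parts = dataFrameSplit(df, norec=1000000)
if parts is not None:
 # each part holds roughly 1/3 of the rows, e.g. [833334, 833333, 833333]
 print([len(p) for p in parts])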
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_dataframe(df, size=10*1024*1024):\n \n # size of each row\n row_size = df.memory_usage().sum() / len(df)\n # maximum number of rows in each segment\n row_limit = int(size // row_size)\n # number of segments\n seg_num = (len(df)+row_limit-1)//row_limit\n # split df into segments\n segments = [df.iloc[i*row_limit : (i+1)*row_limit] for i in range(seg_num)]\n\n return segments", "def test_03_dataframe_to_dataframe_w_chunksize(self):\n _, err = _iquery(\"store(flatten(DF1, cells_per_chunk:5), DF3)\")\n assert not err, err\n self._array_cleanups.append('DF3')\n check_v_sum('DF3')\n nchunks = chunk_count(vaid_of('DF3'))\n prt(\"DF3 has\", nchunks, \"chunks\")\n assert nchunks < self._df1_chunks, \"DF3 did not get dense!\"", "def split_set(dataframe, test_size):\n i = np.floor(len(dataframe)*test_size).astype(int)\n set_a = dataframe[0:i].reset_index()\n set_b = dataframe[i:].reset_index()\n return set_a, set_b", "def splitting_df(dataframe):\n dataframe = dataframe.dropna()\n index = 100\n train_set = dataframe.iloc[:index]\n test_set = dataframe.iloc[index:]\n return train_set, test_set, dataframe", "def return_size(df):\n return round(sys.getsizeof(df) / 1e9, 2)", "def split_data(df, test_size): \n\n X_train, X_test, y_train, y_test = train_test_split(df[[\"description_processed\", \"transaction_type\", \"transaction_account_type\"]],\n df['transaction_class'],\n test_size=test_size,\n shuffle=True,\n random_state=42)\n \n return X_train, X_test, y_train, y_test", "def uploade_how_many_rows_we_want(self, df):\r\n try:\r\n if len(df) > 300000 or df.memory_usage(deep=True).sum() > self.memory:\r\n raise Exception(\"batch request\")\r\n try:\r\n self.insert(df)\r\n \r\n except Exception as ex:\r\n if 'string contains an untranslatable character' in str(ex):\r\n for i in np.where(df.dtypes != np.float)[0]:\r\n df['drop'] = df[df.columns[i]].apply(lambda x: self.is_ascii(x))\r\n l_tmp = (df['drop'][df['drop']].index)\r\n if len(l_tmp) > 0:\r\n print(\"rows remove: \" + str(list(l_tmp)))\r\n df.drop(l_tmp, inplace=True)\r\n df.drop('drop', axis=1, inplace=True)\r\n elif 'batch request' in str(ex) or 'LAN message' in str(ex):\r\n raise Exception(\"batch request\")\r\n else:\r\n print('error')\r\n print(ex)\r\n raise error\r\n self.rows += len(df)\r\n\r\n\r\n except Exception as ex:\r\n if \"batch request\" in str(ex):\r\n \r\n # split the data to 2 dataframes\r\n len_data = math.ceil(len(df)/2)\r\n df1 = df.iloc[:len_data]\r\n df2 = df.iloc[len_data:]\r\n\r\n self.uploade_how_many_rows_we_want(df1)\r\n self.uploade_how_many_rows_we_want(df2)\r\n\r\n\r\n else:\r\n print (ex)\r\n raise error", "def make_dataframes(folders, file_stem):\n\n print \"Making one big dataframe...\"\n df_orig = load_df(folders, file_stem, n_files=500)\n # df_orig = load_df(folders, \"output\")\n # df_orig = load_df(folders, \"output_ma1Lt11\")\n # df_orig = load_df(folders, \"output_good\")\n\n print len(df_orig.index), 'entries in dataframe'\n\n # Drop columns to save space\n drop_cols = [\n 'h1u', 'h1d', 'h1b', 'h1V', 'h1G', 'h1A',\n 'h2u', 'h2d', 'h2b', 'h2V', 'h2G', 'h2A',\n 'Brh3gg', 'Brh3tautau', 'Brh3bb', 'Brh3ww',\n 'Brh3zz', 'Brh3gammagamma', 'Brh3zgamma',\n 'Brh3h1h1', 'Brh3h2h2', 'Brh3h1h2',\n 'Brh3a1a1', 'Brh3a1z',\n # 'bsgamma', 'bsmumu', 'btaunu', 'delms', 'delmd']\n ]\n\n for col in drop_cols:\n if col in df_orig.columns.values:\n df_orig.drop(col, inplace=True, axis=1)\n print \"After dropping columns:\", df_orig.columns.values, len(df_orig.columns.values), \"columns\"\n\n # Remove any duplicate 
entries\n df_orig.drop_duplicates(inplace=True)\n\n # Load up the glu-glu cross sections for 13 TeV\n print \"Adding in cross-sections...\"\n # cs = pd.read_csv(\"parton_lumi_ratio.csv\")\n cs = pd.read_csv(\"YR3_cross_sections.csv\")\n masses = cs[\"MH [GeV]\"]\n mass_len = len(masses)\n xsec_ggf13 = cs[\"ggF 13TeV Cross Section [pb]\"]\n xsec_vbf13 = cs[\"VBF 13TeV Cross Section [pb]\"]\n # xsec_wh13 = cs[\"WH 13TeV Cross Section [pb]\"]\n # xsec_zh13 = cs[\"ZH 13TeV Cross Section [pb]\"]\n xsec_ggf8 = cs[\"ggF 8TeV Cross Section [pb]\"]\n xsec_vbf8 = cs[\"VBF 8TeV Cross Section [pb]\"]\n\n def find_closest_mass_ind(mass):\n pos = bisect_left(masses, mass)\n if pos == mass_len:\n return mass_len - 1\n return pos\n\n print 'Storing nearest-mass indices'\n df_orig['mass_ind_h1'] = df_orig.apply(lambda row: find_closest_mass_ind(row['mh1']), axis=1)\n df_orig['mass_ind_h2'] = df_orig.apply(lambda row: find_closest_mass_ind(row['mh2']), axis=1)\n df_orig['mass_ind_h3'] = df_orig.apply(lambda row: find_closest_mass_ind(row['mh3']), axis=1)\n\n # ALL XSEC STORED ARE CORRECTLY SCALED BY REDUCED COUPLING\n print \"Storing 13 TeV gg xsec\"\n df_orig[\"xsec_ggf13_h1\"] = df_orig['h1ggrc2'] * xsec_ggf13[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_ggf13_h2\"] = df_orig['h2ggrc2'] * xsec_ggf13[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_ggf13_h3\"] = df_orig['h3ggrc2'] * xsec_ggf13[df_orig['mass_ind_h3']].values\n\n print \"Storing 13 TeV vbf xsec\"\n df_orig[\"xsec_vbf13_h1\"] = df_orig['h1vvrc2'] * xsec_vbf13[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_vbf13_h2\"] = df_orig['h2vvrc2'] * xsec_vbf13[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_vbf13_h3\"] = df_orig['h3vvrc2'] * xsec_vbf13[df_orig['mass_ind_h3']].values\n\n print \"Storing 8 TeV ggf xsec\"\n df_orig[\"xsec_ggf8_h1\"] = df_orig['h1ggrc2'] * xsec_ggf8[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_ggf8_h2\"] = df_orig['h2ggrc2'] * xsec_ggf8[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_ggf8_h3\"] = df_orig['h3ggrc2'] * xsec_ggf8[df_orig['mass_ind_h3']].values\n\n print \"Storing 8 TeV vbf xsec\"\n df_orig[\"xsec_vbf8_h1\"] = df_orig['h1vvrc2'] * xsec_vbf8[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_vbf8_h2\"] = df_orig['h2vvrc2'] * xsec_vbf8[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_vbf8_h3\"] = df_orig['h3vvrc2'] * xsec_vbf8[df_orig['mass_ind_h3']].values\n\n # Now add in individual channel xsec\n store_channel_xsec(df_orig)\n print df_orig.columns.values\n\n # Make some subsets here:\n print \"Making subsets...\"\n\n # Points passing all experimental constraints chosen\n df_pass_all = subset_pass_constraints(df_orig)\n # df_pass_all = None\n\n # subset with 2m_tau < ma1 < 10\n df_ma1Lt10 = None\n # df_ma1Lt10 = subset_var(df_pass_all, 3.554, 10.5, \"ma1\")\n\n mhmin, mhmax = 122.1, 128.1\n # subset with h1 as h_125\n # df_h1SM = subset_var(df_pass_all, mhmin, mhmax, \"mh1\")\n df_h1SM = None\n\n # subset with h2 as h_125\n # df_h2SM = subset_var(df_pass_all, mhmin, mhmax, \"mh2\")\n df_h2SM = None\n\n n_orig = len(df_orig.index)\n\n def percent_str(numerator, denominator):\n return \"%.3f %% \" % (100*numerator/float(denominator))\n\n print \"Running over\", n_orig, \"points\"\n if isinstance(df_pass_all, pd.DataFrame):\n n_pass_all = len(df_pass_all.index)\n print n_pass_all, \"points passing all constraints (= %s)\" % percent_str(n_pass_all, n_orig)\n # print len(df_ma1Lt10.index), \"of these have 2m_tau < ma1 < 10 GeV (= %s)\" % percent_str(len(df_ma1Lt10.index), n_pass_all)\n # print len(df_h1SM.index), 
\"points in the h1 = h(125) subset (= %s)\" % percent_str(len(df_h1SM.index), n_pass_all)\n # print len(df_h2SM.index), \"points in the h2 = h(125) subset (= %s)\" % percent_str(len(df_h2SM.index), n_pass_all)\n print \"\"\n\n return df_orig, df_pass_all, df_ma1Lt10, df_h1SM, df_h2SM", "def split_df(df, n_chunks):\n chunk_size = int(np.ceil(df.shape[0] / n_chunks))\n assert n_chunks * chunk_size >= df.shape[0]\n chunks = []\n for i in range(0, df.shape[0], chunk_size):\n chunks.append(df[i:i + chunk_size])\n assert len(chunks) == n_chunks\n return chunks", "def split(self):\n \n spl = self.which('split')\n if spl:\n self.__tmp = \"/tmp\"\n self.__tmpout = \"/tmp/output\"\n if not os.path.exists(self.__tmpout):\n os.makedirs(self.__tmpout)\n #os.chdir(\"/tmp\")\n '''\n assume split prog overwrites existing files if\n there is a conflict in file names\n '''\n #thecommand = \"%s -a 3 -b 500k %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n thecommand = \"%s -a 3 -b 10m %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n os.system(thecommand)\n dirList=os.listdir(self.__tmpout)\n #self.constructCat(dirList)\n for chunkfilename in dirList:\n #print chunkfilename \n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n #print self.__cat\n self.__flist.append(self.__tmpout + \"/\" + chunkfilename)\n #print self.__flist\n self.writeLog(chunkfilename, self.md5(fileName=self.__tmpout + \"/\" + chunkfilename))\n self.__numchunks = len([item for item in os.listdir(self.__tmpout) if os.path.isfile(self.__tmpout + \"/\" + item)])\n else:\n try:\n f = open(self.__filename, 'rb')\n except (OSError, IOError), e:\n raise FileSplitterException, str(e)\n \n bname = (os.path.split(self.__filename))[1]\n # Get the file size\n fsize = os.path.getsize(self.__filename)\n # dynamically calculate number of chunks\n strfsize = str(fsize)\n '''\n in MB's\n 8 - teens\n 9 - hundreds\n 10 - gigabytes\n '''\n if len(strfsize) == 8:\n #self.__numchunks = fsize/100000\n self.__numchunks = fsize/50000\n elif len(strfsize) == 9:\n #self.__numchunks = fsize/1000000\n self.__numchunks = fsize/500000\n elif len(strfsize) == 10:\n #self.__numchunks = fsize/10000000\n self.__numchunks = fsize/5000000\n #print '\\nSplitting file %s into %d chunks' % (self.__filename, self.__numchunks)\n # Get size of each chunk\n self.__chunksize = int(float(fsize)/float(self.__numchunks))\n \n chunksz = self.__chunksize\n total_bytes = 0\n \n for x in range(self.__numchunks):\n #chunkfilename = bname + '-' + str(x+1) + self.__postfix\n chunkfilename = bname + ('-%03d' % (x+1)) + self.__postfix\n # kill residual file if it exists\n if os.path.exists(chunkfilename):\n os.remove(chunkfilename)\n \"\"\"\n if reading the last section, calculate correct\n chunk size.\n \"\"\"\n if x == self.__numchunks - 1:\n chunksz = fsize - total_bytes\n \n try:\n if self.__debug:\n print 'Writing file chunk: %s' % chunkfilename\n data = f.read(chunksz)\n total_bytes += len(data)\n chunkf = file(chunkfilename, 'wb')\n chunkf.write(data)\n chunkf.close()\n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n self.__flist.append(chunkfilename)\n self.writeLog(chunkfilename, self.md5(fileName=chunkfilename))\n except (OSError, IOError), e:\n print e\n continue\n except EOFError, e:\n print e\n break\n\n print '\\nSplit complete on file: %s into %d chunks\\n' % (self.__filename, self.__numchunks)\n self.__logfhandle.close()\n #self.__cat += \"> \" + self.__remotepath + 
\"/\" + self.__filename\n self.set_cat_statement()", "def df2db_separate(self, df: pd.DataFrame, tab_name):\n self.execute(\"set hive.execution.engine = tez\")\n self.execute(\"set tez.queue.name = sephora_internal\")\n self.execute(\"drop table if exists {table_name}\".format(table_name=tab_name))\n\n max_df_size = 50000\n\n dfs = df_split(df, batch_size=max_df_size)\n num_piece = len(dfs)\n\n dfs[0].to_sql(tab_name, self.engine, method='multi', index=False)\n if num_piece > 1:\n for pdf in dfs[1:]:\n self.execute(\"DROP TABLE IF EXISTS {tt}\".format(tt=tab_name + '_tmp'))\n pdf.to_sql(tab_name + '_tmp', self.engine, method='multi', index=False)\n self.execute(\"INSERT INTO TABLE {tn} SELECT * FROM {tt}\".format(\n tn=tab_name, tt=tab_name + '_tmp'\n ))\n print(len(pdf))\n self.execute(\"DROP TABLE IF EXISTS {tt}\".format(tt=tab_name + '_tmp'))", "def split_on_whole_table(\n df: pyspark.DataFrame,\n ) -> pyspark.DataFrame:\n return df", "def split_data(input_df, output_df):\n return train_test_split(input_df, output_df, test_size=0.2, random_state=42,\n stratify=output_df)", "def getSplits(df, train_size, val_size, test_size, seed=None):\n size = len(df)\n\n # size is considered a percentage if less than 1:\n train_size = int(train_size * size) if train_size < 1 else train_size\n val_size = int(val_size * size) if val_size < 1 else val_size\n test_size = int(test_size * size) if test_size < 1 else test_size\n\n if not seed is None:\n np.random.seed(seed)\n\n train_val_idx = np.random.choice(\n a=range(size),\n size=train_size + val_size,\n replace=False\n )\n train_idx = train_val_idx[:train_size]\n val_idx = train_val_idx[train_size:]\n\n train = df.iloc[train_idx]\n val = df.iloc[val_idx]\n test = df.drop(train.index).drop(val.index) # test is equal to the leftover\n\n assert len(train) + len(val) + len(test) == len(df)\n\n return train, val, test", "def calculateChunkSize(size, record_count, splits):\n avg_record_size = size / record_count\n logging.info(\n \"Avg record size: %0.02f=%d/%d\" %\n (avg_record_size, size, record_count))\n chunk = floor(ceil(size / (splits * avg_record_size)) * avg_record_size)\n\n logging.info(\n \"Setting chunk to: %d=floor(ceil(%d/(%d*%0.02f))*%0.02d)\" %\n (chunk, size, splits, avg_record_size, avg_record_size))\n return chunk", "def make_bedfiles():\n df = pd.read_csv(\"%s.length\" % ref, sep='\\t', header=None)\n thresh = math.ceil(sum(df[1]) / globals()['jobs_per_pool'])\n lines = []\n fcount = 0\n fsum = 0\n for count,row in enumerate(df.index):\n contig, length = list(df.loc[row, :])\n fsum += length\n lines.append([contig, str(length)])\n if fsum >= thresh or count + 1 == len(df.index):\n make_bedfile(lines, fcount)\n lines = []\n fcount += 1\n fsum = 0\n return fcount", "def split_df(df,\n test_size=.10,\n random_state=42):\n train_df, test_df = train_test_split(df,\n test_size=test_size,\n random_state=random_state)\n return train_df, test_df", "def test_split(range_size, partition_size):\n dump = Mock()\n\n iterable = list(range(range_size))\n\n list(_split(partition_size=partition_size, dump=dump, iterable=iterable))\n expected_call_count = (range_size // partition_size) + int(bool(range_size % partition_size))\n\n assert dump.call_count == expected_call_count", "def chunk_data(path, chunksize):\n reader = pandas.read_table(path, chunksize=chunksize, skiprows=0)\n\n start = 0\n for chunk in reader:\n stop = start + len(chunk) - 1\n dataframe_to_csv(chunk, file=get_chunk_file_name(path, (start, stop)))\n start = stop + 1\n\n return 
alphabetize_chunk_files(os.path.basename(path))", "def make_files_for_cases(size):\n case_counts = get_case_counts_for_primary_sites()\n for primary_site in case_counts:\n print(\"one done\")\n if case_counts[primary_site] >= size:\n temp_file = get_all_cases_from_primary_site(primary_site)\n if len(temp_file) >= size:\n df = pd.DataFrame(temp_file, columns = [\"primary_site\",\"case_uuid\", \"rna_seq_uuid\"])\n df.to_csv(\"data/\" + primary_site + \"_case_rna_uuids.csv\", sep = \",\")\n return", "def get_test_data_df(X,size: int = 1): \n num_rows = len(X)\n test_df = X.copy()\n\n while num_rows < size:\n test_df = test_df.append(test_df)\n num_rows = len(test_df)\n\n return test_df[:size].reset_index(drop = True)", "def split_dataframe(df:\"pandas.DataFrame, pandas.Series\", sections:\"int\"=5, drop_index:\"bool\"=True, output:\"str\"=\"dataframe\")-> \"None or pandas.DataFrame\":\n import numpy as np\n from IPython.display import display_html\n \n if sections <= 0:\n raise ValueError('number sections must be larger than 0.')\n \n ### Find out how to keep column names when dropindex=True\n ### if series, dont allow drop index?\n \n ### allow passing in of desired column names as an array of strings (will result\n ### in dup col names but it won't matter if its only being displayed and not used in calculations)\n\n if isinstance(df, pandas.Series):\n df = df.to_frame()\n \n if drop_index:\n df.reset_index(drop = True, inplace=True)\n else:\n df.reset_index(level=0, inplace=True)\n\n df_split = np.array_split(df, sections)\n num_rows = [column.shape[0] for column in df_split]\n \n if output == \"dataframe\":\n \n alldata = [column.values.tolist() for column in df_split]\n \n # Add empty rows to each DataFrame until all DataFrames have the same number of rows\n for i in range(len(alldata)):\n while len(alldata[i]) < max(num_rows):\n alldata[i].append([\"\"]*df.shape[1])\n\n # Create rows of values across all of the DataFrames in alldata\n # When each entire row is created, add it to the output DataFrame\n dataframe = [] # <-- Output DataFrame\n for row_index in range(max(num_rows)):\n across_row = []\n for dataf in alldata:\n across_row.extend(dataf[row_index])\n dataframe.extend([across_row])\n \n return pandas.DataFrame(data=dataframe)\n \n if output == \"html\":\n strHtml = ''\n for x in split_dataframe:\n strHtml += x.to_html()\n display_html(strHtml.replace('table','table style=\"display:inline\"'), raw=True)", "def readDBchunks(self, tablename, orderField, chunksize=50000,\n selectOptions=None, limit=None, filterOptions=None, verbose=False):\n\n if limit:\n remaining = limit\n next_chunk = min(remaining, chunksize)\n else:\n next_chunk = chunksize\n\n cont = 0\n \n selectOptions = selectOptions + ', ' + orderField\n\n df = self.readDBtable(tablename, limit=next_chunk, selectOptions=selectOptions,\n filterOptions = filterOptions, orderOptions=orderField)\n\n while (len(df)):\n cont = cont+len(df)\n if verbose:\n print('[DBManager (readDBchunks)] Number of rows read so far:', cont)\n if limit:\n remaining = limit - cont\n next_chunk = min(remaining, chunksize)\n else:\n next_chunk = chunksize\n yield df.iloc[:,:-1]\n\n #Next we need to start from last retrieved element\n filtercondition = orderField + '>' + str(df.iloc[:,-1][len(df)-1])\n if filterOptions:\n filtercondition = filtercondition + ' AND ' + filterOptions\n \n if next_chunk>0:\n df = self.readDBtable(tablename, limit=next_chunk, selectOptions=selectOptions,\n filterOptions = filtercondition, orderOptions=orderField)\n else:\n 
#If maximum number of records has been reached, set df to empty list to exit\n df = []", "def mixed_divide_by_events_lenght(data_df:pd.DataFrame, path_column, sizes_filename=None):\n sizes = None\n if sizes_filename is not None:\n if os.path.exists(sizes_filename):\n with open(sizes_filename, 'rb') as sizes_handler:\n sizes = pickle.load(sizes_handler)\n if sizes is None:\n sizes = dict()\n aux = 0\n for index, row in data_df.iterrows():\n sys.stderr.write('\\rdone {0:%}'.format(aux / len(data_df)))\n with open(row[path_column], 'rb') as file_handler:\n try:\n values = pickle.load(file_handler)\n except Exception as e:\n print(row[path_column])\n print(\"test\")\n print(e)\n raise ValueError()\n if len(values) not in sizes.keys():\n sizes[len(values)] = []\n sizes[len(values)].append(row['episode'])\n aux += 1\n if sizes_filename is not None:\n with open(sizes_filename, 'wb') as sizes_handler:\n pickle.dump(sizes, sizes_handler)\n return sizes", "def split_file(self, input_file):\r\n file_list = [] \r\n with open(input_file, 'r', encoding='GB18030', errors='ignore') as f_in:\r\n data = f_in.readlines()\r\n lines_num = len(data)\r\n size = lines_num // self.num_workers # lines splitted in a chunk\r\n start = 0\r\n end = size\r\n w_path = \"../data/\"\r\n for i in range(lines_num//size):\r\n chunk_name = \"chunk_\" + str(i) + \".dat\"\r\n with open(w_path + chunk_name, 'w', encoding='utf-8') as f_out:\r\n f_out.write(''.join(data[start:end]))\r\n start = start + size\r\n end = end + size\r\n file_list.append(\"../data/chunk_\" + str(i) + \".dat\")\r\n \r\n print(f\"File splitted into {self.num_workers} chunks.\")\r\n return file_list, size", "def train_test_split(df, test_size=0.3):\r\n # split df here\r\n train_size = int(df.shape[0] * (1 - test_size))\r\n test_size = df.shape[0] - train_size\r\n train = df[:train_size]\r\n test = df[train_size:]\r\n\r\n return train, test # return the train and test datasets\r", "def to_chunked_dataframe(\n self, max_chunk_size: int = -1, timeout_sec: int = DEFAULT_TIMEOUT_SEC\n ) -> pd.DataFrame:\n # Max chunk size defined by user\n records = []\n for result in self.result(timeout_sec=timeout_sec):\n result.append(records)\n if len(records) == max_chunk_size:\n df = pd.DataFrame.from_records(records)\n records.clear() # Empty records array\n yield df\n\n # Handle for last chunk that is < max_chunk_size\n if not records:\n yield pd.DataFrame.from_records(records)", "def readFilesIntoDataFrame(nameTemplate, numOfFiles):\n #https://www.kaggle.com/arjanso/reducing-dataframe-memory-size-by-65\n\n list_of_dfs = []\n for i in range(numOfFiles):\n print ('Processing {0} out of {1} files'.format(i, numOfFiles))\n\n fileToProcess = fileLocation + nameTemplate.format(i)\n print 'fileToProcess=', fileToProcess\n \n if 'feather' in nameTemplate:\n read_df = feather.read_feather(fileToProcess)\n elif 'parquet' in nameTemplate:\n read_df = pd.read_parquet(fileToProcess)\n else:\n print 'This should not happen, nameTemplate is wrong, please check it is in parquet or feather format or that the template correctly describes the existing files, exiting...'\n sys.exit(1)\n\n print read_df.info(memory_usage='deep')\n print '-'*50\n print read_df.describe()\n list_of_dfs.append(read_df)\n \n print 'Start concatenating dataframes, it may take some time'\n comb_df = pd.concat(list_of_dfs, ignore_index=True)\n return comb_df", "def large_xarray_to_multi_parquet(xdf,float_id):\n for lvl in tqdm(xdf.N_PARAM.values.tolist()):\n df = xdf.sel(N_PARAM=lvl).to_dataframe()\n df = 
df.reset_index()\n df = process_chunked_df(df,float_id)\n df.to_parquet(f\"temp/{str(lvl)}.parquet\",use_deprecated_int96_timestamps=True)", "def split_data(df):\n\trandom_seed = 1\n\tdf_train = df.sample(frac=0.8, random_state=random_seed)\n\tdf_rem = df.loc[~df.index.isin(df_train.index)]\n\tdf_valid = df_rem.sample(frac=0.5, random_state=random_seed)\n\tdf_test = df_rem.loc[~df_rem.index.isin(df_valid.index)]\n\tlogger.info(\"Shape of training dataframe: \" + str(df_train.shape))\n\tlogger.info(\"Shape of validation dataframe: \" + str(df_valid.shape))\n\tlogger.info(\"Sahpe of test dataframe: \" + str(df_test.shape))\n\n\treturn df_train, df_valid, df_test", "def split_data(df, split_method='fo', test_size=.2, random_state=42):\n if split_method == 'fo':\n train_set, test_set = _split_fo(df,\n test_size=test_size,\n random_state=random_state)\n elif split_method == 'tfo':\n train_set, test_set = _split_tfo(df, test_size=test_size)\n elif split_method == 'ufo':\n train_set, test_set = _split_ufo(df,\n test_size=test_size,\n random_state=random_state)\n elif split_method == 'utfo':\n train_set, test_set = _split_utfo(df, test_size=test_size)\n else:\n raise HuitreError('Invalid data_split value, expect: ufo, utfo')\n train_set = train_set.reset_index(drop=True)\n test_set = test_set.reset_index(drop=True)\n return train_set, test_set", "async def split_large_groups_impl(config):\n async with create_sessionmaker(config)() as dbsession:\n progress = ClickIndeterminate(\"Splitting large groups\")\n progress.start()\n splitting = True\n stmt = select(Group).options(selectinload(Group.items), selectinload(Group.children))\n while splitting:\n splitting = False\n result = await dbsession.execute(stmt)\n for group in result.scalars():\n if len(group.children) == 0:\n if len(group.items) > 120 and len(group.items) < 300: # noqa: PLR2004\n if split_by_year(config, dbsession, group):\n splitting = True\n else:\n split_by_similarity(dbsession, group)\n splitting = True\n elif len(group.items) >= 300: # noqa: PLR2004\n if split_by_attribute(dbsession, group, \"concepts\"):\n splitting = True\n elif split_by_attribute(dbsession, group, \"subjects\"):\n splitting = True\n elif split_by_attribute(dbsession, group, \"materials\"):\n splitting = True\n elif split_by_attribute(dbsession, group, \"techniques\"):\n splitting = True\n elif split_by_year(config, dbsession, group):\n splitting = True\n else:\n split_by_similarity(dbsession, group)\n splitting = True\n await dbsession.commit()\n progress.stop()", "def test_google_storage_small_batch_size(sdc_builder, sdc_executor, gcp):\n\n gcp_file_name = 'test_9_records.xls'\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n bucket_name = get_random_string(ascii_lowercase, 10)\n\n storage_client = gcp.storage_client\n\n google_cloud_storage = pipeline_builder.add_stage('Google Cloud Storage', type='origin')\n\n google_cloud_storage.set_attributes(bucket=bucket_name,\n common_prefix='gcs-test',\n prefix_pattern=gcp_file_name,\n data_format='EXCEL',\n max_batch_size_in_records=1,\n excel_header_option='WITH_HEADER')\n\n pipeline_finisher_executor = pipeline_builder.add_stage('Pipeline Finisher Executor')\n pipeline_finisher_executor.set_attributes(preconditions=['${record:eventType() == \\'no-more-data\\'}'],\n on_record_error='DISCARD')\n\n google_cloud_storage >= pipeline_finisher_executor\n\n wiretap = pipeline_builder.add_wiretap()\n google_cloud_storage >> wiretap.destination\n\n pipeline = pipeline_builder.build(title='Google Cloud 
Storage').configure_for_environment(gcp)\n sdc_executor.add_pipeline(pipeline)\n\n created_bucket = gcp.retry_429(storage_client.create_bucket)(bucket_name)\n try:\n blob = created_bucket.blob('gcs-test/' + gcp_file_name)\n blob.upload_from_filename('resources/gcp/' + gcp_file_name)\n\n # Start the pipeline and make sure the timeout is defined, since the original bug involved\n # an infinite loop reading the same file over and over again\n sdc_executor.start_pipeline(pipeline).wait_for_finished(timeout_sec=300)\n\n # Check that the file has been read through exactly once\n output_records = [record.field for record in wiretap.output_records]\n assert len(output_records) == 9\n # Check that no error records were generated\n error_records = [record.field for record in wiretap.error_records]\n assert len(error_records) == 0\n\n finally:\n logger.info('Deleting bucket %s ...', created_bucket.name)\n gcp.retry_429(created_bucket.delete)(force=True)", "def write_table(self, df):\n (part_names, grouped_df, part_offsets,) = _get_groups_and_offsets(\n df=df,\n partition_cols=self.partition_cols,\n preserve_index=self.common_args[\"index\"],\n )\n fs = ioutils._ensure_filesystem(None, self.path, None)\n fs.mkdirs(self.path, exist_ok=True)\n\n full_paths = []\n metadata_file_paths = []\n full_offsets = [0]\n\n for idx, keys in enumerate(part_names.itertuples(index=False)):\n subdir = fs.sep.join(\n [\n f\"{name}={val}\"\n for name, val in zip(self.partition_cols, keys)\n ]\n )\n prefix = fs.sep.join([self.path, subdir])\n fs.mkdirs(prefix, exist_ok=True)\n current_offset = (part_offsets[idx], part_offsets[idx + 1])\n num_chunks = 1\n parts = 1\n\n if self.max_file_size is not None:\n # get the current partition\n start, end = current_offset\n sliced_df = grouped_df[start:end]\n\n current_file_size = _get_estimated_file_size(sliced_df)\n if current_file_size > self.max_file_size:\n # if the file is too large, compute metadata for\n # smaller chunks\n parts = math.ceil(current_file_size / self.max_file_size)\n new_offsets = list(\n range(start, end, int((end - start) / parts))\n )[1:]\n new_offsets.append(end)\n num_chunks = len(new_offsets)\n parts = len(new_offsets)\n full_offsets.extend(new_offsets)\n else:\n full_offsets.append(end)\n\n curr_file_num = 0\n num_chunks = 0\n while num_chunks < parts:\n new_file_name = f\"{self.filename}_{curr_file_num}.parquet\"\n new_full_path = fs.sep.join([prefix, new_file_name])\n\n # Check if the same `new_file_name` exists and\n # generate a `new_file_name`\n while new_full_path in self._file_sizes and (\n self._file_sizes[new_full_path]\n + (current_file_size / parts)\n ) > (self.max_file_size):\n curr_file_num += 1\n new_file_name = (\n f\"{self.filename}_{curr_file_num}.parquet\"\n )\n new_full_path = fs.sep.join([prefix, new_file_name])\n\n self._file_sizes[new_full_path] = self._file_sizes.get(\n new_full_path, 0\n ) + (current_file_size / parts)\n full_paths.append(new_full_path)\n metadata_file_paths.append(\n fs.sep.join([subdir, new_file_name])\n )\n num_chunks += 1\n curr_file_num += 1\n else:\n self.filename = self.filename or _generate_filename()\n full_path = fs.sep.join([prefix, self.filename])\n full_paths.append(full_path)\n metadata_file_paths.append(\n fs.sep.join([subdir, self.filename])\n )\n full_offsets.append(current_offset[1])\n\n paths, metadata_file_paths, offsets = (\n full_paths,\n metadata_file_paths,\n full_offsets,\n )\n existing_cw_batch = defaultdict(dict)\n new_cw_paths = []\n partition_info = [(i, j - i) for i, j in 
zip(offsets, offsets[1:])]\n\n for path, part_info, meta_path in zip(\n paths,\n partition_info,\n metadata_file_paths,\n ):\n if path in self.path_cw_map: # path is a currently open file\n cw_idx = self.path_cw_map[path]\n existing_cw_batch[cw_idx][path] = part_info\n else: # path not currently handled by any chunked writer\n new_cw_paths.append((path, part_info, meta_path))\n\n # Write out the parts of grouped_df currently handled by existing cw's\n for cw_idx, path_to_part_info_map in existing_cw_batch.items():\n cw = self._chunked_writers[cw_idx][0]\n # match found paths with this cw's paths and nullify partition info\n # for partition_col values not in this batch\n this_cw_part_info = [\n path_to_part_info_map.get(path, (0, 0))\n for path in self._chunked_writers[cw_idx][1]\n ]\n cw.write_table(grouped_df, this_cw_part_info)\n\n if new_cw_paths:\n # Create new cw for unhandled paths encountered in this write_table\n new_paths, part_info, meta_paths = zip(*new_cw_paths)\n self._chunked_writers.append(\n (\n ParquetWriter(new_paths, **self.common_args),\n new_paths,\n meta_paths,\n )\n )\n new_cw_idx = len(self._chunked_writers) - 1\n self.path_cw_map.update({k: new_cw_idx for k in new_paths})\n self._chunked_writers[-1][0].write_table(grouped_df, part_info)", "def prepare_stops_to_request(df: pd.DataFrame) -> list:\n return [split_df(df, i, i + 100) for i in range(0, len(df), 100)]", "def splitData(df, split):\n train = df.iloc[:int(len(df)*split)]\n test = df.iloc[int(len(df)*split):]\n \n return train, test", "def take_sample(df, size=100, default=None):\n result = default or []\n if hasattr(df, 'take'):\n header = ','.join([f.name for f in df.schema.fields])\n result = [header]\n result.extend(\n df.limit(size).rdd.map(dataframe_util.convert_to_csv).collect())\n return result", "def test_split(self):\n array = np.arange(1000)\n df = DataFlow.from_numpy(array)\n\n # first, test throw errors on invalid arguments\n def assert_invalid_arg(**kwargs):\n with self.assertRaises(ValueError):\n df.split(**kwargs)\n assert_invalid_arg(partitions=[])\n assert_invalid_arg(partitions=[1000, 1])\n assert_invalid_arg(partitions=[1000, -1])\n assert_invalid_arg(partitions=[1, 2])\n assert_invalid_arg(portions=[])\n assert_invalid_arg(portions=[1.0, 0.1])\n assert_invalid_arg(portions=[1.0, -1])\n assert_invalid_arg(portions=[0.1, 0.2])\n\n # next, test split without shuffling\n df1, df2, df3 = df.split(partitions=[700, 200, 100])\n np.testing.assert_array_equal(df1.all()[0], array[:700])\n np.testing.assert_array_equal(df2.all()[0], array[700:900])\n np.testing.assert_array_equal(df3.all()[0], array[900:1000])\n df1, df2, df3 = df.split(portions=[-1, 0.2, 0.1])\n np.testing.assert_array_equal(df1.all()[0], array[:700])\n np.testing.assert_array_equal(df2.all()[0], array[700:900])\n np.testing.assert_array_equal(df3.all()[0], array[900:1000])\n\n # finally, test split with shuffling\n df1, df2 = df.split(portions=[0.5, -1], shuffle=True)\n self.assertEquals(len(df1), 500)\n self.assertEquals(len(df2), 500)\n df_array = np.concatenate([df1.all()[0], df2.all()[0]], axis=0)\n self.assertFalse(np.all(df_array == array))\n np.testing.assert_array_equal(np.sort(df_array), array)", "def test_chunk_size_priority_over_n_splits(self):\n with self.subTest(input='list', chunk_size=1, n_splits=6):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=1, n_splits=6, n_jobs=None), 13)\n with self.subTest(input='numpy', chunk_size=1, n_splits=6):\n 
self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=1, n_splits=6,\n n_jobs=None), 100)\n\n with self.subTest(input='list', chunk_size=3, n_splits=3):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=3, n_splits=3, n_jobs=None), 5)\n with self.subTest(input='numpy', chunk_size=3, n_splits=3):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=3, n_splits=3,\n n_jobs=None), 34)", "def test_df_out_size(self):\n df_out = create_labels(dat, time_range=4)\n\n assert df_out.shape[1] == dat.shape[1]+1, \"Error, the produced DataFrame is not the correct size\"", "def data_splitting_tool(feature_cols = None, label_col = None ,val_size = 0.2, test_size = 0.2, random_state = 13):\n if (val_size != ''):\n total_test_size = val_size + test_size\n test_ratio = test_size/total_test_size\n X_train, X_test, y_train, y_test = train_test_split(feature_cols, label_col, test_size = total_test_size, random_state=random_state)\n X_val, X_test, y_val, y_test = train_test_split(X_test, y_test, test_size=test_ratio, random_state=random_state)\n return(X_train, y_train, X_val,y_val, X_test, y_test)\n else:\n X_train, X_test, y_train, y_test = train_test_split(feature_cols, label_col, test_size = test_size, random_state=random_state)\n return(X_train, y_train, X_test, y_test)", "def FE_split_one_field_into_many(df_in, field, splitter, filler, new_names_list='', add_count_field=False):\r\n df_field = df_in[field].values\r\n df = copy.deepcopy(df_in)\r\n ### First copy whatever is in that field so we can save it for later ###\r\n df[field].fillna(filler, inplace=True)\r\n if add_count_field:\r\n ### there will be one extra field created when we count the number of contents in each field ###\r\n max_things = df[field].map(lambda x: len(x.split(splitter))).max() + 1\r\n else:\r\n max_things = df[field].map(lambda x: len(x.split(splitter))).max()\r\n if len(new_names_list) == 0:\r\n print(' Max. columns created by splitting %s field is %d.' %(\r\n field,max_things))\r\n else:\r\n if not max_things == len(new_names_list):\r\n print(\"\"\" Max. columns created by splitting %s field is %d but you have given %d \r\n variable names only. Selecting first %d\"\"\" %(\r\n field,max_things,len(new_names_list),len(new_names_list)))\r\n ### This creates a new field that counts the number of things that are in that field.\r\n if add_count_field:\r\n #### this counts the number of contents after splitting each row which varies. Hence it helps.\r\n num_products_viewed = 'Content_Count_in_'+field\r\n df[num_products_viewed] = df[field].map(lambda x: len(x.split(splitter))).values\r\n ### Clean up the field such that it has the right number of split chars otherwise add to it\r\n ### This fills up the field with empty strings between each splitter. You can't do much about it.\r\n #### Leave this as it is. It is not something you can do right now. 
It works.\r\n fill_string = splitter + filler\r\n df[field] = df[field].map(lambda x: x+fill_string*(max_things-len(x.split(splitter))) if len(\r\n x.split(splitter)) < max_things else x)\r\n ###### Now you create new fields by split the one large field ########\r\n if isinstance(new_names_list, str):\r\n if new_names_list == '':\r\n new_names_list = [field+'_'+str(i) for i in range(1,max_things+1)]\r\n else:\r\n new_names_list = [new_names_list]\r\n ### First fill empty spaces or NaNs with filler ###\r\n df.loc[df[field] == splitter, field] = filler\r\n for i in range(len(new_names_list)):\r\n try:\r\n df[new_names_list[i]] = df[field].map(lambda x: x.split(splitter)[i]\r\n if splitter in x else filler)\r\n except:\r\n df[new_names_list[i]] = filler\r\n continue\r\n ### there is really nothing you can do to fill up since they are filled with empty strings.\r\n #### Leave this as it is. It is not something you can do right now. It works.\r\n df[field] = df_field\r\n return df, new_names_list", "def find_partitions(df, match_func, max_size=None, block_by=None):\n\n # If block_by is provided, then we apply the algorithm to each block and\n # stitch the results back together\n if block_by is not None:\n blocks = df.groupby(block_by).apply(lambda g: find_partitions(\n df=g,\n match_func=match_func,\n max_size=max_size\n ))\n\n keys = blocks.index.unique(block_by)\n for a, b in zip(keys[:-1], keys[1:]):\n blocks.loc[b, :] += blocks.loc[a].iloc[-1] + 1\n\n return blocks.reset_index(block_by, drop=True)\n\n def get_record_index(r):\n return r[df.index.name or 'index']\n\n # Records are easier to work with than a DataFrame\n records = df.to_records()\n\n # This is where we store each partition\n partitions = []\n\n def find_partition(at=0, partition=None, indexes=None):\n\n r1 = records[at]\n\n if partition is None:\n partition = {get_record_index(r1)}\n indexes = [at]\n\n # Stop if enough duplicates have been found\n if max_size is not None and len(partition) == max_size:\n return partition, indexes\n\n for i, r2 in enumerate(records):\n\n if get_record_index(r2) in partition or i == at:\n continue\n\n if match_func(r1, r2):\n partition.add(get_record_index(r2))\n indexes.append(i)\n find_partition(at=i, partition=partition, indexes=indexes)\n\n return partition, indexes\n\n while len(records) > 0:\n partition, indexes = find_partition()\n partitions.append(partition)\n records = np.delete(records, indexes)\n\n return pd.Series({\n idx: partition_id\n for partition_id, idxs in enumerate(partitions)\n for idx in idxs\n })", "def preprocessing(database: pd.DataFrame,\n check_size: bool = False) -> pd.DataFrame:\n info = Informant(check_size, database.index.size)\n database['time'] = database['time'].round(3)\n\n database.drop_duplicates(inplace=True)\n info(\"drop_duplicates\", database.index.size)\n\n database = fix_duplicate_time(database)\n info(\"fix_time_duplicates\", database.index.size)\n\n database = quartile(quartile(database, 'x'), 'y')\n info(\"quartile\", database.index.size)\n\n return database", "def get_data(file_size):\n data_funcs_by_size = {'small': data.get_small, 'medium': data.get_medium, 'large': data.get_large}\n all_data = data_funcs_by_size[file_size]()\n train_data, test_data = data.split(all_data, 0.8)\n return train_data, test_data", "def create_df(f, outdir):\n # Limit RAM usage by specifying chunksize, which is the number of rows in a df\n chunksize = 10 ** 3\n\n # Create ErrorMessage object, with the default failed\n readme_txt = ErrorMessage(\"NAMECOLCHECK;FAILED\", 
\"SAMPLESIZE;FAILED\", \"PANDAS;PASSED\")\n\n # Save some variables\n colnames = [] # So you only have the fix the columns once, and not for each chunk \n first_chunk = True # Boolean to determine whether its the first chunk\n passed = False # Boolean to determine whether the cohort passed, other chunks are not necccesary if failed\n delimeter = None # Auto determine delimiter\n fail_reason = \"\" # Empty string\n\n # First check whether its space or tab delimited, requires special pandas parameter. Assume that when there is one column, its space delimited\n df = pd.read_csv(f, nrows=1)\n if len(df.columns) == 1:\n delimeter = \"\\t\"\n df = pd.read_csv(f, nrows=1, delimiter=delimeter)\n if len(df.columns) == 1:\n delimeter = \" \"\n\n try:\n # Now process the files, Requires pandas version lower than 1.2!!\n for df in pd.read_csv(f, chunksize=chunksize, delimiter=delimeter):\n if first_chunk:\n # Change boolean\n first_chunk = False\n \n # Check the headers\n df, fail_reason = check_headers(df, f)\n readme_txt.namecol = fail_reason\n\n # Save colnames\n colnames = df.columns\n\n # Check and fix the sample size\n df, fail_reason = check_n(df)\n readme_txt.samplesize = fail_reason\n\n # Save as zipped space delimited file\n if \"FAILED\" not in readme_txt.namecol:\n if \"FAILED\" not in readme_txt.samplesize:\n passed = True\n name = f.split(\".gz\")[0].split('/')[-1]\n df.to_csv(outdir + \"/\" + name + \"_cols_edit.gz\", \n sep=\" \",\n index=False,\n na_rep='NA', \n compression=\"gzip\")\n else:\n # Only continue if the cohort passed, if the number of columns is wrong down the file it will error out here\n if passed:\n # Rename columns\n df.columns = colnames\n\n # Fix N\n df, fail_reason = check_n(df)\n readme_txt.samplesize = fail_reason\n\n # Append output to existing file without header\n name = f.split(\".gz\")[0].split('/')[-1]\n df.to_csv(outdir + \"/\" + name + \"_cols_edit.gz\", \n sep=\" \",\n index=False,\n na_rep='NA', \n compression=\"gzip\",\n mode='a', \n header=False)\n\n except Exception as e:\n # This happens when the N columns isn't the same everywhere\n # Save some messsages\n readme_txt.pandascheck = \"PANDAS;FAILED;\" + str(e)\n\n # Save some messsages\n write_message(f, readme_txt, outdir)", "def test_large_chunk(self):\n chunksize = 7 * (1024 ** 2)\n size = 8 * (1024 ** 3)\n self.assertEqual(find_chunksize(size, chunksize), chunksize * 2)", "def splitter (data1, data2):\n flow_data = list()\n fare_record_data = list()\n\n for line in data1:\n line = [line[2:6],line[6:10],line[10:15],line[15:18],line[18],line[19],line[36:39],line[20:28],line[28:36],line[42:49]]\n flow_data.append(line)\n\n flow = pd.DataFrame(flow_data, columns=[\"ORIGIN_CODE\",\"DESTINATION_CODE\",\"ROUTE_CODE\",\"STATUS_CODE\",\"USAGE_CODE\",\"DIRECTION\",\"TOC\",\"VALID_UNTIL\",\"VALID_FROM\",\"FLOW_ID\"])\n flow['ROUTE_CODE'] = flow['ROUTE_CODE'].astype(object)\n flow.index.name=\"flow_idx\"\n\n for line in data2:\n line=[line[2:9],line[9:12],line[12:20]]\n fare_record_data.append(line)\n\n fare_record = pd.DataFrame(fare_record_data, columns=[\"FLOW_ID\",\"TICKET_CODE\",\"FARE\"])\n fare_record.index.name = \"fare_record_idx\"\n\n return flow,fare_record", "def getSizePerChunk(infile, splits, fileType, splitOnSize=False):\n if infile is None:\n raise Exception(\"We cannot determine chunk size from STDIN!\")\n\n if splitOnSize:\n # get a custom function that returns the size of this type of record\n recordSizer=fileType.sizer\n else:\n # just return 1 for each record\n 
recordSizer=recordCounter\n\n # loop through records\n inhandle = openInputFile(infile)\n record_count = 0\n totalSize = 0\n for record in fileType.recordStreamer(inhandle):\n record_count += 1\n totalSize+=recordSizer(record)\n inhandle.close()\n\n return calculateChunkSize(totalSize, record_count, splits)", "def ghetto_split(list_, chunk_size=100):\n logging.debug(f\"Splitting list of {len(list_)} length, chunk size = {chunk_size}\")\n split_lists = []\n for i in range(0,len(list_),chunk_size):\n split_lists.append(list_[i:i+chunk_size])\n logging.debug(f\"List has been split into {len(split_lists)} lists. Total num of elements in split lists is {sum([len(i) for i in split_lists])}\")\n return split_lists", "def split_data(df: pd.DataFrame,\n parameters: Dict) -> Tuple[pd.DataFrame, pd.DataFrame,\n pd.DataFrame, pd.DataFrame,\n pd.DataFrame, pd.DataFrame\n ]:\n\n df_new_customer = df.loc[df['rank'] == 1].reset_index(drop=True)\n df_existing_customer = df.loc[df['rank'] > 1].reset_index(drop=True)\n\n # EXISTING CUSTOMER\n size = df_existing_customer.shape[0]\n train_size = int(size * parameters['train'])\n test_size = int(size * parameters['test'])\n\n df_existing_customer = df_existing_customer.sample(frac=1, random_state=1).reset_index(drop=True)\n df_train = df_existing_customer.iloc[:train_size].sort_index()\n df_test = df_existing_customer.iloc[train_size:(train_size+test_size)].sort_index()\n df_valid = df_existing_customer.iloc[(train_size+test_size):].sort_index()\n\n # NEW CUSTOMER\n size = df_new_customer.shape[0]\n train_size = int(size * parameters['train'])\n test_size = int(size * parameters['test'])\n\n df_new_customer = df_new_customer.sample(frac=1, random_state=1).reset_index(drop=True)\n df_train_new_customer = df_new_customer.iloc[:train_size].sort_index()\n df_test_new_customer = df_new_customer.iloc[train_size:(train_size+test_size)].sort_index()\n df_valid_new_customer = df_new_customer.iloc[(train_size+test_size):].sort_index()\n return df_train, df_test, df_valid, df_train_new_customer, df_test_new_customer, df_valid_new_customer", "def test_small_chunk(self):\n chunksize = 7 * (1024 ** 2)\n size = 8 * (1024 ** 2)\n self.assertEqual(find_chunksize(size, chunksize), chunksize)", "def split_data(self, data):\n\n train_df, test_df = train_test_split(data, test_size=self.test_size, \n random_state=0, \n stratify=data[self.outcome_name])\n\n # print(\"Splitting data into training with \", train_df.shape, \"sampes and \",\n # test_df.shape, \"testing samples\")\n\n return train_df, test_df", "def to_pickles(df, path, split_size=3, inplace=True):\n print(f'shape: {df.shape}')\n \n if inplace==True:\n df.reset_index(drop=True, inplace=True)\n else:\n df = df.reset_index(drop=True)\n gc.collect()\n mkdir_p(path)\n \n kf = KFold(n_splits=split_size)\n for i, (train_index, val_index) in enumerate(tqdm(kf.split(df))):\n df.iloc[val_index].to_pickle(f'{path}/{i:03d}.p')\n return", "def process_chunked_df(df,float_id):\n df.loc[:, df.columns != 'JULD'] = df.loc[:, df.columns != 'time'].apply(pd.to_numeric, errors='ignore',downcast='signed')\n\n \"\"\"adds depth as column\"\"\"\n df[\"depth\"] = df['PRES']\n\n \"\"\"adds float ID column from float_path_name\"\"\"\n df[\"float_id\"] = int(float_id)\n\n \"\"\"rename ST cols\"\"\"\n df = rename_bgc_cols(df)\n\n \"\"\"drops any invalid ST rows\"\"\"\n df = df.dropna(subset=['time', 'lat','lon','depth'])\n \"\"\"adds climatology day,month,week,doy columns\"\"\"\n df = data.add_day_week_month_year_clim(df)\n \"\"\"reorders bgc_df 
with ST index leading followed by float_id and cycle\"\"\"\n df = reorder_bgc_data(df)\n \"\"\"strips any whitespace from col values\"\"\"\n df = cmn.strip_whitespace_data(df)\n \"\"\"removes comma delmim from sci cols\"\"\"\n df = replace_comm_delimiter(df)\n \"\"\"removes any inf vals\"\"\"\n df = df.replace([np.inf, -np.inf], np.nan) \n return df", "def getWindows(df, size=75, step=15):\n start = 0\n while start+size < df.count():\n yield start, start + size #pd.to_timedelta(size, unit='m'))\n start += step", "def splitlist(listname, basefilename, n):\n\n # Split the list into chunks\n chunks = [listname[x:x + n] for x in range(0, len(listname), n)]\n list_group = []\n num_lists = len(chunks)\n\n # Name and save the lists\n for chunk, num in zip(chunks, range(0, num_lists)):\n listdf = pd.DataFrame(chunk)\n n = basefilename + '_list_' + str(num)\n listdf.to_csv(n + \".txt\", index=False, header=None)\n list_group.append(n)\n return list_group", "def splitter(file_in,\n aantal_banen,\n afwijkings_waarde,\n totaal,\n aantal_rollen,\n ongeveer_per_baan,\n outgoing_posix_pad):\n # afwijkings_waarde = 0 deze komt nu uit def\n\n file_in = pd.read_csv(file_in, \";\")\n a = 0\n\n begin_eind_lijst = []\n be_LIJST = []\n\n for num in range(aantal_rollen):\n b = file_in.aantal.iloc[a:num+1].sum()\n # print(a, num)\n\n if num == (len(file_in) - 1):\n c = file_in.aantal.iloc[a:num].sum()\n begin_eind_lijst.append([c, a, num + 1])\n be_LIJST.append([a, num + 1])\n\n csv_naam = Path(f\"{outgoing_posix_pad}/{a:>{0}{5}}.csv\")\n print(csv_naam)\n file_in.iloc[a : (num + 1)].to_csv(csv_naam)\n print(\"splitter klaar\")\n\n elif b >= ongeveer_per_baan + afwijkings_waarde:\n\n csv_naam = Path(f\"{outgoing_posix_pad}/{a:>{0}{5}}.csv\")\n print(csv_naam)\n file_in.iloc[a : (num + 1)].to_csv(csv_naam) # num + 1 ??\n\n begin_eind_lijst.append([b, a, num])\n be_LIJST.append([a, num + 1])\n be_LIJST.append(f\"[{a}:{num}]\")\n a = num + 1\n\n continue\n\n return print(begin_eind_lijst), print(be_LIJST)", "def toDataFrame(self, split=True):\n\n def cleanColumns(df):\n # Cleanup columns\n colnames = df.columns\n colnames=[c.replace('\\'','') for c in colnames]\n colnames=[c[1:] if c.startswith('/') else c for c in colnames]\n # If there is only one group, we remove the group key\n groupNames = self.groupNames\n if len(groupNames)==1:\n nChar = len(groupNames[0])\n colnames=[c[nChar+1:] for c in colnames] # +1 for the \"/\"\n df.columns = colnames\n\n fh = self['data']\n if split:\n # --- One dataframe per group. 
We skip group that have empty data\n dfs={}\n for group in fh.groups():\n try:\n df = group.as_dataframe(time_index=True)\n df.insert(0,'Time_[s]', df.index.values)\n df.index=np.arange(0,len(df))\n except KeyError:\n df = group.as_dataframe(time_index=False)\n if len(df)>0:\n dfs[group.name] = df\n if len(dfs)==1:\n dfs=dfs[group.name]\n return dfs\n else:\n # --- One dataframe with all data\n try:\n df = fh.as_dataframe(time_index=True)\n cleanColumns(df)\n df.insert(0,'Time_[s]', df.index.values)\n df.index=np.arange(0,len(df))\n except KeyError:\n df = fh.as_dataframe(time_index=False)\n return df", "def splitter(fasta_file, output, limit, large_handling=False):\n file_ = open(fasta_file, 'r')\n file_count = 1\n outfile = open(output.rstrip(\"/\")+\"/%s_%05d.fa\"%(\n fasta_file.split('/')[-1].split('.')[0],file_count),'w')\n nt_count = 0\n for seq in SeqIO.parse(fasta_file, 'fasta'):\n if large_handling == True and len(str(seq.seq)) >= int(limit):\n file_count += 1\n largefile = open(output.rstrip(\"/\")+\"/%s_%05d_XL.fa\"%(\n fasta_file.split('/')[-1].split('.')[0],file_count),'w')\n largefile.write(\">\"+str(seq.description)+\"\\n\"+\"\\n\".join(\n str(seq.seq)[i:i+50]for i in range(0,len(seq.seq),50))+\"\\n\")\n largefile.close()\n else:\n nt_count += len(str(seq.seq))\n outfile.write(\">\"+str(seq.description)+\"\\n\"+\"\\n\".join(\n str(seq.seq)[i:i+50]for i in range(0,len(seq.seq),50))+\"\\n\") \n if nt_count >= int(limit):\n outfile.close()\n file_count += 1\n nt_count = 0\n outfile = open(output.rstrip(\"/\")+\"/%s_%05d.fa\"%(\n fasta_file.split('/')[-1].split('.')[0],file_count),'w')\n outfile.close()", "def get_num_chunks(self) -> int:", "def split_large_groups(ctx):\n asyncio.run(split_large_groups_impl(ctx.obj[\"config\"]))", "def process_test(self, test_filename=\"data/test.tsv\", output_folder=\"result\", output_filename=\"test_proc.tsv\", chunksize=500, sep=\"\\t\", n_jobs=cpu_count, process_method=\"standardization\"):\n append = False #for creating file and later appending\n\n self._check_path(output_folder) #check if exists output_folder\n full_output_filename = os.path.join(output_folder, output_filename) #full path\n\n for chunk in pd.read_csv(test_filename, chunksize=chunksize, sep=sep):\n chunk_split = np.array_split(chunk.values, n_jobs)\n\n pool = Pool(n_jobs)\n result_map = pool.map(partial(self._process_chunk_for_test,process_method=process_method),chunk_split)\n\n pool.close()\n pool.join()\n\n for df in result_map:\n if append:\n df.to_csv(full_output_filename, header=False, mode='a', index=None, sep=\"\\t\")\n else:\n df.to_csv(full_output_filename, header=True, index=None, sep=\"\\t\")\n append = True", "def submit_df(self, project_id, df, chunk_size=1000, row_offset=0):\n df_type = list(set(df[\"type\"]))\n df.rename(\n columns={c: c.lstrip(\"*\") for c in df.columns}, inplace=True\n ) # remove any leading asterisks in the DataFrame column names\n\n # Check uniqueness of submitter_ids:\n if len(list(df.submitter_id)) != len(list(df.submitter_id.unique())):\n raise Gen3Error(\n \"Warning: file contains duplicate submitter_ids. 
\\nNote: submitter_ids must be unique within a node!\"\n )\n\n # Chunk the file\n print(\"Submitting {} DataFrame with {} records.\".format(df_type, len(df)))\n program, project = project_id.split(\"-\", 1)\n api_url = \"{}/api/v0/submission/{}/{}\".format(self._endpoint, program, project)\n headers = {\"content-type\": \"text/tab-separated-values\"}\n\n start = row_offset\n end = row_offset + chunk_size\n chunk = df[start:end]\n\n count = 0\n\n results = {\n \"invalid\": {}, # these are invalid records\n \"other\": [], # any unhandled API responses\n \"details\": [], # entire API response details\n \"succeeded\": [], # list of submitter_ids that were successfully updated/created\n \"responses\": [], # list of API response codes\n }\n\n # Start the chunking loop:\n while (start + len(chunk)) <= len(df):\n\n timeout = False\n valid_but_failed = []\n invalid = []\n count += 1\n print(\n \"\\tChunk {} (chunk size: {}, submitted: {} of {})\".format(\n str(count),\n str(chunk_size),\n str(len(results[\"succeeded\"]) + len(results[\"invalid\"])),\n str(len(df)),\n )\n )\n\n try:\n response = requests.put(\n api_url,\n auth=self._auth_provider,\n data=chunk.to_csv(sep=\"\\t\", index=False),\n headers=headers,\n ).text\n except requests.exceptions.ConnectionError as e:\n results[\"details\"].append(e.message)\n\n # Handle the API response\n if (\n \"Request Timeout\" in response\n or \"413 Request Entity Too Large\" in response\n or \"Connection aborted.\" in response\n or \"service failure - try again later\" in response\n ): # time-out, response != valid JSON at the moment\n\n print(\"\\t Reducing Chunk Size: {}\".format(response))\n results[\"responses\"].append(\"Reducing Chunk Size: {}\".format(response))\n timeout = True\n\n else:\n try:\n json_res = json.loads(response)\n except JSONDecodeError as e:\n print(response)\n print(str(e))\n raise Gen3Error(\"Unable to parse API response as JSON!\")\n\n if \"message\" in json_res and \"code\" not in json_res:\n print(json_res) # trouble-shooting\n print(\n \"\\t No code in the API response for Chunk {}: {}\".format(\n str(count), json_res.get(\"message\")\n )\n )\n print(\"\\t {}\".format(str(json_res.get(\"transactional_errors\"))))\n results[\"responses\"].append(\n \"Error Chunk {}: {}\".format(str(count), json_res.get(\"message\"))\n )\n results[\"other\"].append(json_res.get(\"message\"))\n\n elif \"code\" not in json_res:\n print(\"\\t Unhandled API-response: {}\".format(response))\n results[\"responses\"].append(\n \"Unhandled API response: {}\".format(response)\n )\n\n elif json_res[\"code\"] == 200: # success\n\n entities = json_res.get(\"entities\", [])\n print(\"\\t Succeeded: {} entities.\".format(str(len(entities))))\n results[\"responses\"].append(\n \"Chunk {} Succeeded: {} entities.\".format(\n str(count), str(len(entities))\n )\n )\n\n for entity in entities:\n sid = entity[\"unique_keys\"][0][\"submitter_id\"]\n results[\"succeeded\"].append(sid)\n\n elif (\n json_res[\"code\"] == 400\n or json_res[\"code\"] == 403\n or json_res[\"code\"] == 404\n ): # failure\n\n entities = json_res.get(\"entities\", [])\n print(\"\\tChunk Failed: {} entities.\".format(str(len(entities))))\n results[\"responses\"].append(\n \"Chunk {} Failed: {} entities.\".format(\n str(count), str(len(entities))\n )\n )\n\n message = \"\"\n for entity in entities:\n sid = entity[\"unique_keys\"][0][\"submitter_id\"]\n if entity[\"valid\"]: # valid but failed\n valid_but_failed.append(sid)\n else: # invalid and failed\n message = str(entity[\"errors\"])\n 
results[\"invalid\"][sid] = message\n invalid.append(sid)\n print(\n \"\\tInvalid records in this chunk: {}, {}\".format(\n len(invalid), message\n )\n )\n\n elif json_res[\"code\"] == 500: # internal server error\n\n print(\"\\t Internal Server Error: {}\".format(response))\n results[\"responses\"].append(\n \"Internal Server Error: {}\".format(response)\n )\n\n if (\n len(valid_but_failed) > 0 and len(invalid) > 0\n ): # if valid entities failed bc grouped with invalid, retry submission\n chunk = chunk.loc[\n df[\"submitter_id\"].isin(valid_but_failed)\n ] # these are records that weren't successful because they were part of a chunk that failed, but are valid and can be resubmitted without changes\n print(\n \"Retrying submission of valid entities from failed chunk: {} valid entities.\".format(\n str(len(chunk))\n )\n )\n\n elif (\n len(valid_but_failed) > 0 and len(invalid) == 0\n ): # if all entities are valid but submission still failed, probably due to duplicate submitter_ids. Can remove this section once the API response is fixed: https://ctds-planx.atlassian.net/browse/PXP-3065\n print(\"\\tChunk with error:\\n\\n{}\\n\\n\".format(chunk))\n print(\"\\tUnhandled API response. Adding chunk to 'other' in results. Check for special characters or malformed links or property values.\")\n results[\"other\"].append(chunk)\n start += chunk_size\n end = start + chunk_size\n chunk = df[start:end]\n\n elif timeout == False: # get new chunk if didn't timeout\n start += chunk_size\n end = start + chunk_size\n chunk = df[start:end]\n\n else: # if timeout, reduce chunk size and retry smaller chunk\n if chunk_size >= 2:\n chunk_size = int(chunk_size / 2)\n end = start + chunk_size\n chunk = df[start:end]\n print(\n \"Retrying Chunk with reduced chunk_size: {}\".format(\n str(chunk_size)\n )\n )\n timeout = False\n else:\n raise Gen3SubmissionError(\n \"Submission is timing out. 
Please contact the Helpdesk.\"\n )\n\n print(\"Finished data submission.\")\n print(\"Successful records: {}\".format(str(len(set(results[\"succeeded\"])))))\n print(\"Failed invalid records: {}\".format(str(len(results[\"invalid\"]))))\n\n return results", "def __split_df(self, df:pd.DataFrame, ratio:float, rem_day4:bool, shuffle:bool, n_vec: int=1) -> Tuple[list, list, list, list]:\n X_test = []\n X_train = [] \n y_test = [] \n y_train = [] \n\n header = df['label'].tolist()\n responses = df['response'].tolist()\n # Removing Day 4\n trails = set()\n for i in range(len(header)):\n if rem_day4 and responses[i] == \"0\":\n pass\n else:\n trails.add(header[i])\n \n header = trails\n\n # Getting all the matrices from the trials\n for trial in header:\n # geting rows with (day, Trail)-label\n rows = df.loc[df['label'] == trial].to_numpy()\n # getting response label\n response = rows[0][-1]\n # getting the actual data from the matrix\n rows = np.delete(rows, np.s_[0,1,-1], axis=1)\n if shuffle:\n # shuffle PC-Matrix\n np.random.shuffle(rows)\n\n if n_vec == 1:\n pass\n else:\n new_rows = []\n # taking samples\n while len(rows) > n_vec:\n vecs = rows[:n_vec]\n # deleting vectors that are already taken\n rows = rows[n_vec:]\n # Concat vectors to one\n new_rows.append(np.concatenate(vecs))\n rows = new_rows\n\n # Splitting into Test and training\n cut = int(ratio*len(rows))\n for i in range(len(rows)):\n if i < cut or ratio == 0.0:\n X_train.append(rows[i])\n y_train.append(response)\n else:\n X_test.append(rows[i])\n y_test.append(response)\n\n return X_train, X_test, y_train, y_test", "def split_datasets(ql_file, nl_question_file, output_dir, split):\n\n with io.open(ql_file, encoding='utf-8') as query_org, \\\n io.open(nl_question_file, encoding='utf8') as nl_org:\n ql = query_org.readlines()\n nl = nl_org.readlines()\n\n split = split / 100\n\n train_ql, val_ql, train_nl, val_nl = train_test_split(ql, nl,\n train_size=split,\n random_state=42,\n shuffle=True)\n\n with io.open(output_dir + \"-train.ql\", 'w', encoding='utf-8') \\\n as ql_train, \\\n io.open(output_dir + \"-val.ql\", 'w', encoding='utf-8') \\\n as ql_val, \\\n io.open(output_dir + \"-train.nl\", 'w', encoding='utf-8') \\\n as nl_train, \\\n io.open(output_dir + \"-val.nl\", 'w', encoding='utf-8') \\\n as nl_val:\n ql_train.writelines(train_ql)\n ql_val.writelines(val_ql)\n nl_train.writelines(train_nl)\n nl_val.writelines(val_nl)", "def run(self, dataset_size=4, n_jobs=-1, starting_block=0):\n data_files = sorted(self.input_directory.glob(\"**/*.txt\"))\n log.info(f\"Creating shape file based on {len(data_files)} samples.\")\n\n n_blocks = int(len(data_files) / dataset_size)\n data_file_blocks = split(data_files, n_blocks)\n dataset_blocks_ids = np.arange(len(data_file_blocks))\n\n if starting_block != 0:\n data_file_blocks = data_file_blocks[starting_block:]\n dataset_blocks_ids = dataset_blocks_ids[starting_block:]\n log.info(f\"Starting at a different block number: {starting_block}.\")\n n_blocks = int(len(data_file_blocks))\n\n log.info(f\"Going through {n_blocks} blocks in parallel.\")\n Parallel(n_jobs=n_jobs)(\n delayed(self.generate_single_block)(data_file_block, dataset_block_id)\n for (data_file_block, dataset_block_id) in tqdm(\n zip(data_file_blocks, dataset_blocks_ids)\n )\n )\n\n log.info(\"Combining the separate index files..\")\n index_floorplan = sorted(self.output_directory.glob(\"index_floorplans_*.csv\"))\n log.info(f\"Found {len(index_floorplan)} index block files.\")\n index_files = 
pd.concat([pd.read_csv(_file) for _file in index_floorplan])\n index_files = index_files.fillna(0)\n index_files.to_csv(self.output_directory / \"index_floorplans.csv\", index=False)", "def split_data(self, df, valid_boundary=2016, test_boundary=2018):\n\n stock_count = len(self.sl)\n test_ratio = 0.2\n print('Stock count:%d'% stock_count)\n train_x = []\n test_x = []\n for label_, d_ in enumerate(self.sl):\n stock_train_len = int(len(d_.train_y) * (1 - test_ratio))\n train_x += list(d_.train_x[:stock_train_len])\n test_x += list(d_.train_x[stock_train_len:])\n\n train_g = pd.DataFrame(train_x, columns=([k[0] for k in self._column_definition]))\n test_g = pd.DataFrame(test_x, columns=([k[0] for k in self._column_definition]))\n\n self.set_scalers(train_g)\n\n def tofloat(data):\n for col in data.columns:\n if col not in {'Symbol', 'date'}:\n data[col] = data[col].astype('float32')\n return data\n\n\n train_g = tofloat(train_g)\n test_g = tofloat(test_g)\n # used test for both valid and test\n return train_g, test_g, test_g", "def test_super_chunk(self):\n chunksize = MAX_SINGLE_UPLOAD_SIZE + 1\n size = MAX_SINGLE_UPLOAD_SIZE * 2\n self.assertEqual(find_chunksize(size, chunksize),\n MAX_SINGLE_UPLOAD_SIZE)", "def __divide_into_batches(self):\n print('Creating batches for parallel execution')\n num_suites = len(self.execution_file_json['suites'])\n full_batches = num_suites // self.max_suites\n print('- Full batches=%s' % full_batches)\n if num_suites % self.max_suites > 0:\n has_partial = True\n else:\n has_partial = False\n print('- Partial batch at end: %s' % has_partial)\n if has_partial:\n total_batches = full_batches + 1\n else:\n total_batches = full_batches\n print('- %s suites will be divided into %s container batches using max suites %s' % (\n num_suites, total_batches, self.max_suites))\n self.suite_batches = []\n # split full batches\n for batch_counter in range(0, full_batches):\n start_index = batch_counter * self.max_suites\n batch = []\n for counter in range(start_index, start_index + self.max_suites):\n batch.append(self.execution_file_json['suites'][counter])\n self.suite_batches.append(batch)\n print('- full batches created', self.suite_batches)\n # add partial batch\n if has_partial:\n start_index = full_batches * self.max_suites\n batch = []\n for counter in range(start_index, num_suites):\n batch.append(self.execution_file_json['suites'][counter])\n self.suite_batches.append(batch)\n print('- partial batch created', self.suite_batches)", "def seed_other_dataset(name: str, chunk_size: int, start=None, end=None):\n objects = []\n for chunk in pd.read_csv(name, chunksize=chunk_size, header=1):\n chunk_as_mat = chunk.to_numpy()\n chunk_start = datetime.datetime.strptime(str(chunk_as_mat[0][0]), \"%Y%m%d\")\n chunk_end = datetime.datetime.strptime(str(chunk_as_mat[-1][0]), \"%Y%m%d\")\n if start is not None and start > chunk_end:\n continue\n if end is not None and end < chunk_start:\n break\n # print(chunk.to_numpy())\n objects += insert_into_sql(chunk.to_numpy())\n return objects", "def store_dfs_in_HANA(df_filenames,table_name,multiprocessing=False):\r\n\r\n for index,df_filename in enumerate(df_filenames):\r\n df = pd.read_csv(df_filename, compression='gzip', header=0, sep=',', quotechar='\"')\r\n del df[\"Unnamed: 0\"]\r\n colnames = list(df.columns.values)\r\n #REMOVE before flight\r\n drop_table_in_HANA(colnames, table_name)\r\n create_table_in_HANA(colnames, table_name)\r\n number_of_parts = math.ceil(len(df.index)/settings['chunksize'])\r\n number_of_parts = 
settings['num_cores']\r\n\r\n if multiprocessing:\r\n with multiprocessing.Pool(settings['num_cores']) as pool:\r\n pool.imap_unordered(partial(store_partial_df,table_name=table_name), numpy.array_split(df,number_of_parts))\r\n pool.close()\r\n pool.join()\r\n else:\r\n store_partial_df(df, table_name)\r\n\r\n logging.info(\"Finished storing {0} df\".format(index))\r\n\r\n # dont forget to close the connestion otherwise we may run into\r\n # connect issues.\r\n hana.close()", "def split_data_set(reddit_path, data_set_name, on, num_splits, target_directories, map_columns=None):\n targets = {}\n for i in range(num_splits):\n targets[i] = os.path.join(target_directories[i], data_set_name)\n mkdir(targets[i])\n\n full_sub_data_path = os.path.join(reddit_path, data_set_name)\n data_files = map(lambda f: os.path.join(full_sub_data_path, f), os.listdir(full_sub_data_path))\n args_list = [(on, table_file, targets, num_splits, map_columns) for table_file in data_files]\n\n pool = mp.Pool(pool_size)\n pool.map(unpack_split_file_with_map, args_list)", "def split_dataframe(df, n_split, axis=0):\n\n # TODO: implement axis logic\n\n if df.shape[0] < n_split:\n raise ValueError(\n 'n_split ({}) can\\'t be greater than the number of rows ({}).'.\n format(n_split, df.shape[0]))\n elif n_split <= 0:\n raise ValueError('n_split ({}) can\\'t be less than 0.'.format(n_split))\n\n n = df.shape[0] // n_split\n\n splits = []\n\n for i in range(n_split):\n start_i = i * n\n end_i = (i + 1) * n\n splits.append(df.iloc[start_i:end_i, :])\n\n i = n * n_split\n if i < df.shape[0]:\n splits.append(df.ix[i:])\n\n return splits", "def split_data(self):\r\n print('split data')\r\n np.random.shuffle(self.dataList)\r\n l = len(self.dataList)/self.fold\r\n self.dataList = [self.dataList[i*l: (i+1)*l] for i in range(self.fold-1)] + [self.dataList[(self.fold-1)*l:]] # each element in the list is splitted data list\r", "def data_split(config_path: Text) -> None:\n\n config = yaml.safe_load(open(config_path))\n\n dataset = pd.read_csv(config['featurize']['features_data'])\n train_dataset, test_dataset = train_test_split(\n dataset, \n test_size = config['data_split']['test_size'],\n random_state = config['data_split']['random_state']\n )\n\n train_csv_path = config['data_split']['train_path']\n test_csv_path = config['data_split']['test_path']\n train_dataset.to_csv(train_csv_path, index=False)\n test_dataset.to_csv(test_csv_path, index=False)", "def getMyChunkSize(numJobs, numWorkers, chunkSize, rank):\n print \"numJobs, numWorkers: \", numJobs, numWorkers, chunkSize\n assert(numJobs >= numWorkers)\n allJobs = np.arange(numJobs)\n startInd = (np.arange(numWorkers)) * chunkSize\n endInd = (np.arange(numWorkers) + 1) * chunkSize\n endInd[-1] = numJobs\n myJobs = allJobs[startInd[rank]:endInd[rank]]\n return myJobs", "def split_and_batch(data_loader, \n batch_size, \n doclength,\n h5_path,\n rng_seed=888,\n normalizer_fun=data_utils.normalize,\n transformer_fun=data_utils.to_one_hot,\n balance_labels=False,\n max_records=None):\n data_batches = batch_data(data_loader, batch_size,\n normalizer_fun=normalizer_fun,\n transformer_fun=None,\n max_records=max_records,\n balance_labels=balance_labels,\n nlabels=2)\n (_, _), (train_size, test_size) = split_data(data_batches, \n h5_path, overwrite_previous=False, rng_seed=rng_seed)\n def train_batcher():\n (a,b),(a_size,b_size)=split_data(None, h5_path=h5_path, overwrite_previous=False, shuffle=True)\n return batch_data(a,\n normalizer_fun=lambda x: x,\n transformer_fun=transformer_fun,\n 
flatten=True,\n batch_size=batch_size)\n def test_batcher():\n (a,b),(a_size,b_size)=split_data(None, h5_path, overwrite_previous=False,shuffle=False)\n return batch_data(b,\n normalizer_fun=lambda x: x,\n transformer_fun=transformer_fun,\n flatten=True,\n batch_size=batch_size)\n\n return (train_batcher, test_batcher), (train_size, test_size)", "def split(df, group):\n\n data = namedtuple(\"data\", [\"filename\", \"object\"]) #initiate \"data\" tyoe\n gb = df.groupby(group) #group df by group attribute\n return [\n data(filename, gb.get_group(x))\n for filename, x in zip(gb.groups.keys(), gb.groups)\n ]", "def split_train_into_chunks(chunk_size):\n for syscall_type in SYSCALLS:\n syscalls_split_file = open(f\"{TEMP_DIR}/{syscall_type}-split.train\", \"w\")\n snd_train_path = f\"{FILE_PATH}/{syscall_type}/{syscall_type}.train\"\n with open(snd_train_path) as train_file:\n for syscall in train_file:\n # Generate all n-grams of the current syscall\n n_grams = extract_n_grams(syscall.strip(),chunk_size,unique=True)\n if len(n_grams)==0:\n continue\n # Write n-grams to syscall chunks file\n syscalls_split_file.writelines(n_grams)\n syscalls_split_file.close()", "def get_table_chunk_size(self):\n result = self.query(\n sql.table_avg_row_len,\n (\n self._current_db,\n self.table_name,\n ),\n )\n if result:\n tbl_avg_length = result[0][\"AVG_ROW_LENGTH\"]\n # avoid huge chunk row count\n if tbl_avg_length < 20:\n tbl_avg_length = 20\n self.select_chunk_size = self.chunk_size // tbl_avg_length\n # This means either the avg row size is huge, or user specified\n # a tiny select_chunk_size on CLI. Let's make it one row per\n # outfile to avoid zero division\n if not self.select_chunk_size:\n self.select_chunk_size = 1\n log.info(\n \"TABLE contains {} rows, table_avg_row_len: {} bytes,\"\n \"chunk_size: {} bytes, \".format(\n result[0][\"TABLE_ROWS\"], tbl_avg_length, self.chunk_size\n )\n )\n log.info(\"Outfile will contain {} rows each\".format(self.select_chunk_size))\n self.eta_chunks = max(\n int(result[0][\"TABLE_ROWS\"] / self.select_chunk_size), 1\n )\n else:\n raise OSCError(\"FAIL_TO_GUESS_CHUNK_SIZE\")", "def batch_dataset(x, batch_size):\r\n\tsize_modulo = len(x) % batch_size # hack to ensure data is batches successfully\r\n\tif size_modulo != 0:\r\n\t\tx = x[:-size_modulo]\r\n\tpartitioned = np.split(x, batch_size)\r\n\treturn partitioned", "def split_dataset(dataset, test_size):\n train_data = dataset.skip(test_size).shuffle(SHUFFLE_BUFFER_SIZE)\n train_data = train_data.padded_batch(BATCH_SIZE)\n \n test_data = dataset.take(test_size)\n test_data = test_data.padded_batch(BATCH_SIZE)\n \n return train_data, test_data", "def split_data_set(data_set, index, value, part=0):\n # save the subdataset\n res_data_set = []\n \n for entry in data_set:\n # find the data set to the left of the partition point\n if part == 0 and float(entry[index])<= value: #求划分点左侧的数据集\n reduced_entry = entry[:index]\n # after partitioning, the value of the index column in the data is removed\n reduced_entry.extend(entry[index + 1:]) \n res_data_set.append(reduced_entry)\n # find the data set to the right of the partition point\n if part ==1 and float(entry[index])> value: \n reduced_entry = entry[:index]\n reduced_entry.extend(entry[index + 1:])\n res_data_set.append(reduced_entry)\n return res_data_set", "def chunk_size(self) -> global___Expression:", "def run(self):\n\t\tdf_iter = self.file_to_df(50000)\n\t\tdf_airport = self.airport_file_to_df()\n\t\tfor df in df_iter: # type: 
pd.DataFrame\n\t\t\tdf.drop_duplicates(inplace=True)\n\t\t\tdf = self.transform(df, df_airport)\n\n\t\t\tdf_result = self.get_only_new_records(\n\t\t\t\tdf=df,\n\t\t\t\tdf_columns=self.join_columns,\n\t\t\t\ttable_columns=self.join_columns\n\t\t\t)\n\n\t\t\tif len(df_result) > 0:\n\t\t\t\t# df_result.drop(self.table_columns, axis=1)\n\n\t\t\t\tself.save(\n\t\t\t\t\tdf=df_result,\n\t\t\t\t\ttable_name=\"travel_dimension\",\n\t\t\t\t\tdf_columns=self.table_columns,\n\t\t\t\t\ttable_colums=self.table_columns\n\t\t\t\t)", "def my_train_test_split(act_my_data, act_test_size=0.5):\r\n act_train_df, act_test_df = train_test_split(act_my_data,\r\n test_size=act_test_size)\r\n return act_train_df, act_test_df", "def get_test_data(size: int = 1): \n num_rows = len(X)\n test_df = X.copy()\n\n while num_rows < size:\n test_df = np.append(test_df, test_df, axis=0)\n num_rows = len(test_df)\n\n return test_df[:size]", "def create_input_chunks_distributed(cs, partition, data_dir, file_format):\n if not file_format == \"HDF5\":\n print(\"File format not supported yet. Aborting...\")\n sys.exit(1)\n\n for i in range(6):\n for filename in os.listdir('/disk' + str(i) + '/gtimothee'):\n if filename.endswith(\".json\") or filename.endswith(\".hdf5\"):\n os.remove(os.path.join('/disk' + str(i) + '/gtimothee', filename))\n print(f\"Creating input chunks...\")\n\n disk_index = 0\n repartition_dict = dict()\n\n for i in range(partition[0]):\n for j in range(partition[1]):\n for k in range(partition[2]):\n print(f\"Creating random array... shape: {cs}\")\n arr = da.random.uniform(size=cs)\n print(f\"Done, converting to float16...\")\n arr = arr.astype(np.float16)\n out_filename = f'{i}_{j}_{k}.hdf5'\n print(f\"Building {out_filename} with shape {cs}\")\n data_dirpath = os.path.join('/disk' + str(disk_index), 'gtimothee')\n outfilepath = os.path.join(data_dirpath, out_filename)\n print(f\"Storing on {data_dirpath}...\")\n da.to_hdf5(outfilepath, '/data', arr, chunks=None, compression=None)\n\n repartition_dict[str((i,j,k))] = outfilepath\n\n disk_index += 1\n if disk_index == 6:\n disk_index = 0\n\n print(f\"Writing repartition file...\")\n json_file = os.path.join('/disk0', 'gtimothee', 'repartition_dict.json')\n if os.path.isfile(json_file):\n os.remove(json_file)\n\n with open(json_file, 'w+') as outfile:\n json.dump(repartition_dict, outfile)", "def split_dataset(df, split_method, data_testing, random_seed, train_frac=0.8, test_frac=0.1):\n\n # Get data_type and data_value from split parameters\n # If no data_type is provided, data_type is the same as split_method\n data_type = data_testing['data_type'] if data_testing['data_type'] else split_method\n data_value = data_testing['data_value']\n\n if not split_method in df:\n raise KeyError(\"No split_method '{}' was not found in metadata\".format(split_method))\n if not data_type in df:\n logger.warning(\"No data_type named '{}' was found in metadata. 
Not taken into account \"\n \"to split the dataset.\".format(data_type))\n data_type = split_method\n\n # Filter dataframe with rows where split_method is not NAN\n df = df[df[split_method].notna()]\n\n # If no data_value list is provided, create a random data_value according to data_type and test_fraction\n # Split the TEST and remainder set using sklearn function\n if len(data_value) == 0 and test_frac != 0:\n data_value = sorted(df[data_type].unique().tolist())\n test_frac = test_frac if test_frac >= 1 / len(data_value) else 1 / len(data_value)\n data_value, _ = train_test_split(data_value, train_size=test_frac, random_state=random_seed)\n if len(data_value) != 0:\n for value in data_value:\n if value not in df[data_type].values:\n logger.warning(\"No data_value '{}' was found in '{}'. Not taken into account \"\n \"to split the dataset.\".format(value, data_type))\n X_test = df[df[data_type].isin(data_value)]['filename'].unique().tolist()\n X_remain = df[~df[data_type].isin(data_value)][split_method].unique().tolist()\n\n # List dataset unique values according to split_method\n # Update train fraction to apply to X_remain\n data = sorted(df[split_method].unique().tolist())\n train_frac_update = train_frac * len(data) / len(X_remain)\n if ((train_frac_update > (1 - 1 / len(X_remain)) and len(X_remain) < 2) or train_frac_update > 1):\n raise RuntimeError(\"{}/{} '{}' remaining for training and validation sets, train_fraction {} is too large, \"\n \"validation set would be empty.\".format(len(X_remain), len(data), split_method, train_frac))\n\n # Split remainder in TRAIN and VALID sets according to train_frac_update using sklearn function\n X_train, X_val = train_test_split(X_remain, train_size=train_frac_update, random_state=random_seed)\n\n # Print the real train, validation and test fractions after splitting\n real_train_frac = len(X_train)/len(data)\n real_valid_frac = len(X_val)/len(data)\n real_test_frac = 1 - real_train_frac - real_valid_frac\n logger.warning(\"After splitting: train, validation and test fractions are respectively {}, {} and {}\"\n \" of {}.\".format(round(real_train_frac, 3), round(real_valid_frac, 3),\n round(real_test_frac, 3), split_method))\n\n # Convert train and valid sets from list of \"split_method\" to list of \"filename\"\n X_train = df[df[split_method].isin(X_train)]['filename'].unique().tolist()\n X_val = df[df[split_method].isin(X_val)]['filename'].unique().tolist()\n\n # Make sure that test dataset is unseen during training\n # (in cases where there are multiple \"data_type\" for a same \"split_method\")\n X_train = list(set(X_train) - set(X_test))\n X_val = list(set(X_val) - set(X_test))\n\n return X_train, X_val, X_test", "def load_df(\n file_name: str, mode: str = \"pandas\", save: bool = True, chunksize: int = 1_000_000\n) -> pd.DataFrame:\n\n file_path = os.path.join(DATA_PATH, file_name)\n\n if mode == \"bz2\":\n keys = [\"quoteID\", \"quotation\", \"speaker\", \"date\", \"numOccurrences\", \"phase\"]\n\n with bz2.open(file_path, \"rb\") as quote_file:\n df = pd.DataFrame(\n [\n dict(zip(keys, map(json.loads(instance).get, keys)))\n for instance in tqdm(quote_file)\n ]\n )\n else:\n if not save:\n print(\"Please enable save option.\")\n return\n\n with pd.read_json(file_path, lines=True, chunksize=chunksize) as df_reader:\n for i, chunk in enumerate(df_reader):\n file_name = file_name.strip(\".json.bz2\")\n pkl_path = os.path.join(PKL_PATH, f\"{file_name}-{i:03d}.pkl\")\n chunk.to_pickle(pkl_path)\n\n if save and not os.path.exists(pkl_path):\n 
file_name = file_name.strip(\".json.bz2\")\n df.to_pickle(os.path.join(PKL_PATH, pkl_path))\n\n return df", "def test_n_splits(self):\n for n_splits, n_jobs in product([1, 6], [None, 2, 8]):\n with self.subTest(input='list', n_splits=n_splits, n_jobs=n_jobs):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=None, n_splits=n_splits,\n n_jobs=n_jobs), n_splits)\n\n with self.subTest(input='numpy', n_splits=n_splits, n_jobs=n_jobs):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=None,\n n_splits=n_splits, n_jobs=n_jobs), n_splits)", "def split_dataset(df, predict_window):\n\n #split dataset into train and test datasets\n #train 80 percent of rows\n dataset_train = np.array(df[:int(df.shape[0]*0.8)])\n\n #test dataset is 20 percent of rows\n #50 - that's where historical data and prediction overlap\n dataset_test = np.array(df[int(df.shape[0]*0.8)- predict_window:])\n\n return dataset_train, dataset_test", "def _divide_into_test_train(\n self, test_size: int, train_size: int\n ) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:\n X_train, X_test, y_train, y_test = train_test_split(\n self.df.iloc[:, :-1],\n self.df.iloc[:, -1],\n test_size=test_size,\n train_size=train_size,\n )\n return X_train, X_test, y_train, y_test", "def splitFileIntoShards(filename, shardsize):\n os.popen('split -a 4 -d --additional-suffix=_shard -l{} {}'.format(shardsize, filename))", "def split_data(self, val_size=0.1, test_size=0.5):\n df = pd.read_csv(self.csv_path, delimiter=';')\n train, val = train_test_split(df, test_size=val_size)\n val, test = train_test_split(df, test_size=test_size)\n return train, val, test", "def test_multithread_batch_size(sdc_builder, sdc_executor, snowflake, num_tables, parallel_transfers, num_records,\n batch_size, reader_threads, processor_threads):\n base_table_name = f'STF_TABLE_{get_random_string(string.ascii_uppercase, 5)}'\n stage_name = f'STF_STAGE_{get_random_string(string.ascii_uppercase, 5)}'\n tables = [f'{base_table_name}_{i}' for i in range(0, num_tables)]\n\n engine = snowflake.engine\n\n # Path inside a bucket in case of AWS S3 or path inside container in case of Azure Blob Storage container.\n storage_path = f'{STORAGE_BUCKET_CONTAINER}/{get_random_string(string.ascii_letters, 10)}'\n snowflake.create_stage(stage_name, storage_path, stage_location='INTERNAL')\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n snowflake_origin = pipeline_builder.add_stage(name=BULK_STAGE_NAME)\n snowflake_origin.set_attributes(stage_location='INTERNAL',\n max_batch_size=batch_size,\n maximum_stage_file_reader_threads=reader_threads,\n maximum_stage_file_processing_threads=processor_threads,\n snowflake_stage_name=stage_name,\n table_config=[{'inclusionPattern': f'{base_table_name}%'}])\n\n wiretap = pipeline_builder.add_wiretap()\n snowflake_origin >> wiretap.destination\n\n pipeline = pipeline_builder.build().configure_for_environment(snowflake)\n sdc_executor.add_pipeline(pipeline)\n\n try:\n logger.info(f'Creating {num_tables} tables with {num_records} per table...')\n column_names, column_definitions, primary_keys_clause = get_columns_information(DEFAULT_COLUMNS)\n records_to_insert = [(i, get_random_string(string.ascii_uppercase, 5)) for i in range(0, num_records)]\n for table in tables:\n create_table(engine, table, column_definitions, primary_keys_clause)\n insert_values(engine, table, column_names, records_to_insert, 10000)\n\n # benchmark the pipeline\n 
sdc_executor.benchmark_pipeline(pipeline, record_count=num_records)\n finally:\n snowflake.delete_staged_files(storage_path)\n snowflake.drop_entities(stage_name=stage_name)\n for table in tables:\n drop_table(engine, table)\n engine.dispose()", "def kb_train_test_split(test_size, random_state):\n\n cnxn_path = \"/polyaxon-data/goldenretriever/db_cnxn_str.txt\"\n conn = pyodbc.connect(open(cnxn_path, 'r').read())\n\n SQL_Query = pd.read_sql_query('''SELECT dbo.query_labels.id, dbo.query_db.query_string, \\\n dbo.kb_clauses.processed_string, dbo.kb_raw.kb_name, dbo.kb_raw.type FROM dbo.query_labels \\\n JOIN dbo.query_db ON dbo.query_labels.query_id = dbo.query_db.id \\\n JOIN dbo.kb_clauses ON dbo.query_labels.clause_id = dbo.kb_clauses.id \\\n JOIN dbo.kb_raw ON dbo.kb_clauses.raw_id = dbo.kb_raw.id''', conn)\n\n df = pd.DataFrame(SQL_Query).set_index('id')\n kb_names = df['kb_name'].unique()\n\n train_dict = dict()\n test_dict = dict()\n\n train_idx_all = []\n test_idx_all = []\n\n for kb_name in kb_names:\n kb_id = df[df['kb_name'] == kb_name].index.values\n train_idx, test_idx = train_test_split(kb_id, test_size=test_size,\n random_state=random_state)\n \n train_dict[kb_name] = train_idx\n test_dict[kb_name] = test_idx\n \n for k,v in train_dict.items():\n for idx in v:\n train_idx_all.append(idx)\n \n for k,v in test_dict.items():\n for idx in v:\n test_idx_all.append(idx)\n \n return df, train_dict, test_dict, train_idx_all, test_idx_all", "def split_dataset(df_playlists, df_interactions):\n df_train_pl, cat_pids = generate_train(df_playlists)\n df_test_pl, df_test_itr, df_eval_itr, df_train_itr = generate_test(cat_pids, df_playlists, df_interactions)\n\n return df_train_pl, df_train_itr, df_test_pl, df_test_itr, df_eval_itr", "def test_chunk_size(self):\n for chunk_size, expected_n_chunks in [(1, 100), (3, 34), (200, 1), (None, 1)]:\n with self.subTest(chunk_size=chunk_size):\n iterable_of_args, iterable_len, chunk_size_, n_splits = apply_numpy_chunking(\n self.test_data_numpy, chunk_size=chunk_size, n_splits=1\n )\n\n # Materialize generator and test contents. The chunks should be of size chunk_size (expect for the last\n # chunk which can be smaller)\n iterable_of_args = list(iterable_of_args)\n self.assertEqual(len(iterable_of_args), expected_n_chunks)\n chunk_size = chunk_size or 100\n for chunk_idx, chunk in enumerate(iterable_of_args):\n self.assertIsInstance(chunk[0], np.ndarray)\n np.testing.assert_array_equal(chunk[0], self.test_data_numpy[chunk_idx * chunk_size:\n (chunk_idx + 1) * chunk_size])\n\n # Test other output\n self.assertEqual(iterable_len, expected_n_chunks)\n self.assertEqual(chunk_size_, 1)\n self.assertIsNone(n_splits)" ]
[ "0.73547655", "0.67736286", "0.6035461", "0.59178704", "0.5877275", "0.58713686", "0.584475", "0.5791403", "0.57895786", "0.5737366", "0.57224447", "0.57164466", "0.5714758", "0.57108814", "0.5700032", "0.5689146", "0.56854934", "0.5663078", "0.5655827", "0.5648846", "0.5632305", "0.56116503", "0.5605787", "0.5585047", "0.5555051", "0.55461866", "0.55366606", "0.553417", "0.55228126", "0.55179846", "0.55152315", "0.55081826", "0.55049855", "0.5503067", "0.54854196", "0.5458328", "0.54363286", "0.54092014", "0.53857553", "0.538424", "0.5347638", "0.5344481", "0.53287476", "0.53270286", "0.53231436", "0.53204703", "0.53180826", "0.53177255", "0.5313598", "0.5309232", "0.530649", "0.5304401", "0.5284688", "0.52827555", "0.5279578", "0.5274643", "0.5265068", "0.5248114", "0.52401674", "0.52331555", "0.52331114", "0.5232541", "0.521597", "0.52101415", "0.5207079", "0.51966834", "0.51914257", "0.51904607", "0.518742", "0.518576", "0.517302", "0.5169782", "0.5164704", "0.5159934", "0.51546395", "0.51521814", "0.51481104", "0.5147868", "0.51470315", "0.51456136", "0.51449186", "0.51343334", "0.5134019", "0.51217186", "0.5112215", "0.51115584", "0.5111177", "0.5105795", "0.5097319", "0.5082438", "0.5078107", "0.507574", "0.5075316", "0.5074149", "0.506977", "0.5066761", "0.5061018", "0.50582093", "0.5056507", "0.50558716" ]
0.70206773
1
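The record above revolves around one retry pattern: when the API times out, the submitter halves the chunk size and resubmits the same window of rows. Below is a minimal, self-contained sketch of that pattern only — it is not the Gen3 SDK code itself, and the `submit_in_chunks` name and the `submit` callable are hypothetical stand-ins for the real API client.

from typing import Callable, List, Sequence

def submit_in_chunks(records: Sequence, submit: Callable[[Sequence], bool], chunk_size: int = 30) -> List:
    """Submit records in chunks, halving the chunk size whenever a chunk times out."""
    succeeded: List = []
    start = 0
    while start < len(records):
        chunk = records[start:start + chunk_size]   # recomputed each pass with the current chunk_size
        try:
            ok = submit(chunk)                      # hypothetical submission call; True on success
        except TimeoutError:
            ok = False
        if ok:
            succeeded.extend(chunk)
            start += chunk_size                     # advance to the next window
        elif chunk_size >= 2:
            chunk_size //= 2                        # shrink the chunk and retry the same window
        else:
            raise RuntimeError("submission keeps timing out even with chunk_size=1")
    return succeeded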
Configure logging for training log. The format is `log_{log_fname}_{comment}.log`; e.g. for `train.py`, the log_fname is `log_train.log`. Use `logging.info(...)` to record the running log.
def config_logging(comment=None): # Get current executing script name import __main__, os exe_fname=os.path.basename(__main__.__file__) log_fname = "log_{}".format(exe_fname.split(".")[0]) if comment is not None and str(comment): log_fname = log_fname + "_" + str(comment) log_fname = log_fname + ".log" log_format = "%(asctime)s [%(levelname)-5.5s] %(message)s" logging.basicConfig( level=logging.INFO, format=log_format, handlers=[logging.FileHandler(log_fname), logging.StreamHandler()] )
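A short usage sketch for the `config_logging` document above. It assumes the function is defined in (or imported into) a script executed as `python train.py` and that `logging` is imported at module level where the function lives; the comment value "exp1" is purely illustrative.

import logging

config_logging(comment="exp1")                      # creates log_train_exp1.log when run from train.py
logging.info("epoch %d  loss %.4f", 1, 0.1234)      # written to the file and echoed to the console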
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_logging(config: Any) -> Logger:\n green = \"\\033[32m\"\n reset = \"\\033[0m\"\n logger = setup_logger(\n name=f\"{green}[ignite]{reset}\",\n level=logging.DEBUG if config.debug else logging.INFO,\n format=\"%(name)s: %(message)s\",\n filepath=config.output_dir / \"training-info.log\",\n )\n return logger", "def configure_logging(conf, unit_prompt, units):\n print()\n if conf.get('logging', None) is None:\n prompt = 'Would you like to log information?'\n options = ['Nothing Logged', 'Errors Only', 'Errors and Summary Activity',\n 'Errors, Summary Activity, and Deletions', 'Nearly all Activity']\n conf['logging'] = options.index(ask_options(\n prompt, options, default='Nothing Logged'))\n if conf['logging'] > 0 and conf.get('logging_limit', None) is None:\n prompt = 'What is the maximum file size of the log file?' + \\\n unit_prompt + '\\nEnter -1 for unlimited.'\n conf['logging_limit'] = numeric_response(prompt, units, default='10MB')\n return conf", "def initLogging ( logFile ):\n logging.basicConfig(\n filename=logFile,\n level=logging.INFO,\n format='%(asctime)s %(levelname)-8s %(message)s',\n filemode='w'\n )", "def log(self, loginfo):\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(filename)s:%(message)s',\n datefmt='%d %b %Y %H:%M:%S',\n filename=self.logfilepath,\n filemode='w')\n filelog = logging.FileHandler(self.logfilepath)\n logging.getLogger('Functest').addHandler(filelog)\n logging.info(loginfo)", "def __setup_logging(self):\n\n loglevel = logging.INFO\n if self.config[\"verbose\"]:\n loglevel = logging.DEBUG\n\n FORMAT = '[%(asctime)s %(filename)s:%(lineno)s %(levelname)s] %(message)s'\n if self.config[\"log\"]:\n logging.basicConfig(format=FORMAT, level=loglevel, filename=self.config[\"log\"])\n else:\n logging.basicConfig(format=FORMAT, level=loglevel)", "def configure_logging():\n\n level = logging.INFO\n logging.getLogger().setLevel(level)\n logging.basicConfig(\n level=level,\n format=(\n \"[%(asctime)s][%(levelname)s][%(filename)s:%(lineno)d]\"\n + \"[%(processName)s] %(message)s\"\n ),\n )", "def init_log():\n log_op = LogOp()\n log_op.config_log(logging.DEBUG)", "def set_log_info():\n set_log_level_format(logging.INFO,\n '%(asctime)s %(levelname)s:%(name)s:%(message)s')", "def setup_logging(log_file):\n\tglobal logger\n\tif log_file:\n\t\tlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',filename=log_file,filemode='w',level=logging.INFO)\n\telse:\n\t\tlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',level=logging.INFO)\n\tlogger = logging.getLogger('default')", "def setup_logging( cfg ):\n global _LOGGING_FORMAT_, _DATE_FORMAT_\n format,date = _LOGGING_FORMAT_,_DATE_FORMAT_\n \n if not cfg.get('logging', True):\n logging.basicConfig(handler=logging.NullHandler)\n return\n \n #check passed in cfgs if formats changed\n if cfg.get('log_format', False):\n format = cfg.get('log_format')\n if cfg.get('log_date_format',False):\n date = cfg.get('log_date_format')\n \n if cfg.get('log_debug', False):\n logging.basicConfig(level=logging.DEBUG,\n format=format,\n datefmt=date,\n filename=cfg.get('log_path', 'errors.log'))\n console = logging.StreamHandler()\n console.setLevel(logging.DEBUG)\n logging.getLogger().addHandler(console)\n \n elif cfg.get('log_warnings', False):\n logging.basicConfig(level=logging.WARNING,\n format=format,\n datefmt=date,\n filename=cfg.get('log_path','errors.log'))\n \n else:# Errors are always logged. 
deal.\n logging.basicConfig(level=logging.ERROR,\n format=format,\n datefmt=date,\n filename=cfg.get('log_path','errors.log'))", "def on_train_begin(self, logs=None):\n f = open(self.log_file_path, \"a\")\n f.write(f\"{'=' * 5}{self.model_name}({self.hp_log_title}){'=' * 5}\\n\")\n f.close()", "def configure_py_log(directory=None, filename=sys.argv[0], mode=\"w\"):\n if directory is None:\n logging.basicConfig(\n level=logging.INFO,\n format=\"[%(asctime)s] [%(levelname)s] %(name)s: %(message)s\",\n )\n else:\n logging.basicConfig(\n filename=os.path.join(directory, filename),\n filemode=mode,\n level=logging.INFO,\n format=\"[%(asctime)s] [%(levelname)s] %(name)s: %(message)s\",\n )", "def init_log(log_level=logging.DEBUG):\n now = time.time()\n ts = datetime.datetime.fromtimestamp(now).strftime('%Y%m%d')\n file_name = os.path.abspath(os.path.join(os.getcwd(), '..', 'traffic_logs', f'{ts}_traffic.log'))\n folder, _ = os.path.split(file_name)\n Path(folder).mkdir(parents=True, exist_ok=True)\n\n # create formatter and add it to the handlers\n log_format = '[%(asctime)s][%(name)s][%(levelname)s] %(message)s'\n\n logging.basicConfig(filemode='a',\n format=log_format,\n datefmt='%H:%M:%S',\n level=logging.ERROR,\n stream=sys.stdout,\n # filename=file_handler\n )\n\n formatter = logging.Formatter(log_format)\n\n # create file handler which logs even debug messages\n file_handler = logging.FileHandler(file_name)\n file_handler.setFormatter(formatter)\n file_handler.setLevel(log_level)\n\n std_out = logging.StreamHandler(sys.stdout)\n std_out.setFormatter(formatter)\n std_out.setLevel(log_level)\n\n # This for avoiding streams to log to root's stderr, which prints in red in jupyter\n root_logger = logging.getLogger()\n for handler in root_logger.handlers:\n # continue\n root_logger.removeHandler(handler)\n\n # add the handlers to the logger\n root_logger.addHandler(file_handler)\n\n # By default the install() function installs a file_handler on the root root_logger,\n # this means that log messages from your code and log messages from the\n # libraries that you use will all show up on the terminal.\n coloredlogs.install(level=log_level, fmt=log_format, stream=sys.stdout)", "def log_one_epoch(\n self, epoch: int, log_info: List[Tuple[str, float, Callable[[float], str]]]\n ) -> None:\n log_str = f\"Epoch: [{epoch} | {self.config['EPOCHS']-1}]\\t\"\n log_str += \"\\t\".join([f\"{name}: \" + f(val) for name, val, f in log_info])\n logger.info(log_str)\n\n # logging\n if self.wandb_log:\n model_utils.wlog_weight(self.model)\n wandb.log(dict((name, val) for name, val, _ in log_info))", "def logging_setup(args, log_dir):\n timestamp_file = datetime.now().strftime(\"%Y%m%d-%H.%M_rcf_abb.log\")\n log_file = Path(log_dir) / timestamp_file\n\n handlers = []\n\n if not args.skip_logfile:\n handlers.append(log.FileHandler(log_file, mode=\"a\"))\n if not args.quiet:\n handlers.append(log.StreamHandler(sys.stdout))\n\n log.basicConfig(\n level=log.DEBUG if args.debug else log.INFO,\n format=\"%(asctime)s:%(levelname)s:%(funcName)s:%(message)s\",\n handlers=handlers,\n )", "def set_log_file(filename):\n pass", "def _configure_logging(self):\n self.log_level = Scaffold.LOG_LEVEL_MAP.get(self.log_level, ERROR)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n # assign the windmill instance logger\n #logging.basicConfig()\n self.log = logging.getLogger(self.name)\n self.log.setLevel(self.log_level)\n\n if self.log_path:\n file_path = None\n if 
self.log_path.endswith('.log'):\n file_path = self.log_path\n else:\n file_path = os.path.join(self.log_path, self.name + '.log')\n assert file_path\n file_handler = logging.FileHandler(file_path)\n file_handler.setLevel(self.log_level)\n file_handler.setFormatter(formatter)\n self.log.addHandler(file_handler)\n\n # if we are in verbose mode, then we send log output to console\n if self.verbose:\n # add the console logger for verbose mode\n console_handler = logging.StreamHandler()\n console_handler.setLevel(self.log_level)\n console_handler.setFormatter(formatter)\n self.log.addHandler(console_handler)\n\n self.log.info('Logging configured for: %s', self.name)", "def _configure_logging(self):\n pass", "def log_exp_config(xp_path, dataset):\n\n log_file = \"{}/log.txt\".format(xp_path)\n log = open(log_file, \"a\")\n\n log.write(\"Experiment configuration\\n\")\n log.write(\"Dataset: {}\\n\".format(dataset))\n log.write(\"Seed: {}\\n\".format(Cfg.seed))\n log.write(\"Fraction of Outliers: {}\\n\".format(Cfg.out_frac))\n log.write(\"First layer weight init by dictionary: {}\\n\".format(Cfg.weight_dict_init))\n log.write(\"PCA pre-processing? {}\\n\".format(Cfg.pca))\n log.write(\"Norm used: {}\\n\".format(Cfg.unit_norm_used))\n log.write(\"Global contrast normalization? {}\\n\".format(Cfg.gcn))\n log.write(\"ZCA Whitening? {}\\n\".format(Cfg.zca_whitening))\n log.write(\"Number of centroids? {}\\n\".format(Cfg.n_cluster))\n\n if dataset == 'mnist':\n str_normal = str(Cfg.mnist_normal)\n str_outlier = str(Cfg.mnist_outlier)\n if Cfg.mnist_normal == -1:\n str_normal = \"all\"\n if Cfg.mnist_outlier == -1:\n str_outlier = \"all\"\n log.write(\"MNIST classes: {} vs. {}\\n\".format(str_normal, str_outlier))\n log.write(\"MNIST representation dimensionality: {}\\n\".format(Cfg.mnist_rep_dim))\n log.write(\"MNIST Network with bias terms? {}\\n\".format(Cfg.mnist_bias))\n\n if dataset == 'cifar10':\n str_normal = str(Cfg.cifar10_normal)\n str_outlier = str(Cfg.cifar10_outlier)\n if Cfg.cifar10_normal == -1:\n str_normal = \"all\"\n if Cfg.cifar10_outlier == -1:\n str_outlier = \"all\"\n log.write(\"CIFAR-10 classes: {} vs. {}\\n\".format(str_normal, str_outlier))\n log.write(\"CIFAR-10 representation dimensionality: {}\\n\".format(Cfg.cifar10_rep_dim))\n log.write(\"CIFAR-10 Network with bias terms? {}\\n\".format(Cfg.cifar10_bias))\n\n if dataset == 'mobifall':\n str_normal = str(Cfg.mobiFall_normal)\n str_outlier = str(Cfg.mobiFall_outlier)\n log.write(\"MobiFall classes: {} vs. {}\\n\".format(str_normal, str_outlier))\n log.write(\"MobiFall representation dimensionality: {}\\n\".format(Cfg.mobiFall_rep_dim))\n log.write(\"MobiFall Network with bias terms? 
{}\\n\".format(Cfg.mobiFall_bias))\n\n log.write(\"\\n\\n\")\n log.close()", "def setup_logging():\r\n import ConfigParser # change this to configparser for Python 3\r\n # import logging\r\n import logging.config\r\n global logger\r\n\r\n try:\r\n \tlogging.config.fileConfig(\"celog.conf\")\r\n except ConfigParser.NoSectionError: \r\n\t# if there is no configuration file setup a default configuration\r\n logging.basicConfig(filename='code_extract.log',level= _logging_level,\r\n\t\t\tformat='%(asctime)s %(levelname)s - %(message)s',\r\n\t\t\tdatefmt='%Y %b %d, %a %H:%M:%S'\r\n\t\t\t)\r\n \r\n logger = logging.getLogger('%s' % __name__)\r\n\r\n logger.debug('logger ready')", "def setup_logs():\n tf.logging.set_verbosity(FLAGS.log)", "def setup_exp_logging(config, trainer, optimizers, evaluators):\n\n #::: if (it.logger === 'clearml') { :::#\n logger = common.setup_clearml_logging(\n trainer, optimizers, evaluators, config.log_every_iters\n )\n #::: } else if (it.logger === 'mlflow') { :::#\n logger = common.setup_mlflow_logging(\n trainer, optimizers, evaluators, config.log_every_iters\n )\n #::: } else if (it.logger === 'neptune') { :::#\n logger = common.setup_neptune_logging(\n trainer, optimizers, evaluators, config.log_every_iters\n )\n #::: } else if (it.logger === 'polyaxon') { :::#\n logger = common.setup_plx_logging(\n trainer, optimizers, evaluators, config.log_every_iters\n )\n #::: } else if (it.logger === 'tensorboard') { :::#\n logger = common.setup_tb_logging(\n config.output_dir,\n trainer,\n optimizers,\n evaluators,\n config.log_every_iters,\n )\n #::: } else if (it.logger === 'visdom') { :::#\n logger = common.setup_visdom_logging(\n trainer, optimizers, evaluators, config.log_every_iters\n )\n #::: } else if (it.logger === 'wandb') { :::#\n logger = common.setup_wandb_logging(\n trainer, optimizers, evaluators, config.log_every_iters\n )\n #::: } :::#\n return logger", "def log_model_info(log_file: str, full_train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner):\n # Only write logs on host 0.\n if jax.process_index() != 0:\n return\n\n state_dict = full_train_state.state_dict()\n param_state_dict = state_dict['target']\n total_num_params = jax.tree_util.tree_reduce(\n np.add, jax.tree_map(np.size, param_state_dict))\n\n param_logical_axes = partitioner.get_logical_axes(\n full_train_state).state_dict()['target']\n\n param_mesh_axes = jax.tree_map(\n lambda x: tuple(x) if x is not None else None,\n partitioner.get_mesh_axes(full_train_state).state_dict()['target'])\n\n def _log_info_and_write_to_file(writer, format_str, *args):\n logging.info(format_str, *args)\n writer.write(format_str % args + '\\n')\n\n with gfile.GFile(log_file, 'w') as writer:\n\n # Log params\n def _log_param(name: str, arr: np.ndarray,\n logical_axes: Optional[partitioning.AxisNames],\n mesh_axes: Optional[partitioning.PartitionSpec]):\n if logical_axes is None:\n shape_str = str(arr.shape)\n else:\n assert len(logical_axes) == len(arr.shape)\n shape_str = '({})'.format(', '.join(\n f'{name}={dimension}'\n for name, dimension in zip(logical_axes, arr.shape)))\n _log_info_and_write_to_file(\n writer, 'Variable %-80s size %-12s shape %-40s partition spec %s',\n name, arr.size, shape_str, mesh_axes)\n\n jax.tree_map(_log_param, state_utils.get_name_tree(param_state_dict),\n param_state_dict, param_logical_axes, param_mesh_axes)\n\n _log_info_and_write_to_file(writer, 'Total number of parameters: %d',\n total_num_params)\n\n # Add a blank line between params and states.\n 
_log_info_and_write_to_file(writer, '')\n\n # Log states\n def _log_state(name, arr):\n if arr is None:\n _log_info_and_write_to_file(writer, 'State %-80s None', name)\n else:\n _log_info_and_write_to_file(writer,\n 'State %-80s size %-12s shape %s', name,\n arr.size, arr.shape)\n\n jax.tree_map(_log_state, state_utils.get_name_tree(state_dict['state']),\n state_dict['state'])", "def setup_logging(logfile_name=None, do_logging=True, level=logging.DEBUG):\n # pylint: disable-msg=C0111\n if do_logging and logfile_name:\n logging.basicConfig(level=level, filename=logfile_name, filemode=\"w\",\n datefmt='%a, %d %b %Y %H:%M:%S',\n format=\"%(asctime)s %(name)s %(levelname)-8s %(message)s\")\n else:\n class LogSink(object):\n def write(self, *args, **kwargs):\n pass\n def flush(self, *args, **kwargs):\n pass\n logging.basicConfig(stream=LogSink())", "def setup_logging(verbose=True,logfile=None):\n l=logging.getLogger()\n \n l.setLevel(logging.DEBUG if verbose else logging.INFO)\n \n formatter=logging.Formatter(\"[%(asctime)s] %(levelname)-6s %(name)-35s %(message)s \")\n \n if logfile!=None:\n handler=logging.FileHandler(logfile)\n else:\n handler=logging.StreamHandler()\n \n handler.setFormatter(formatter)\n l.addHandler(handler)", "def setup_logging(log_basedir=\"logs\"):\n BASEDIR = os.path.abspath(os.path.dirname(__file__))\n LOGDIR = os.path.join(BASEDIR,log_basedir)\n \n # Check if the logs directory exists and is writable\n if not os.path.isdir(LOGDIR):\n print('ERROR: Log directory {} does not exist.'.format(LOGDIR))\n sys.exit(1)\n if not os.access(LOGDIR, os.W_OK):\n print('ERROR: No permissions to write to log directory {}.'.format(LOGDIR))\n sys.exit(1)\n\n # Set the log message format\n fmt = '%(levelname)s - %(asctime)s.%(msecs).03d %(process)d [%(filename)s:%(lineno)d] %(message)s'\n datefmt = '%m%d %H:%M:%S'\n formatter = logging.Formatter(fmt, datefmt)\n\n # Log to console\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(formatter)\n\n root = logging.getLogger()\n root.setLevel(logging.DEBUG)\n root.addHandler(console_handler)\n\n # Log to file, use a rotating file\n file_name = os.path.join(LOGDIR, '{}.log'.format(\"flask_api_otrs\") )\n\n file_handler = logging.handlers.RotatingFileHandler(file_name, backupCount=7)\n file_handler.setFormatter(formatter)\n root.addHandler(file_handler)", "def startLogging(self):\n logFile = self.options.get(RunnerOptions.logFile, stderr)\n\n fileLogObserverFactory = self.options.get(\n RunnerOptions.fileLogObserverFactory, textFileLogObserver\n )\n\n fileLogObserver = fileLogObserverFactory(logFile)\n\n logLevelPredicate = LogLevelFilterPredicate(\n defaultLogLevel=self.options.get(\n RunnerOptions.defaultLogLevel, LogLevel.info\n )\n )\n\n filteringObserver = FilteringLogObserver(\n fileLogObserver, [logLevelPredicate]\n )\n\n globalLogBeginner.beginLoggingTo([filteringObserver])", "def setupLogging(self):\n\t\ttry:\n\t\t\tself.logger = logging.getLogger(__name__)\n\t\t\thandler = RotatingFileHandler(self.logFile, maxBytes=500000, backupCount=5)\n\t\t\tformat = \"%(asctime)s %(levelname)-8s %(message)s\"\n\t\t\thandler.setFormatter(logging.Formatter(format))\n\t\t\thandler.setLevel(logging.INFO)\n\t\t\tself.logger.addHandler(handler)\n\t\t\tself.logger.setLevel(logging.INFO)\n\t\texcept Exception as err:\n\t\t\terrorStr = 'Error initializing log file, ',err\n\t\t\tprint(errorStr)\n\t\t\texit(1)", "def setup_logging(save_dir, log_config='logger/logger_config.json', 
default_level=logging.INFO):\n log_config = Path(log_config)\n if log_config.is_file():\n config = read_json(log_config)\n # modify logging paths based on run config\n for _, handler in config['handlers'].items():\n if 'filename' in handler:\n handler['filename'] = str(save_dir / handler['filename'])\n\n logging.config.dictConfig(config)\n else:\n print(\"Warning: logging configuration file is not found in {}.\".format(log_config), file=sys.stderr)\n logging.basicConfig(level=default_level)", "def init_tf_logging(log_path):\n # Create log formatter\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n # Custom GFile Handler\n class GFileHandler(logging.FileHandler):\n def __init__(self, filename, mode='a', encoding=None, delay=False):\n self.filename = filename\n logging.FileHandler.__init__(self, filename, mode, encoding, delay)\n\n def _open(self):\n return tf.gfile.Open(self.filename, self.mode)\n\n # Create log directory if not existed\n if not tf.gfile.Exists(log_path):\n tf.gfile.MkDir(log_path)\n\n # Create file handler\n fh = GFileHandler(os.path.join(log_path, \"tensorflow-\"\n + time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime()) + \".log\"), mode=\"w\")\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(formatter)\n\n # Init logging\n logger = logging.getLogger('tensorflow')\n for handler in logger.handlers:\n logger.removeHandler(handler)\n logger.addHandler(ch)\n logger.addHandler(fh)\n logger.setLevel(logging.DEBUG)", "def enable_log_file():\n\n file_handler = logging.FileHandler(\"run-{}.log\".format(get_time_str()))\n file_handler.setFormatter(FORMATTER)\n\n for logger in LOGGER_TABLE.values():\n logger.addHandler(file_handler)", "def setup_logger(config):\n filename = config[\"LOGGER_FILE\"]\n log_dir = '/'.join(filename.split('/')[0:-1]) + \"/\"\n\n check_and_create_directory(log_dir)\n\n level = config[\"LOGGER_LOGLEVEL\"].upper()\n filemode = 'a'\n _format = '%(asctime)s %(name)8s %(module)15s %(funcName)12s %(' \\\n 'levelname)7s: %(message)s'\n _dateformat = '(%d.%m.%Y, %H:%M:%S)'\n\n logging.basicConfig(filename=filename, filemode=filemode, level=level,\n format=_format, datefmt=_dateformat)\n\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n logging.getLogger(\"werkzeug\").setLevel(logging.WARNING)\n\n # Display log simultaneously on console\n if config[\"CONSOLE_LOGGING\"]:\n add_terminal_logging(_format, level)", "def config_logging(self):\n logging.basicConfig(filename='move_dupes.log',\n filemode='a',\n format='%(asctime)s,%(msecs)d ' +\\\n '%(name)s %(levelname)s %(message)s',\n datefmt='%H:%M:%S',\n level=logging.DEBUG)\n logging.info(\"Running audio dupe mover\")\n \n return logging.getLogger('move_dupes')", "def configure_logging():\n class TimeFormatter(logging.Formatter):\n def formatTime(self, record, datefmt=None):\n datefmt = datefmt or '%Y-%m-%d %H:%M:%S'\n return time.strftime(datefmt, time.localtime(record.created))\n\n class SeverityFilter(logging.Filter):\n def filter(self, record):\n record.severity = record.levelname[0]\n return True\n\n if not os.path.exists(LOG_DIR):\n os.mkdir(LOG_DIR)\n\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n log_file = logging.handlers.RotatingFileHandler(LOG_FILE, backupCount=100)\n log_file.addFilter(SeverityFilter())\n log_file.setFormatter(TimeFormatter('%(asctime)s %(severity)s: %(message)s'))\n logger.addHandler(log_file)\n\n # Log all uncaught 
exceptions.\n def log_exception(exception_type, value, stack_trace):\n logging.error(\n ''.join(traceback.format_exception(exception_type, value, stack_trace)),\n )\n sys.excepthook = log_exception\n\n # Rotate log files once on startup to get per-execution log files.\n if os.path.exists(LOG_FILE):\n log_file.doRollover()", "def setup_logfile():\r\n from core.general.appinit import log_init\r\n log_init(\r\n 'general',\r\n 'django_api'\r\n )", "def configure_logging():\n dictConfig(DEFAULT_LOGGING)\n\n default_formatter = logging.Formatter(\n \"%(asctime)s [%(levelname)s] [PID:%(process)d TID:%(thread)d] [%(filename)s:%(lineno)s in `%(funcName)s`] %(message)s\",\n \"%Y-%m-%d %H:%M:%S\")\n\n # file_handler = logging.handlers.RotatingFileHandler(logfile_path, maxBytes=10485760,backupCount=300, encoding='utf-8')\n # file_handler.setLevel(logging.INFO)\n\n if len(logging.getLogger().handlers) > 0:\n for h in logging.getLogger().handlers:\n if isinstance(h, logging.StreamHandler):\n # Then we found a logger to the terminal\n h.setLevel(logging.DEBUG)\n h.setFormatter(default_formatter)\n\n else:\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(default_formatter)\n logging.root.addHandler(console_handler)\n\n\n logging.root.setLevel(logging.WARNING)", "def set(cls, log_level, log_filename, append):\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n # Log to sys.stderr using log level passed through command line\n if log_level != logging.NOTSET:\n log_handler = logging.StreamHandler(sys.stdout)\n if sys.platform.find('linux') >= 0:\n formatter = ColoredFormatter(cls.COLOR_FORMAT)\n else:\n formatter = ColoredFormatter(cls.NO_COLOR_FORMAT, False)\n log_handler.setFormatter(formatter)\n log_handler.setLevel(log_level)\n logger.addHandler(log_handler)\n\n # Log to rotating file using DEBUG log level\n log_handler = logging.handlers.RotatingFileHandler(log_filename,\n mode='a+',\n backupCount=3)\n formatter = logging.Formatter('%(asctime)s %(levelname)-8s '\n '%(message)s')\n log_handler.setFormatter(formatter)\n log_handler.setLevel(logging.DEBUG)\n logger.addHandler(log_handler)\n\n if not append:\n # Create a new log file on every new\n # (i.e. not scheduled) invocation\n log_handler.doRollover()", "def make_logger(model_dir: str, log_file: str = \"train.log\") -> Logger:\n logger = logging.getLogger(__name__)\n if not logger.handlers:\n logger.setLevel(level=logging.DEBUG)\n fh = logging.FileHandler(\"{}/{}\".format(model_dir, log_file))\n fh.setLevel(level=logging.DEBUG)\n logger.addHandler(fh)\n formatter = logging.Formatter(\"%(asctime)s %(message)s\")\n fh.setFormatter(formatter)\n if platform == \"linux\":\n sh = logging.StreamHandler()\n sh.setLevel(logging.INFO)\n sh.setFormatter(formatter)\n logging.getLogger(\"\").addHandler(sh)\n logger.info(\"Hello! This is Joey-NMT.\")\n return logger", "def configure_logging():\n configuration = get_configuration()\n logging.basicConfig(**configuration.get('logging', {}))\n\n logging.debug('Logging configured.')", "def init_logging(input_file_parameters, dir_stacks):\r\n fl_name = '{0}_log_{1}_{2}.txt'.format(NAME,\r\n START_TIME,\r\n input_file_parameters.job_name)\r\n #NOTICE! Current_log_path.path is changed here!\r\n CurrentLogPath.path = os.path.join(input_file_parameters.output_dir,\r\n fl_name)\r\n logging.basicConfig(filename=CurrentLogPath.path, filemode='w',\r\n format='%(asctime)s %(levelname)s:%(message)s',\r\n level=logging.INFO)\r\n logging.info('{0} v. 
{1} started'.format(NAME, VERSION))\r\n logging.info('Job name: {0}'.format(input_file_parameters.job_name))\r\n logging.info('Starting point directory:\\n{0}'.format(dir_stacks[0]\r\n .path))\r\n logging.info('Output directory:\\n{0}'.format(input_file_parameters.output_dir))\r\n logging.info('-'*80)\r\n logging.info('staplefile contents:\\n{0}'.format('\\n'.join(input_file_parameters.staplefile)))\r\n logging.info('-'*80)\r\n logging.info('config.txt contents:\\n{0}'\r\n .format(utils.get_config_file()))\r\n logging.info('-'*80)", "def set(cls, log_level, log_filename, append):\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n # Log to sys.stderr using log level passed through command line\n if log_level != logging.NOTSET:\n log_handler = logging.StreamHandler()\n formatter = logging.Formatter(\"%(levelname)-8s %(message)s\")\n log_handler.setFormatter(formatter)\n log_handler.setLevel(log_level)\n logger.addHandler(log_handler)\n\n # Log to rotating file using DEBUG log level\n log_handler = logging.handlers.RotatingFileHandler(\n log_filename, mode=\"a+\", backupCount=3\n )\n formatter = logging.Formatter(\"%(asctime)s %(levelname)-8s %(message)s\")\n log_handler.setFormatter(formatter)\n log_handler.setLevel(logging.DEBUG)\n logger.addHandler(log_handler)\n\n if not append:\n # Create a new log file on every new\n # (i.e. not scheduled) invocation\n log_handler.doRollover()", "def setup_log(self, log_file):\n directory = os.path.dirname(log_file)\n if directory:\n os.makedirs(directory, exist_ok=True)\n\n logger = logging.getLogger(log_file)\n formatter = logging.Formatter(config.LOG_FORMAT)\n\n file_handler = logging.FileHandler(log_file, mode='a')\n file_handler.setFormatter(formatter)\n\n logger.setLevel(logging.INFO)\n logger.addHandler(file_handler)\n\n return logger", "def initialize_log():\n logging.basicConfig(\n format='%(asctime)s %(levelname)-8s %(message)s',\n level=logging.INFO,\n datefmt='%Y-%m-%d %H:%M:%S',\n )", "def init_logs() -> None:\n logging.basicConfig(\n filename=\"logs.txt\",\n filemode=\"w\",\n format=\"%(asctime)s:%(levelname)s:%(filename)s - %(message)s\",\n level=logging.ERROR,\n )\n\n formatter = logging.Formatter(\n \"%(asctime)s:%(levelname)s:%(filename)s - %(message)s\"\n )\n\n global logger\n logger = logging.getLogger(__name__)\n\n # simlogger = logging.getLogger(\"netsquid\")\n # simlogger.setLevel(logging.DEBUG)\n # fhandler = logging.FileHandler(\"simlogs.txt\", mode=\"w\")\n # fhandler.setFormatter(formatter)\n # simlogger.addHandler(fhandler)\n\n # shandler = logging.StreamHandler(stream=sys.stdout)\n # shandler.setLevel(logging.ERROR)\n # shandler.setFormatter(formatter)\n # simlogger.addHandler(shandler)", "def setup_logging():\n name_json = 'logging_config.json'\n path_json = os.path.join(os.path.dirname(__file__), name_json)\n with open(path_json, 'r') as f_json:\n dict_config = json.load(f_json)\n logging.config.dictConfig(dict_config)", "def configure(base_path):\n\n log_path = os.path.join(\n base_path,\n 'logs',\n )\n current_time = datetime.datetime.now().strftime(\"%d.%m.%Y %H:%M:%S\")\n\n log_fmt = '%(asctime)s [%(threadName)-12.12s] [%(levelname)-3.4s] %(message)s'\n\n logging.basicConfig(\n level=logging.INFO,\n format=log_fmt,\n handlers=[\n TimedRotatingFileHandler(\n filename=f\"{log_path}/analysis-service.({current_time}).log\",\n encoding='utf-8',\n when=\"d\"\n ),\n logging.StreamHandler()\n ]\n )", "def setup_logging(filepath=core.ServerConfiguration.LOGDIR,\n log_name='server_process'):\n\n if not 
os.path.exists(filepath):\n raise IOError('LOG_DIR filepath does not exist: {0:s}'.format(filepath))\n\n if not log_name in core.DEFAULT_LOGGER_PROCESSES:\n raise ValueError('Log_name should be in {0:s}.'\n .format(core.DEFAULT_LOGGER_PROCESSES))\n\n filename = generate_log_filename()\n log_file = os.path.join(filepath, filename)\n\n # configure log formatter\n log_fmt = logging.Formatter('[%(levelname)s][%(asctime)s] %(message)s',\n datefmt='%Y/%m/%d %I:%M:%S %p')\n\n # configure file handler\n file_handler = logging.FileHandler(log_file)\n file_handler.setFormatter(log_fmt)\n\n # stream handler\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(log_fmt)\n\n # setup a server log, add file and stream handlers\n logger = logging.getLogger(log_name)\n logger.addHandler(file_handler)\n logger.addHandler(stream_handler)\n logger.setLevel(logging.DEBUG)\n\n return log_file", "def log(self):\n self.logger = logging.getLogger(self.log_name)\n self.logger.info(f\"Name: {self.name}\")\n self.logger.info(f\"Grid points: {self.gp}\")\n self.logger.info(f\"Nadir points: {self.nadir_p}\")\n self.logger.info(f\"Penalty weight: {self.eps}\")\n self.logger.info(f\"Early exit: {self.early_exit}\")\n self.logger.info(f\"Bypass coefficient: {self.bypass}\")\n self.logger.info(f\"Flag array: {self.flag}\")\n self.logger.info(f\"CPU Count: {self.cpu_count}\")\n self.logger.info(f\"Redivide work: {self.redivide_work}\")\n self.logger.info(f\"Shared flag array: {self.shared_flag}\")\n self.logger.info(Helper.separator())", "def configLogging():\n # define a basic logger to write to file\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(levelname)-8s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filename='/tmp/execute_pomset.log',\n filemode='w')\n\n # end def configureLogging\n pass", "def configure(cls):\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n logger_handler = logging.StreamHandler()\n logger.addHandler(logger_handler)\n logger_handler.setFormatter(logging.Formatter('%(message)s'))\n cls.logger = logger", "def setup_logging():\n logging.basicConfig(format='%(levelname)s: %(message)s', level=LOGLEVEL)", "def setup_logger():\n logger = logging.getLogger('tracking_log')\n logger.setLevel(logging.INFO)\n #Where to Store needs to be identified?\n f_handler = logging.FileHandler(PROCESSED_LOGFILE, mode='a', encoding = None, delay = False)\n f_handler.setLevel(logging.INFO)\n f_format = logging.Formatter('%(asctime)s\\t%(message)s\\t%(dataset_id)s\\t%(status)s')\n f_handler.setFormatter(f_format)\n logger.addHandler(f_handler)\n return logger", "def configure_logger(logpath, loglevel=logging.DEBUG):\n handlers = [logging.StreamHandler()]\n\n if logpath:\n handlers.append(logging.FileHandler(logpath))\n\n logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%d-%m-%y %H:%M:%S', level=loglevel, handlers=handlers)", "def configure_logger (max_threads):\n\t\t# Hack for log line separator\n\t\twith open(\"pinger.log\", \"a\") as log:\n\t\t\tlog.write(\n\t\t\t\t\"==============================================================================================\\n\")\n\n\t\tlogging.basicConfig(filename=\"pinger.log\", level=logging.DEBUG, filemode='a',\n\t\t format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', datefmt='%d.%m.%Y %H:%M:%S')\n\t\tlogging.info(\"Started with max threads: %d\", max_threads)", "def _set_up_logging(tag, name):\n log_file = \"{}_{}.log\".format(tag, name)\n 
logging.basicConfig(filename=log_file, level=logging.DEBUG, filemode='w')\n logging.info(\"Initialized bot {}\".format(name))", "def _set_up_logging(tag, name):\n log_file = \"{}_{}.log\".format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG, filemode='w')\n logging.info(\"Initialized bot {}\".format(name))", "def _set_up_logging(tag, name):\n log_file = \"{}_{}.log\".format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG, filemode='w')\n logging.info(\"Initialized bot {}\".format(name))", "def config_logging():\n logging.basicConfig(level=logging.INFO)\n logging.getLogger('deepcomp').setLevel(logging.WARNING)\n logging.getLogger('deepcomp.main').setLevel(logging.INFO)\n logging.getLogger('deepcomp.util.simulation').setLevel(logging.INFO)\n # logging.getLogger('deepcomp.env.entities.user').setLevel(logging.DEBUG)\n # logging.getLogger('deepcomp.env.multi_ue.multi_agent').setLevel(logging.DEBUG)\n logging.getLogger('matplotlib').setLevel(logging.WARNING)\n logging.getLogger('tensorflow').setLevel(logging.ERROR)\n gym.logger.set_level(gym.logger.ERROR)\n # structlog.configure(logger_factory=LoggerFactory())\n structlog.configure(logger_factory=LoggerFactory(),\n processors=[\n structlog.stdlib.filter_by_level,\n FloatRounder(digits=LOG_ROUND_DIGITS, not_fields=['sinr', 'signal', 'interference']),\n structlog.dev.ConsoleRenderer()\n ])", "def start_training(self, logdir: str, **info):\n pass", "def init_logger():\n logpath = Path(f\"logs/{time.strftime('%Y.%m.%d %H:%M')}.txt\")\n logpath.parent.mkdir(exist_ok=True)\n logging.basicConfig(filename=logpath, level=logging.DEBUG)", "def setup_logging():\n product_name = \"plasma\"\n logging.setup(cfg.CONF, product_name)\n LOG.info(\"Logging enabled!\")\n LOG.debug(\"command line: %s\", \" \".join(sys.argv))", "def setup_logging(log_dir: Optional[str] = None) -> None:\n config: Dict[str, Any] = {\n \"version\": 1,\n \"disable_existing_loggers\": True,\n \"formatters\": {\"console\": {\"format\": \"%(asctime)s:\\t%(message)s\"}},\n \"handlers\": {\n \"console\": {\n \"level\": \"WARNING\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"console\",\n \"stream\": \"ext://sys.stdout\",\n }\n },\n \"loggers\": {\n LOG_NAME: {\"handlers\": [\"console\"], \"level\": \"DEBUG\", \"propagate\": False}\n },\n }\n if log_dir is not None:\n config[\"loggers\"][LOG_NAME][\"handlers\"].append(\"file\")\n config[\"formatters\"][\"file\"] = {\n \"format\": \"%(asctime)s - %(levelname)s - %(name)s - %(message)s\"\n }\n config[\"handlers\"][\"file\"] = {\n \"level\": \"DEBUG\",\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"formatter\": \"file\",\n \"filename\": os.path.join(log_dir, LOG_NAME + \".log\"),\n \"maxBytes\": 1000000,\n \"backupCount\": 3,\n }\n logging.config.dictConfig(config)", "def init_log_file(self):\r\n try:\r\n os.makedirs(config[\"server_log_path\"])\r\n except OSError:\r\n if not os.path.isdir(config[\"server_log_path\"]):\r\n raise\r\n server_log_file = logging.FileHandler(\r\n config[\"server_log_path\"] + 'server_log_' + time.strftime('%Y-%m-%d_%H.%M.%S') + '.txt')\r\n server_log_file.setLevel(logging.DEBUG)\r\n server_log_file.setFormatter(file_formatter)\r\n server_log.addHandler(server_log_file)", "def _setup_logging(self):\n if self.app_config_has(\"logging\"):\n log_config = self.app_config()[\"logging\"]\n filename_list = [\n v['filename'] for k, v in\n _find_config_tree(log_config, \"filename\")\n ]\n # pre-create directory in advance for all loggers\n for file in 
filename_list:\n file_dir = os.path.dirname(file)\n if file_dir and not os.path.isdir(file_dir):\n os.makedirs(file_dir, exist_ok=True)\n dictConfig(log_config)\n else:\n log = getLogger()\n handler = StreamHandler()\n formatter = Formatter(\n \"%(asctime)s-%(threadName)s-%(name)s-%(levelname)s-%(message)s\"\n )\n handler.setFormatter(formatter)\n log.addHandler(handler)\n log.setLevel(DEBUG)\n msg = (\"Starting \" + os.path.basename(__name__) +\n \" version \" + __version__ + \" on \" +\n \"_\".join(uname()).replace(\" \", \"_\"))\n logger = getLogger(__name__)\n logger.debug(msg)", "def logfile(targetfile=\"ros.log\"):\n log = logging.getLogger(__name__)\n log.basicConfig(filename=str(targetfile))", "def configure_logger():\n logger = logging.getLogger()\n handler = logging.StreamHandler()\n formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.INFO)", "def set_log(self, level=logging.INFO, file=\"\"):\n\n self._logging_level = level\n\n if file:\n self._log_file = file", "def init_log_file(folder_path, suffix=None, log_level=logging.INFO):\n\n timestamp = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n log_format = '[%(levelname)s]: %(asctime)s %(message)s'\n\n if suffix is not None:\n file_name = timestamp + '_' + suffix\n else:\n file_name = timestamp\n\n file_path = os.path.join(folder_path, file_name)\n logging.basicConfig(filename=file_path, level=log_level, format=log_format)\n logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))\n\n return file_path", "def init_logs(self):\n\n handler = logging.FileHandler(self.app.config['LOG'])\n handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))\n self.app.logger.addHandler(handler)\n if self.app.config.get(\"LOG_LEVEL\") == \"DEBUG\":\n self.app.logger.setLevel(logging.DEBUG)\n elif self.app.config.get(\"LOG_LEVEL\") == \"WARN\":\n self.app.logger.setLevel(logging.WARN)\n else:\n self.app.logger.setLevel(logging.INFO)\n self.app.logger.info('Startup with log: %s' % self.app.config['LOG'])", "def __logtofile(self, log_name):\n logger = logging.getLogger(log_name)\n\n file_path = os.path.join(self.log_file_path, log_name + '.txt')\n\n formatter = logging.Formatter('<%(asctime)s> %(levelname)-8s %(message)s',\n datefmt='%y-%m-%d %H:%M:%S')\n self.file_handlers[logger] = logging.FileHandler(file_path, mode='w')\n self.file_handlers[logger].setFormatter(formatter)\n self.file_handlers[logger].setLevel(logging.DEBUG)\n logger.addHandler(self.file_handlers[logger])\n\n logger.info('SAVING LOGS IN: %s' % file_path)", "def add_param_logs(self, logger):\n if self.config.log_fine_probs:\n plate = ET.SubElement(logger, \"plate\", {\n \"var\":\"feature\",\n \"range\":\",\".join(self.features)})\n ET.SubElement(plate, \"log\", {\n \"idref\":\"featureLikelihood:%s:$(feature)\" % self.name})\n if self.rate_variation:\n ET.SubElement(logger,\"log\",{\"idref\":\"featureClockRatePrior.s:%s\" % self.name})\n ET.SubElement(logger,\"log\",{\"idref\":\"featureClockRateGammaScalePrior.s:%s\" % self.name})\n\n if self.rate_variation:\n plate = ET.SubElement(logger, \"plate\", {\n \"var\":\"feature\",\n \"range\":\",\".join(self.features)})\n ET.SubElement(plate, \"log\", {\n \"idref\":\"featureClockRate:%s:$(feature)\" % self.name})\n # Log the scale, but not the shape, as it is always 1 / scale\n # We prefer the scale because it is positively correlated with extent of variation\n 
ET.SubElement(logger,\"log\",{\"idref\":\"featureClockRateGammaScale:%s\" % self.name})", "def setup_logs(arg_log_dir, log_level='debug'):\n assert log_level.lower() in ('debug', 'info', 'warning', 'error', 'critical')\n global logger\n cl_logger = log.LogManager(app_name=APP_NAME,\n log_name=__name__,\n log_dir=arg_log_dir)\n logger = cl_logger.logger\n logger.setLevel(log_level.upper())", "def configure_logging(log_level=None, log_fpath=None):\r\n\r\n # disable logging\r\n if not log_level:\r\n logging.disable()\r\n return\r\n\r\n log_level = log_level.upper()\r\n root_logger = logging.getLogger()\r\n root_logger.setLevel(log_level)\r\n\r\n # create formatter for the logs\r\n formatter = logging.Formatter(\"%(asctime)s :: %(levelname)s :: %(name)s :: %(funcName)s() :: %(message)s\")\r\n\r\n # create console logging handler and set its formatting, add it to the root logger\r\n ch = logging.StreamHandler()\r\n ch.setLevel(log_level)\r\n ch.setFormatter(formatter)\r\n root_logger.addHandler(ch)\r\n\r\n # create file logging handler and set its formatting, add it to the root logger\r\n if log_fpath:\r\n fh = logging.FileHandler(log_fpath)\r\n fh.setLevel(log_level)\r\n fh.setFormatter(formatter)\r\n root_logger.addHandler(fh)\r\n\r\n # print first log\r\n if log_fpath is None:\r\n root_logger.info(\"First log: logging to console at %s level.\" % logging.getLevelName(root_logger.getEffectiveLevel()))\r\n else:\r\n root_logger.info(\"First log: logging to console and %s at %s level\" %(log_fpath, logging.getLevelName(root_logger.getEffectiveLevel())))", "def start_logging(self):\n text = _DEFAULT_LOG_CONFIG\n path = self.bindings.get('LOG_CONFIG', None)\n if path:\n try:\n with open(path, 'r') as f:\n text = f.read()\n except Exception as ex:\n print 'ERROR reading LOGGING_CONFIG from {0}: {1}'.format(path, ex)\n raise\n config = ast.literal_eval(args_util.replace(text, self.bindings))\n logging.config.dictConfig(config)\n log_path = os.path.join(\n self.bindings['LOG_DIR'], self.bindings['LOG_FILEBASE'] + '.log')\n os.chmod(log_path, 0600)\n\n self.__journal = global_journal.get_global_journal()\n if self.__journal is None:\n # force start\n journal_path = os.path.join(\n self.bindings['LOG_DIR'],\n self.bindings['LOG_FILEBASE'] + '.journal')\n self.__journal = global_journal.new_global_journal_with_path(journal_path)", "def init_logging(input_dir, file_name):\n create_dir(input_dir)\n config(file_name, log_level=logging.DEBUG)", "def setup_logging(\n module,\n default_level=logging.INFO,\n env_key='LOG_CFG',\n logpath=os.getcwd(),\n config_path=None\n):\n\n if not os.path.exists(os.path.dirname(logpath)):\n os.makedirs(os.path.dirname(logpath))\n timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d_%H:%M\")\n fpath = os.path.join(logpath, module, timestamp)\n\n path = config_path if config_path is not None else os.getenv(env_key, None)\n if path is not None and os.path.exists(path):\n with open(path, 'rt') as f:\n config = yaml.safe_load(f.read())\n for h in config['handlers'].values():\n if h['class'] == 'logging.FileHandler':\n h['filename'] = os.path.join(logpath, module, timestamp, h['filename'])\n touch(h['filename'])\n for f in config['filters'].values():\n if '()' in f:\n f['()'] = globals()[f['()']]\n logging.config.dictConfig(config)\n else:\n lpath=os.path.join(logpath, timestamp)\n if not os.path.exists(lpath):\n os.makedirs(lpath)\n logging.basicConfig(level=default_level, filename=os.path.join(lpath,\"base.log\"))", "def setup_logging():\n lvl = os.getenv(\"LOG_LEVEL\")\n 
path = os.getenv(\"LOG_PATH\")\n\n logger = get_logger()\n logger.setLevel(lvl)\n\n filehandler = logging.FileHandler(path)\n filehandler.setLevel(lvl)\n filehandler.setFormatter(logging.Formatter(\n \"[%(asctime)s] %(levelname)s: %(message)s\",\n datefmt=\"%Y-%d-%m %H:%M:%S\"\n ))\n\n streamhandler = logging.StreamHandler()\n streamhandler.setLevel(lvl)\n streamhandler.setFormatter(logging.Formatter(\"%(message)s\"))\n\n logger.addHandler(filehandler)\n logger.addHandler(streamhandler)", "def setup_class(cls):\n if os.path.exists(logfilename):\n os.remove(logfilename)\n log = logutils.get_logger(__name__)\n log.root.handlers = []\n logutils.config(mode='standard', console_lvl='stdinfo',\n file_name=logfilename)", "def logging_config(args):\n # Any handlers from a basicConfig, which we will reconfigure.\n for handler in logging.root.handlers:\n logging.root.removeHandler(handler)\n\n level = logging.INFO - 10 * args.verbose + 10 * args.quiet\n # The command-line logging level specifies what goes to stderr.\n root_handler = logging.StreamHandler(sys.stderr)\n fmt = \"%(levelname)s %(asctime)s %(pathname)s:%(lineno)d: %(message)s\"\n datefmt = \"%y-%m-%d %H:%M:%S\"\n root_handler.setFormatter(logging.Formatter(fmt, datefmt))\n root_handler.setLevel(level)\n logging.root.addHandler(root_handler)\n logging.root.setLevel(level)\n\n code_log = _logging_configure_root_log(args.root_dir / args.code_log, level)\n mvid = args.mvid if hasattr(args, \"mvid\") else \"mvid\"\n _logging_configure_mathlog(mvid, args.root_dir / args.epiviz_log)\n _logging_individual_modules(args.logmod, args.modlevel)\n if code_log: # Tell the math log people where the code log is located.\n logging.getLogger(\"cascade.math\").info(f\"Code log is at {code_log}\")", "def log_train_step(self, train_log: dict, step: Union[int,None] = None) -> None:\n if self.log_mlflow:\n mlflow.log_metrics(train_log, step=step)", "def init_log(log_instance):\r\n base_dir = os.path.dirname(os.path.abspath(__file__))\r\n log_dir = os.path.join(base_dir, \"logs\")\r\n if not os.path.exists(log_dir):\r\n os.makedirs(log_dir)\r\n log_file = log_instance + \"_\" + datetime.datetime.now().strftime(\"%Y-%m-%d\") + \".log\"\r\n logging_conf = {\r\n \"version\": 1,\r\n \"disable_existing_loggers\": False,\r\n \"formatters\": {\r\n \"simple\": {\r\n 'format': '%(asctime)s [%(filename)s:%(lineno)d] [%(levelname)s]- %(message)s'\r\n },\r\n 'standard': {\r\n 'format': '%(asctime)s [%(threadName)s:%(thread)d] [%(filename)s:%(lineno)d] [%(levelname)s]- %(message)s'\r\n },\r\n },\r\n\r\n \"handlers\": {\r\n \"console\": {\r\n \"class\": \"logging.StreamHandler\",\r\n \"level\": \"DEBUG\",\r\n \"formatter\": \"simple\",\r\n \"stream\": \"ext://sys.stdout\"\r\n },\r\n\r\n \"default\": {\r\n \"class\": \"logging.handlers.RotatingFileHandler\",\r\n \"level\": \"DEBUG\",\r\n \"formatter\": \"standard\",\r\n \"filename\": os.path.join(log_dir, log_file),\r\n 'mode': 'w+',\r\n \"maxBytes\": 1024 * 1024 * 5, # 5 MB\r\n \"backupCount\": 20,\r\n \"encoding\": \"utf8\"\r\n },\r\n },\r\n\r\n \"root\": {\r\n 'handlers': ['default', 'console'],\r\n 'level': \"INFO\",\r\n 'propagate': False\r\n }\r\n }\r\n\r\n logging.config.dictConfig(logging_conf)\r\n\r\n # configure application log\r\n return logging.getLogger(log_instance)", "def logger_settings(self):\n LOG_CONFIG['root']['handlers'].append(self.logmode)\n flask_log = logging.getLogger(DEFAULT_NAME_FLASK_LOGGER)\n flask_log.setLevel(logging.ERROR)\n dictConfig(LOG_CONFIG)\n self.logger = logging.getLogger()", "def 
setup_logging():\n log.setup('keystone')", "def _setup_logging(self):\n global log\n\n # Parse the ini file to validate it\n parser = ConfigParser.ConfigParser()\n parser.read(self.ini_file)\n\n # Check for the presence of [loggers] in self.ini_file\n if not parser.has_section('loggers'):\n self._fail('Config file does not have [loggers] section', use_log=False)\n\n logging.config.fileConfig(self.ini_file)\n\n # Use \"name.pid\" to avoid importer confusions in the logs\n logger_name = 'debexpo.importer.%s' % os.getpid()\n log = logging.getLogger(logger_name)", "def set_config(self, file_path_name):\n level = logging.DEBUG\n format = '%(asctime)s %(levelname)-8s %(message)s' \n datefmt = '%a, %d %b %Y %H:%M:%S'\n filemode = 'a'\n \n\n logging.basicConfig(level = level,\n format = format,\n datefmt = datefmt,\n filename = file_path_name,\n filemode = filemode)", "def configure_logging(log_rank: Optional[int], log_level: str):\n level = log_levels[log_level.lower()]\n if MPI is None:\n logging.basicConfig(\n level=level,\n format=\"%(asctime)s [%(levelname)s] %(name)s:%(message)s\",\n handlers=[logging.StreamHandler()],\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n )\n else:\n if log_rank is None or int(log_rank) == MPI.COMM_WORLD.Get_rank():\n logging.basicConfig(\n level=level,\n format=(\n f\"%(asctime)s [%(levelname)s] (rank {MPI.COMM_WORLD.Get_rank()}) \"\n \"%(name)s:%(message)s\"\n ),\n handlers=[logging.StreamHandler()],\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n )", "def _print_log(self, step, data=None):\n \n # Set mode to append to log file\n mode = 'a'\n\n if self.logfile is None:\n # Increment log counter for the class. Each instance of the class generates a new log.\n self.__class__.log_no += 1\n\n # Create a log file for the instance\n # Logs will be stored in ..\\logs\\SKLearn Log <n>.txt\n self.logfile = os.path.join(os.getcwd(), 'logs', 'SKLearn Log {}.txt'.format(self.log_no))\n \n if step == 1:\n # Output log header\n output = \"\\nSKLearnForQlik Log: {0} \\n\\n\".format(time.ctime(time.time()))\n # Set mode to write new log file\n mode = 'w'\n \n elif step == 2:\n # Output the parameters\n output = \"Model Name: {0}\\n\\n\".format(self.model.name)\n output += \"Execution arguments: {0}\\n\\n\".format(self.exec_params)\n \n try:\n output += \"Scaler: {0}, missing: {1}, scale_hashed: {2}, scale_vectors: {3}\\n\".format(\\\n self.model.scaler, self.model.missing,self.model.scale_hashed, self.model.scale_vectors)\n output += \"Scaler kwargs: {0}\\n\\n\".format(self.model.scaler_kwargs)\n except AttributeError:\n output += \"scale_hashed: {0}, scale_vectors: {1}\\n\".format(self.model.scale_hashed, self.model.scale_vectors)\n\n try:\n if self.model.dim_reduction:\n output += \"Reduction: {0}\\nReduction kwargs: {1}\\n\\n\".format(self.model.reduction, self.model.dim_reduction_args)\n except AttributeError:\n pass\n \n output += \"Estimator: {0}\\nEstimator kwargs: {1}\\n\\n\".format(self.model.estimator, self.model.estimator_kwargs)\n \n elif step == 3: \n # Output the request dataframe\n output = \"REQUEST: {0} rows x cols\\nSample Data:\\n\\n\".format(self.request_df.shape)\n output += \"{0}\\n...\\n{1}\\n\\n\".format(self.request_df.head().to_string(), self.request_df.tail().to_string())\n \n elif step == 4:\n # Output the response dataframe/series\n output = \"RESPONSE: {0} rows x cols\\nSample Data:\\n\\n\".format(self.response.shape)\n output += \"{0}\\n...\\n{1}\\n\\n\".format(self.response.head().to_string(), self.response.tail().to_string())\n \n elif step == 5:\n # Print the table 
description if the call was made from the load script\n output = \"\\nTABLE DESCRIPTION SENT TO QLIK:\\n\\n{0} \\n\\n\".format(self.table)\n \n elif step == 6:\n # Message when model is loaded from cache\n output = \"\\nModel {0} loaded from cache.\\n\\n\".format(self.model.name)\n \n elif step == 7:\n # Message when model is loaded from disk\n output = \"\\nModel {0} loaded from disk.\\n\\n\".format(self.model.name)\n \n elif step == 8:\n # Message when cache is updated\n output = \"\\nCache updated. Models in cache:\\n{0}\\n\\n\".format([k for k,v in self.__class__.model_cache.items()])\n \n elif step == 9:\n # Output when a parameter grid is set up\n output = \"Model Name: {0}, Estimator: {1}\\n\\nGrid Search Arguments: {2}\\n\\nParameter Grid: {3}\\n\\n\".\\\n format(self.model.name, self.model.estimator, self.model.grid_search_args, self.model.param_grid)\n \n elif step == 10:\n # self.model.estimator_kwargs['architecture']\n output = \"\\nKeras architecture added to Model {0}:\\n\\n{1}\\n\\n\".format(self.model.name,\\\n self.model.architecture.to_string())\n\n elif step == 11:\n # Output after adding lag observations to input data\n output = \"Lag observations added ({0} per sample). New input shape of X is {1}.\\n\\n\".format(self.model.lags, data.shape)\n output += \"Feature Definitions:\\n{0}\\n\\n\".format(self.model.features_df.to_string())\n output += \"Sample Data:\\n{0}\\n...\\n{1}\\n\\n\".format(data.head(5).to_string(), data.tail(5).to_string())\n \n sys.stdout.write(output)\n with open(self.logfile, mode, encoding='utf-8') as f:\n f.write(output)", "def instantiate_logs(self):\n\n # Log file\n timestamp = datetime.now().strftime(\"%Y-%m-%dT%H%M%S\")\n self.log_dir = os.path.join(\"experiment_logs\", timestamp)\n\n # Create Log directory if it does not exist\n try:\n os.makedirs(self.log_dir)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n\n self.info_file = os.path.join(self.log_dir, \"run_info.txt\")\n self.log_file = os.path.join(self.log_dir, \"data.csv\")\n\n with open(self.info_file, \"w+\") as f:\n f.write(\"Period = {}\\nMaxVel = {}\".format(self.period, self.max_vel))\n\n self.log_file_desc = open(self.log_file, \"w+\")\n self.log_file_desc.write(\"t, current_vel, current_h_angle, current_v_angle, x, y, z, roll, pitch, yaw\")", "def __init__(self):\r\n self.file_object = './ExecutionLogs/PredictFromModel.log'\r\n\r\n \"\"\" Initialize logger class for log writing \"\"\"\r\n self.log_writer = logger.logger(self.file_object)", "def setup_logging():\n logging.basicConfig(\n filename=os.getenv(\"SERVICE_LOG\", \"server.log\"),\n level=logging.DEBUG,\n format=\"%(levelname)s: %(asctime)s pid:%(process)s module:%(module)s %(message)s\",\n datefmt=\"%d/%m/%y %H:%M:%S\",\n )", "def configure_logger(debug=False, logfile=None, verbose=False):\n root = logging.getLogger()\n root.setLevel(logging.DEBUG)\n\n stream = logging.StreamHandler(sys.stdout)\n if debug and verbose:\n stream.setLevel(logging.DEBUG)\n elif verbose:\n stream.setLevel(logging.INFO)\n else:\n stream.setLevel(logging.WARNING)\n\n stream.setFormatter(logging.Formatter(\"%(asctime)s - %(message)s\"))\n root.addHandler(stream)\n\n if logfile:\n file = logging.FileHandler(logfile, \"a\")\n if debug:\n file.setLevel(logging.DEBUG)\n else:\n file.setLevel(logging.INFO)\n\n file.setFormatter(logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\"))\n root.addHandler(file)", "def _initialize_logging(self):\n LOG_CFG = os.environ.get('LOG_CFG', 'LOCAL')\n 
configure_logging(LOG_CFG)\n self.logger = logging.getLogger(self.__class__.__name__)", "def init_logging(filepath=os.path.dirname(os.path.abspath(__file__))):\r\n logfile = filepath\r\n logfile += '\\\\Logs\\\\'\r\n if not os.path.isdir(logfile):\r\n os.makedirs(logfile)\r\n logfile += datetime.now().strftime('%m-%d-%Y') + '_File_Moving.log'\r\n with open(logfile, 'w'):\r\n pass\r\n logging.basicConfig(filename=logfile, level=logging.DEBUG,\r\n format='%(levelname)s: -- %(asctime)s -- %(message)s',\r\n datefmt='%m/%d/%Y %H:%M:%S %p')", "def send_log():\n log.info(f\"UUID={UUID}\")\n log.info(f\"SPLIT={SPLIT}\")\n log.info(f\"BATCH_SIZE={BATCH_SIZE}\")\n log.info(f\"EPOCHS={EPOCHS}\")\n log.info(f\"PATIENCE={PATIENCE}\")\n log.info(f\"X_FREQ={X_FREQ}\")\n log.info(f\"LOOK_BACK={LOOK_BACK}\")\n log.info(f\"LOOK_AHEAD={LOOK_AHEAD}\")\n log.info(f\"KERNEL_SIZE={KERNEL_SIZE}\")\n log.info(f\"FILTERS={FILTERS}\")\n log.info(f\"L1L2={L1L2}\")\n log.info(f\"D1={D1}\")\n log.info(f\"D2={D2}\")\n log.info(f\"DOUT={DOUT}\")\n log.info(f\"PLOT={PLOT}\")\n log.info(f\"SHUFFLE={SHUFFLE}\")", "def enable_logging(self, log_dest, prefix=\"\"):\n self.log_dest = log_dest\n self.log_prefix = prefix", "def config_logger(logger, name='log', filename_pattern=FILENAME_PATTERN):\n formatter = logging.Formatter(\n '%(levelname)s'\n '-%(asctime)s'\n '-%(pathname)s'\n '-%(funcName)s'\n '-%(lineno)d'\n ': %(message)s'\n )\n\n log_filename = os.path.expanduser(filename_pattern.format(name))\n log_dir = os.path.dirname(log_filename)\n if not os.path.isdir(log_dir):\n os.makedirs(log_dir)\n handler = logging.FileHandler(log_filename)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger('biosimulators-utils-' + name)\n logger.addHandler(handler)\n\n logger.log(logging.INFO, (\n '\\n'\n '\\n'\n '===============\\n'\n 'Log initialized\\n'\n '==============='\n ))", "def setup_logging():\n formatter = logging.Formatter(LOG_FORMAT)\n level = logging.INFO\n\n file_handler = logging.FileHandler('db.log')\n file_handler.setFormatter(formatter)\n file_handler.setLevel(level)\n\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(formatter)\n console_handler.setLevel(level)\n\n logger = logging.getLogger()\n logger.addHandler(file_handler)\n logger.addHandler(console_handler)\n\n logger.setLevel(level)", "def log_settings(config):\n LOGGER.propagate = False\n formatter = ViseronLogFormat(config.logging)\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n handler.addFilter(DuplicateFilter())\n LOGGER.addHandler(handler)\n\n LOGGER.setLevel(LOG_LEVELS[config.logging.level])\n logging.getLogger(\"apscheduler.scheduler\").setLevel(logging.ERROR)\n logging.getLogger(\"apscheduler.executors\").setLevel(logging.ERROR)", "def autolog(\n every_n_iter=1,\n log_models=True,\n disable=False,\n exclusive=False,\n disable_for_unsupported_versions=False,\n silent=False,\n): # pylint: disable=unused-argument\n # pylint: disable=E0611\n import tensorflow\n\n global _LOG_EVERY_N_STEPS\n _LOG_EVERY_N_STEPS = every_n_iter\n\n atexit.register(_flush_queue)\n\n if Version(tensorflow.__version__) < Version(\"1.12\"):\n warnings.warn(\"Could not log to MLflow. 
TensorFlow versions below 1.12 are not supported.\")\n return\n\n try:\n from tensorflow.python.summary.writer.event_file_writer import EventFileWriter\n from tensorflow.python.summary.writer.event_file_writer_v2 import EventFileWriterV2\n from tensorflow.python.saved_model import tag_constants\n from tensorflow.python.summary.writer.writer import FileWriter\n except ImportError:\n warnings.warn(\"Could not log to MLflow. TensorFlow versions below 1.12 are not supported.\")\n return\n\n def train(original, self, *args, **kwargs):\n active_run = mlflow.active_run()\n global _AUTOLOG_RUN_ID\n _AUTOLOG_RUN_ID = active_run.info.run_id\n\n # Checking step and max_step parameters for logging\n if len(args) >= 3:\n mlflow.log_param(\"steps\", args[2])\n if len(args) >= 4:\n mlflow.log_param(\"max_steps\", args[3])\n if \"steps\" in kwargs:\n mlflow.log_param(\"steps\", kwargs[\"steps\"])\n if \"max_steps\" in kwargs:\n mlflow.log_param(\"max_steps\", kwargs[\"max_steps\"])\n\n result = original(self, *args, **kwargs)\n\n # Flush the metrics queue after training completes\n _flush_queue()\n\n # Log Tensorboard event files as artifacts\n if os.path.exists(self.model_dir):\n for file in os.listdir(self.model_dir):\n if \"tfevents\" not in file:\n continue\n mlflow.log_artifact(\n local_path=os.path.join(self.model_dir, file),\n artifact_path=\"tensorboard_logs\",\n )\n return result\n\n def export_saved_model(original, self, *args, **kwargs):\n global _AUTOLOG_RUN_ID\n if _AUTOLOG_RUN_ID:\n _logger.info(\n \"Logging TensorFlow Estimator as MLflow Model to run with ID '%s'\", _AUTOLOG_RUN_ID\n )\n\n serialized = original(self, *args, **kwargs)\n\n def log_model_without_starting_new_run():\n \"\"\"\n Performs the exact same operations as `log_model` without starting a new run\n \"\"\"\n with TempDir() as tmp:\n artifact_path = \"model\"\n local_path = tmp.path(\"model\")\n mlflow_model = Model(artifact_path=artifact_path, run_id=_AUTOLOG_RUN_ID)\n save_model_kwargs = dict(\n tf_saved_model_dir=serialized.decode(\"utf-8\"),\n tf_meta_graph_tags=[tag_constants.SERVING],\n tf_signature_def_key=\"predict\",\n )\n save_model(path=local_path, mlflow_model=mlflow_model, **save_model_kwargs)\n client = MlflowClient()\n client.log_artifacts(_AUTOLOG_RUN_ID, local_path, artifact_path)\n\n try:\n client._record_logged_model(_AUTOLOG_RUN_ID, mlflow_model)\n except MlflowException:\n # We need to swallow all mlflow exceptions to maintain backwards\n # compatibility with older tracking servers. 
Only print out a warning\n # for now.\n _logger.warning(\n _LOG_MODEL_METADATA_WARNING_TEMPLATE,\n get_artifact_uri(_AUTOLOG_RUN_ID),\n )\n\n log_model_without_starting_new_run()\n\n _AUTOLOG_RUN_ID = None\n\n return serialized\n\n @picklable_exception_safe_function\n def _get_early_stop_callback(callbacks):\n for callback in callbacks:\n if isinstance(callback, tensorflow.keras.callbacks.EarlyStopping):\n return callback\n return None\n\n def _log_early_stop_callback_params(callback):\n if callback:\n try:\n earlystopping_params = {\n \"monitor\": callback.monitor,\n \"min_delta\": callback.min_delta,\n \"patience\": callback.patience,\n \"baseline\": callback.baseline,\n \"restore_best_weights\": callback.restore_best_weights,\n }\n mlflow.log_params(earlystopping_params)\n except Exception: # pylint: disable=W0703\n return\n\n def _get_early_stop_callback_attrs(callback):\n try:\n return callback.stopped_epoch, callback.restore_best_weights, callback.patience\n except Exception: # pylint: disable=W0703\n return None\n\n def _log_early_stop_callback_metrics(callback, history, metrics_logger):\n if callback is None or not callback.model.stop_training:\n return\n\n callback_attrs = _get_early_stop_callback_attrs(callback)\n if callback_attrs is None:\n return\n\n stopped_epoch, restore_best_weights, _ = callback_attrs\n metrics_logger.record_metrics({\"stopped_epoch\": stopped_epoch})\n\n if not restore_best_weights or callback.best_weights is None:\n return\n\n monitored_metric = history.history.get(callback.monitor)\n if not monitored_metric:\n return\n\n initial_epoch = history.epoch[0]\n # If `monitored_metric` contains multiple best values (e.g. [0.1, 0.1, 0.2] where 0.1 is\n # the minimum loss), the epoch corresponding to the first occurrence of the best value is\n # the best epoch. In keras > 2.6.0, the best epoch can be obtained via the `best_epoch`\n # attribute of an `EarlyStopping` instance: https://github.com/keras-team/keras/pull/15197\n restored_epoch = initial_epoch + monitored_metric.index(callback.best)\n metrics_logger.record_metrics({\"restored_epoch\": restored_epoch})\n restored_index = history.epoch.index(restored_epoch)\n restored_metrics = {\n key: metrics[restored_index] for key, metrics in history.history.items()\n }\n # Checking that a metric history exists\n metric_key = next(iter(history.history), None)\n if metric_key is not None:\n metrics_logger.record_metrics(restored_metrics, stopped_epoch + 1)\n\n class FitPatch(PatchFunction):\n def __init__(self):\n self.log_dir = None\n\n def _patch_implementation(\n self, original, inst, *args, **kwargs\n ): # pylint: disable=arguments-differ\n unlogged_params = [\"self\", \"x\", \"y\", \"callbacks\", \"validation_data\", \"verbose\"]\n\n log_fn_args_as_params(original, args, kwargs, unlogged_params)\n early_stop_callback = None\n\n run_id = mlflow.active_run().info.run_id\n with batch_metrics_logger(run_id) as metrics_logger:\n # Check if the 'callback' argument of fit() is set positionally\n if len(args) >= 6:\n # Convert the positional training function arguments to a list in order to\n # mutate the contents\n args = list(args)\n # Make a shallow copy of the preexisting callbacks to avoid permanently\n # modifying their contents for future training invocations. 
Introduce\n # TensorBoard & tf.keras callbacks if necessary\n callbacks = list(args[5])\n callbacks, self.log_dir = _setup_callbacks(\n callbacks, log_models, metrics_logger\n )\n # Replace the callbacks positional entry in the copied arguments and convert\n # the arguments back to tuple form for usage in the training function\n args[5] = callbacks\n args = tuple(args)\n else:\n # Make a shallow copy of the preexisting callbacks and introduce TensorBoard\n # & tf.keras callbacks if necessary\n callbacks = list(kwargs.get(\"callbacks\") or [])\n kwargs[\"callbacks\"], self.log_dir = _setup_callbacks(\n callbacks, log_models, metrics_logger\n )\n\n early_stop_callback = _get_early_stop_callback(callbacks)\n _log_early_stop_callback_params(early_stop_callback)\n\n history = original(inst, *args, **kwargs)\n\n _log_early_stop_callback_metrics(\n callback=early_stop_callback,\n history=history,\n metrics_logger=metrics_logger,\n )\n\n _flush_queue()\n mlflow.log_artifacts(\n local_dir=self.log_dir.location,\n artifact_path=\"tensorboard_logs\",\n )\n if self.log_dir.is_temp:\n shutil.rmtree(self.log_dir.location)\n\n return history\n\n def _on_exception(self, exception):\n if (\n self.log_dir is not None\n and self.log_dir.is_temp\n and os.path.exists(self.log_dir.location)\n ):\n shutil.rmtree(self.log_dir.location)\n\n class FitGeneratorPatch(PatchFunction):\n \"\"\"\n NOTE: `fit_generator()` is deprecated in TF >= 2.1.0 and simply wraps `fit()`.\n To avoid unintentional creation of nested MLflow runs caused by a patched\n `fit_generator()` method calling a patched `fit()` method, we only patch\n `fit_generator()` in TF < 2.1.0.\n \"\"\"\n\n def __init__(self):\n self.log_dir = None\n\n def _patch_implementation(\n self, original, inst, *args, **kwargs\n ): # pylint: disable=arguments-differ\n unlogged_params = [\"self\", \"generator\", \"callbacks\", \"validation_data\", \"verbose\"]\n\n log_fn_args_as_params(original, args, kwargs, unlogged_params)\n\n run_id = mlflow.active_run().info.run_id\n\n with batch_metrics_logger(run_id) as metrics_logger:\n # Check if the 'callback' argument of fit() is set positionally\n if len(args) >= 5:\n # Convert the positional training function arguments to a list in order to\n # mutate the contents\n args = list(args)\n # Make a shallow copy of the preexisting callbacks to avoid permanently\n # modifying their contents for future training invocations. 
Introduce\n # TensorBoard & tf.keras callbacks if necessary\n callbacks = list(args[4])\n callbacks, self.log_dir = _setup_callbacks(\n callbacks, log_models, metrics_logger\n )\n # Replace the callbacks positional entry in the copied arguments and convert\n # the arguments back to tuple form for usage in the training function\n args[4] = callbacks\n args = tuple(args)\n else:\n # Make a shallow copy of the preexisting callbacks and introduce TensorBoard\n # & tf.keras callbacks if necessary\n callbacks = list(kwargs.get(\"callbacks\") or [])\n kwargs[\"callbacks\"], self.log_dir = _setup_callbacks(\n callbacks, log_models, metrics_logger\n )\n\n result = original(inst, *args, **kwargs)\n\n _flush_queue()\n mlflow.log_artifacts(local_dir=self.log_dir.location, artifact_path=\"tensorboard_logs\")\n if self.log_dir.is_temp:\n shutil.rmtree(self.log_dir.location)\n\n return result\n\n def _on_exception(self, exception):\n if (\n self.log_dir is not None\n and self.log_dir.is_temp\n and os.path.exists(self.log_dir.location)\n ):\n shutil.rmtree(self.log_dir.location)\n\n def add_event(original, self, event):\n _log_event(event)\n return original(self, event)\n\n def add_summary(original, self, *args, **kwargs):\n result = original(self, *args, **kwargs)\n _flush_queue()\n return result\n\n managed = [\n (tensorflow.estimator.Estimator, \"train\", train),\n (tensorflow.keras.Model, \"fit\", FitPatch),\n ]\n\n if Version(tensorflow.__version__) < Version(\"2.1.0\"):\n # `fit_generator()` is deprecated in TF >= 2.1.0 and simply wraps `fit()`.\n # To avoid unintentional creation of nested MLflow runs caused by a patched\n # `fit_generator()` method calling a patched `fit()` method, we only patch\n # `fit_generator()` in TF < 2.1.0\n managed.append((tensorflow.keras.Model, \"fit_generator\", FitGeneratorPatch))\n\n non_managed = [\n (EventFileWriter, \"add_event\", add_event),\n (EventFileWriterV2, \"add_event\", add_event),\n (FileWriter, \"add_summary\", add_summary),\n (tensorflow.estimator.Estimator, \"export_saved_model\", export_saved_model),\n (tensorflow.estimator.Estimator, \"export_savedmodel\", export_saved_model),\n ]\n\n # Add compat.v1 Estimator patching for versions of tensfor that are 2.0+.\n if Version(tensorflow.__version__) >= Version(\"2.0.0\"):\n old_estimator_class = tensorflow.compat.v1.estimator.Estimator\n v1_train = (old_estimator_class, \"train\", train)\n v1_export_saved_model = (old_estimator_class, \"export_saved_model\", export_saved_model)\n v1_export_savedmodel = (old_estimator_class, \"export_savedmodel\", export_saved_model)\n\n managed.append(v1_train)\n non_managed.append(v1_export_saved_model)\n non_managed.append(v1_export_savedmodel)\n\n for p in managed:\n safe_patch(FLAVOR_NAME, *p, manage_run=True)\n\n for p in non_managed:\n safe_patch(FLAVOR_NAME, *p)", "def _configure_logging(self):\n logger = logging.getLogger('BatchAppsBlender')\n\n console_format = logging.Formatter(\n \"BatchApps: [%(levelname)s] %(message)s\")\n\n file_format = logging.Formatter(\n \"%(asctime)-15s [%(levelname)s] %(module)s: %(message)s\")\n\n console_logging = logging.StreamHandler()\n console_logging.setFormatter(console_format)\n logger.addHandler(console_logging)\n\n logfile = os.path.join(self.props.data_dir, \"batch_apps.log\")\n\n file_logging = logging.FileHandler(logfile)\n file_logging.setFormatter(file_format)\n logger.addHandler(file_logging)\n\n logger.setLevel(int(self.props.log_level))\n return logger" ]
[ "0.70399964", "0.6622312", "0.65885615", "0.65304697", "0.65240383", "0.6519144", "0.6495567", "0.6466729", "0.64612895", "0.6407961", "0.63969386", "0.63589936", "0.6348098", "0.6338948", "0.63224775", "0.6312327", "0.6312132", "0.63118017", "0.6299318", "0.6270785", "0.6250178", "0.62284213", "0.6216222", "0.6185489", "0.6175854", "0.6170327", "0.61596316", "0.6146417", "0.61109585", "0.6110498", "0.61104816", "0.6108161", "0.6099329", "0.60909915", "0.6090552", "0.6076548", "0.6075772", "0.60714483", "0.6068392", "0.6067186", "0.6065199", "0.6061248", "0.60476756", "0.6042017", "0.60415924", "0.60355335", "0.60078466", "0.5999489", "0.5999119", "0.5990776", "0.59902877", "0.59872454", "0.5983561", "0.59726894", "0.59705496", "0.59705496", "0.59705496", "0.5967591", "0.596575", "0.5965589", "0.5955951", "0.5939639", "0.59349", "0.5926834", "0.5924582", "0.59125525", "0.5901398", "0.5894757", "0.58841646", "0.5870775", "0.58692306", "0.5867839", "0.5860622", "0.585891", "0.58544666", "0.58492094", "0.5847256", "0.5845907", "0.5844788", "0.58433825", "0.5843013", "0.5842794", "0.5841009", "0.58397365", "0.58379066", "0.5836929", "0.5833978", "0.5833872", "0.5830069", "0.58296335", "0.5824715", "0.5822257", "0.58204126", "0.5806433", "0.5804612", "0.57977116", "0.57963926", "0.57952523", "0.57921374", "0.57863045" ]
0.64892864
7
Load data using PyTorch DataLoader.
def load_data(config, vocab, proportion: float=0.7, max_len: int=256, partition: dict=None, labels: dict=None): # columns if meta: [0] unique ID, [1] text, [2] metadata, [3] label # columns if no meta: [0] unique ID, [1] text, [2] label if config["metadata"]: unique_id_col = 0 text_col = 1 metadata_col = 2 label_col = 3 else: unique_id_col = 0 text_col = 1 label_col = 3 dataset = pd.read_csv(config['train_file'], header=None, sep='\t') print(dataset) # below fix null values wrecking encode_plus # convert labels to integer and drop nas dataset.iloc[:, label_col] = pd.to_numeric(dataset.iloc[:, label_col], errors = 'coerce' ) dataset = dataset[~ dataset[text_col].isnull()] # recreate the first column with the reset index. dataset = dataset[(dataset.iloc[:, label_col] == 1) | (dataset.iloc[:, label_col] == 0)] \ .reset_index().reset_index().drop(columns = ['index', 0]).rename(columns = {'level_0': 0}) print(dataset) # create list of train/valid IDs if not provided if not partition and not labels: ids = list(dataset.iloc[:,unique_id_col]) total_len = len(ids) np.random.shuffle(ids) labels = {} # metadata = {} partition = {'train': ids[ :int(total_len * 0.7)], 'valid': ids[int(total_len * 0.7): ] } for i in dataset.iloc[:, unique_id_col]: labels[i] = dataset.iloc[i][label_col] # set parameters for DataLoader -- num_workers = cores params = {'batch_size': 32, 'shuffle': True, 'num_workers': 0 } tokenizer = AutoTokenizer.from_pretrained(vocab) dataset[text_col] = dataset[text_col].apply(lambda x: tokenizer.encode_plus(str(x), \ max_length=max_len, \ add_special_tokens=True, \ pad_to_max_length=True, \ truncation=True)) if config['metadata']: # glove for metadata preprocessing glove = torchtext.vocab.GloVe(name="6B", dim=50) dataset[metadata_col] = dataset[metadata_col].apply(lambda y: __pad__(str(y).split(" "), 30)) dataset[metadata_col] = dataset[metadata_col].apply(lambda z: __glove_embed__(z, glove)) train_data = dataset[dataset[unique_id_col].isin(partition['train'])] valid_data = dataset[dataset[unique_id_col].isin(partition['valid'])] # create train/valid generators training_set = AbstractDataset(data=train_data, labels=labels, metadata=config['metadata'], list_IDs=partition['train'], max_len = max_len) training_generator = DataLoader(training_set, **params) validation_set = AbstractDataset(data=valid_data, labels=labels, metadata=config['metadata'], list_IDs=partition['valid'],max_len = max_len) validation_generator = DataLoader(validation_set, **params) return partition, training_generator, validation_generator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_loader(data, train=True):\n\n loader_config = {\n 'batch_size':64,\n 'shuffle':train\n }\n \n return torch.utils.data.DataLoader(data, **loader_config)", "def train_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_train, **self.dl_kwargs)", "def load_torch_data(load_data_func):\n\n def torch_loader(dataset, data_path, batch_size, shuffle=True, cuda_device=None, num_workers=1):\n (train_data, val_data), (train_labels, val_labels), label_names = load_data_func(dataset, data_path)\n\n kwargs = {'num_workers': num_workers, 'pin_memory': True} if cuda_device is not None else {}\n kwargs['drop_last'] = True\n\n if type(train_data) == numpy.ndarray:\n train_dataset = TensorDataset(torch.from_numpy(train_data), torch.from_numpy(train_labels))\n val_dataset = TensorDataset(torch.from_numpy(val_data), torch.from_numpy(val_labels))\n elif type(train_data) == scipy.sparse.csr.csr_matrix:\n from sklearn.feature_extraction.text import TfidfTransformer\n tfidf_trans = TfidfTransformer(norm=None)\n tfidf_trans.fit(train_data)\n train_dataset = SparseDataset(train_data, tfidf_trans.idf_)\n val_dataset = SparseDataset(val_data, tfidf_trans.idf_)\n else:\n train_dataset = torchvision.datasets.ImageFolder(train_data)\n val_dataset = torchvision.datasets.ImageFolder(val_data)\n\n train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle, **kwargs)\n val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, **kwargs)\n\n return train_loader, val_loader, label_names\n\n return torch_loader", "def torch_dataset_loader(dataset, batch_size, shuffle, kwargs):\n loader = DataLoader(TorchData(dataset),\n batch_size=batch_size,\n shuffle=shuffle,\n **kwargs)\n return loader", "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def dataloaders():\n # train data path\n data_train = '../dataset/train/'\n # set transformations\n train_transforms = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n \n train_data = datasets.ImageFolder(data_train, 
transform = train_transforms)\n trainloader = torch.utils.data.DataLoader(train_data, batch_size = 16, shuffle = True)\n \n return trainloader", "def get_loader(data, json, batch_size, shuffle, num_workers):\n dataset = FinNumDataset(data, json)\n\n data_loader = torch.utils.data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate_fn)\n return data_loader", "def get_data_loader(batch_size=10, num_workers=2):\n \n data_loader = torch.utils.data.DataLoader(dataset=TempuckeyDataSet(),\n batch_size=batch_size,\n shuffle=True,\n pin_memory=True,\n num_workers=num_workers,\n collate_fn=collate)\n return data_loader", "def data_loader(root, batch_size=64):\n input_transform = get_transform()\n dataset = CustomDataset(root, input_transform)\n return data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=False)", "def test_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_test, **self.dl_kwargs)", "def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader", "def get_dataloaders(args):\n if args.dataset == 'heat':\n dataset_class = heat.HeatDiffusionDataset\n else:\n raise ValueError(f'Unknown dataset {args.dataset}')\n train_dataset = dataset_class(\n dataset_class.get_train_path(args.data_path), args, train=True)\n if args.dist:\n train_sampler = torch.utils.data.distributed.DistributedSampler(\n train_dataset)\n else:\n train_sampler = torch.utils.data.RandomSampler(train_dataset)\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, num_workers=args.workers,\n sampler=train_sampler, pin_memory=True, drop_last=args.drop_last)\n if not args.no_eval:\n validation_dataset = dataset_class(\n dataset_class.get_validation_path(args.data_path), args, train=False)\n if args.dist:\n validation_sampler = torch.utils.data.distributed.DistributedSampler(\n validation_dataset, shuffle=False)\n else:\n validation_sampler = torch.utils.data.SequentialSampler(\n validation_dataset)\n validation_loader = torch.utils.data.DataLoader(\n validation_dataset, batch_size=args.batch_size,\n num_workers=args.workers, sampler=validation_sampler,\n pin_memory=True, drop_last=args.drop_last)\n\n test_dataset = dataset_class(\n dataset_class.get_test_path(args.data_path), args, train=False)\n if args.dist:\n test_sampler = torch.utils.data.distributed.DistributedSampler(\n test_dataset, shuffle=False)\n else:\n test_sampler = torch.utils.data.SequentialSampler(\n test_dataset)\n test_loader = torch.utils.data.DataLoader(\n test_dataset, batch_size=args.batch_size,\n num_workers=args.workers, sampler=test_sampler,\n pin_memory=True, 
drop_last=args.drop_last)\n else:\n validation_loader = None\n test_loader = None\n\n # Update the data shape if needed.\n if args.data_shape is None:\n args.data_shape = train_dataset.get_shape()\n if args.data_target_shape is None:\n args.data_target_shape = train_dataset.get_target_shape()\n\n return train_loader, validation_loader, test_loader", "def load_data(dataset, root, batch_size, workers):\n # Data transform\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n train_transform = transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n query_transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])\n\n # Construct data loader\n index = dataset.index(\"IF\")\n sub = dataset[index:]\n if sub == 'IF100':\n train_dir = os.path.join(root, 'train-IF100')\n elif sub == 'IF50':\n train_dir = os.path.join(root, 'train-IF50')\n elif sub == 'IF20':\n train_dir = os.path.join(root, 'train-IF20')\n elif sub == 'IF10':\n train_dir = os.path.join(root, 'train-IF10')\n elif sub == 'IF1':\n train_dir = os.path.join(root, 'train-IF1')\n else:\n print('train path error')\n return\n # train_dir = os.path.join(root, 'train')\n query_dir = os.path.join(root, 'query')\n database_dir = os.path.join(root, 'database')\n\n train_dataset = ImagenetDataset(\n train_dir,\n transform=train_transform,\n targets_transform=Onehot(100),\n )\n\n train_dataloader = DataLoader(\n train_dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=workers,\n pin_memory=True,\n )\n\n query_dataset = ImagenetDataset(\n query_dir,\n transform=query_transform,\n targets_transform=Onehot(100),\n )\n\n query_dataloader = DataLoader(\n query_dataset,\n batch_size=batch_size,\n num_workers=workers,\n pin_memory=True,\n )\n\n database_dataset = ImagenetDataset(\n database_dir,\n transform=query_transform,\n targets_transform=Onehot(100),\n )\n\n database_dataloader = DataLoader(\n database_dataset,\n batch_size=batch_size,\n num_workers=workers,\n pin_memory=True,\n )\n\n return train_dataloader, query_dataloader, database_dataloader", "def load_dataloaders(args):\n logger.info(\"Loading dataloaders...\")\n p_path = os.path.join(\"./data/\", \"df_unencoded.pkl\")\n train_path = os.path.join(\"./data/\", \"df_encoded.pkl\")\n if (not os.path.isfile(p_path)) and (not os.path.isfile(train_path)):\n df = get_data(args, load_extracted=False)\n elif os.path.isfile(p_path) and (not os.path.isfile(train_path)):\n df = get_data(args, load_extracted=True)\n elif os.path.isfile(train_path):\n df = load_pickle(\"df_encoded.pkl\")\n \n # Train-Test split\n msk = np.random.rand(len(df)) < args.train_test_ratio\n trainset = df[msk]\n testset = df[~msk]\n \n trainset = text_dataset(trainset, args)\n max_features_length = trainset.max_x_len\n max_seq_len = trainset.max_y_len\n train_length = len(trainset)\n train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True,\\\n num_workers=0, collate_fn=Pad_Sequence(), pin_memory=False)\n \n testset = text_dataset(testset, args)\n test_length = len(testset)\n test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=True,\\\n num_workers=0, collate_fn=Pad_Sequence(), pin_memory=False)\n return train_loader, train_length, max_features_length, max_seq_len, test_loader, test_length", "def load_data(path):\n # Training Images Details\n IMG_SIZE = 224 # Size of images used for 
training\n IMG_MEAN = [0.485, 0.456, 0.406] # image normalization mean\n IMG_SDEV = [0.229, 0.224, 0.225] # image normalization standard deviation\n\n # Training phases\n phases = ['train', 'valid', 'test']\n\n # Define data locations\n data_dir = {n: path + n for n in phases}\n\n # Define transforms for the training, validation, and testing sets\n data_transforms = {\n 'train':\n transforms.Compose([\n transforms.RandomRotation(30),\n transforms.RandomResizedCrop(IMG_SIZE),\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.ToTensor(),\n transforms.Normalize(IMG_MEAN, IMG_SDEV)]),\n 'valid':\n transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(IMG_SIZE),\n transforms.ToTensor(),\n transforms.Normalize(IMG_MEAN, IMG_SDEV)]),\n 'test':\n transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(IMG_SIZE),\n transforms.ToTensor(),\n transforms.Normalize(IMG_MEAN, IMG_SDEV)])\n }\n\n # Load the datasets\n image_datasets = {n: datasets.ImageFolder(\n data_dir[n], transform=data_transforms[n])\n for n in phases}\n\n # Create the PyTorch dataloaders\n dataloaders = {n: torch.utils.data.DataLoader(\n image_datasets[n], batch_size=64, shuffle=True)\n for n in phases}\n\n # mapping of classes to training indices\n class_to_idx = image_datasets['train'].class_to_idx\n\n return dataloaders, class_to_idx", "def create_dataloader(data):\r\n input_ids = torch.LongTensor([sent['input_ids'] for sent in data])\r\n input_mask = torch.LongTensor([sent['input_mask'] for sent in data])\r\n segment_ids = torch.LongTensor([sent['segment_ids'] for sent in data])\r\n label_ids = torch.LongTensor([sent['label_ids'] for sent in data])\r\n\r\n dataset = TensorDataset(input_ids, input_mask, segment_ids, label_ids)\r\n\r\n train_sampler = RandomSampler(dataset)\r\n dataloader = DataLoader(dataset, sampler=train_sampler, batch_size=BATCH_SIZE)\r\n\r\n return dataloader", "def load_data(dataset_class, batch_size, shuffle=True, num_workers=4):\n loader = torch.utils.data.TensorDataset(dataset_class.data.float(),\n dataset_class.labels.long(),\n dataset_class.adjacent_matrix.float())\n\n loader_dataset = torch.utils.data.DataLoader(loader,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers)\n return loader_dataset", "def train_dataloader(self) -> DataLoader:\n return self._custom_data_loader()", "def load_data(self):\n\n self._load_train_data()\n self._load_test_data()", "def _init_train_loader(self):\n # Choose the right dataset type\n if self.config_args[\"num_members\"] > 1:\n class_dataset_wrapper = dataset_wrapper.MixMoDataset\n else:\n class_dataset_wrapper = dataset_wrapper.MSDADataset\n\n # Load augmentations\n self.traindatasetwrapper = class_dataset_wrapper(\n dataset=self.train_dataset,\n num_classes=int(self.config_args[\"data\"][\"num_classes\"]),\n num_members=self.config_args[\"num_members\"],\n dict_config=self.config_args[\"training\"][\"dataset_wrapper\"],\n properties=self.properties\n )\n\n # Build standard sampler\n _train_sampler = torch.utils.data.sampler.RandomSampler(\n data_source=self.traindatasetwrapper, ## only needed for its length\n num_samples=None,\n replacement=False,\n )\n\n # Wrap it with the repeating sampler used for multi-input models\n batch_sampler = batch_repetition_sampler.BatchRepetitionSampler(\n sampler=_train_sampler,\n batch_size=self.batch_size,\n num_members=self.config_args[\"num_members\"],\n drop_last=True,\n config_batch_sampler=self.config_args[\"training\"][\"batch_sampler\"]\n )\n\n self.train_loader = 
torch.utils.data.DataLoader(\n self.traindatasetwrapper,\n batch_sampler=batch_sampler,\n num_workers=self.num_workers,\n batch_size=1,\n shuffle=False,\n sampler=None,\n drop_last=False,\n pin_memory=True,\n )", "def train_dataloader(self):\r\n\r\n # transformation\r\n train_transform = Compose(\r\n [\r\n ApplyTransformToKey(\r\n key='video',\r\n transform=Compose(\r\n [\r\n UniformTemporalSubsample(8),\r\n Lambda(lambda x: x / 255.0),\r\n Normalize((0.45, 0.45, 0.45), (0.225, 0.225, 0.225)),\r\n RandomShortSideScale(min_size=256, max_size=320),\r\n RandomCrop(244),\r\n RandomHorizontalFlip(p=0.5),\r\n ]\r\n )\r\n )\r\n ]\r\n )\r\n\r\n train_dataset = pv.data.Kinetics(\r\n data_path=os.path.join(self._DATA_PATH, \"train\"),\r\n clip_sampler=pv.data.make_clip_sampler(\"random\", self._CLIP_DURATION),\r\n decode_audio=False,\r\n transform=train_transform\r\n )\r\n return torch.utils.data.DataLoader(\r\n train_dataset,\r\n batch_size=self._BATCH_SIZE,\r\n num_workers=self._NUM_WORKERS,\r\n )", "def view(\n self,\n collate_fn: Union[callable, str] = \"batch_of_g_and_y\",\n *args,\n **kwargs\n ):\n # provide default collate function\n if isinstance(collate_fn, str):\n collate_fn = getattr(self, collate_fn)\n\n return torch.utils.data.DataLoader(\n dataset=self,\n collate_fn=collate_fn,\n *args,\n **kwargs,\n )", "def _load(self, dataset):\n raise NotImplementedError('Loader {} does not support loading datasets.'.format(self.type()))", "def train_dataloader(self) -> DataLoader:\n return DataLoader(\n self.train_dataset,\n batch_size=self.hparams.batch_size,\n shuffle=True,\n num_workers=multiprocessing.cpu_count(),\n )", "def get_data_loader (imgs_path, labels, extra_info=None, transform=None, params=None):\n\n\n dt = BuildDataset(imgs_path, labels, extra_info, transform)\n\n # Checking the params values. 
If it's not defined in params of if params is None, the default values are described\n # below:\n batch_size = 30\n shuf = True\n num_workers = 4\n pin_memory = True\n\n # However, if the params is defined, we used the values described on it:\n if (params is not None):\n if ('batch_size' in params.keys()):\n batch_size = params['batch_size']\n if ('shuf' in params.keys()):\n shuf = params['shuf']\n if ('num_workers' in params.keys()):\n num_workers = params['num_workers']\n if ('pin_memory' in params.keys()):\n pin_memory = params['pin_memory']\n\n # Calling the dataloader\n dl = data.DataLoader (dataset=dt, batch_size=batch_size, shuffle=shuf, num_workers=num_workers,\n pin_memory=pin_memory)\n\n return dl", "def load_dataset(data_dir='flowers'):\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n \n # Apply transformations on training set, leave alone validation and testing sets:\n data_transforms = {\n \"training\" : transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])]),\n # For validation and tesing sets, since they are the \"unseen\" data that used to measure the model performance, so they should not be applied by any transformations, however, resizing is stil needed.\n \"validation\" : transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])]),\n \"testing\" : transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n }\n \n # Load datasets with ImageFolder:\n image_datasets = {\n \"training\" : datasets.ImageFolder(train_dir, transform = data_transforms[\"training\"]),\n \"validation\" : datasets.ImageFolder(valid_dir, transform = data_transforms[\"validation\"]),\n \"testing\" : datasets.ImageFolder(test_dir, transform = data_transforms[\"testing\"])\n }\n \n # Using the image datasets and the trainforms, define the dataloaders: \n dataloaders = {\n \"training\" : torch.utils.data.DataLoader(image_datasets[\"training\"], batch_size = 64, shuffle = True),\n \"validation\" : torch.utils.data.DataLoader(image_datasets[\"validation\"], batch_size = 64),\n \"testing\" : torch.utils.data.DataLoader(image_datasets[\"testing\"], batch_size = 64)\n }\n \n return (dataloaders['training'],\n dataloaders['validation'],\n dataloaders['testing'],\n image_datasets['training'],\n image_datasets['validation'],\n image_datasets['testing'])", "def load_data(data_dir):\n \n #Define training, validation, and testing directories, structured for use with ImageFolder Class\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n \n #Define image transforms for training, validation, and testing\n training_transforms = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n\n validation_transforms = transforms.Compose([transforms.Resize(224),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n\n testing_transforms = validation_transforms\n\n\n 
#Load the datasets with ImageFolder\n training_data = datasets.ImageFolder(train_dir, transform = training_transforms)\n validation_data = datasets.ImageFolder(valid_dir, transform = validation_transforms)\n testing_data = datasets.ImageFolder(test_dir, transform = testing_transforms)\n\n #Using the image datasets and the trainforms, define the dataloaders\n training_loader = torch.utils.data.DataLoader(training_data, batch_size = 64, shuffle = True)\n validation_loader = torch.utils.data.DataLoader(validation_data, batch_size = 64, shuffle = False)\n testing_loader = torch.utils.data.DataLoader(testing_data, batch_size = 64, shuffle = False)\n \n return training_loader, validation_loader, testing_loader", "def get_data_loader_from_data(cls, batch_size, X, Y, **kwargs):\n X_torch = torch.from_numpy(X).float()\n\n if (\n \"classification_problem\" in kwargs\n and kwargs[\"classification_problem\"] == False\n ):\n Y_torch = torch.from_numpy(Y).float()\n else:\n Y_torch = torch.from_numpy(Y).long()\n dataset = TensorDataset(X_torch, Y_torch)\n kwargs.pop(\"classification_problem\", None)\n return DataLoader(dataset, batch_size=batch_size, **kwargs)", "def _load_dataset(self, data_path, augmentation, batch_size):\n if path.split(data_path)[1] == \"\":\n # Deal with edge case where there's a \"/\" at the end of the path.\n data_path = path.split(data_path)[0]\n\n if path.split(data_path)[1].endswith(\"training\"):\n dataset_name = \"training dataset\"\n else:\n dataset_name = \"validation dataset\"\n\n start_time = time.time()\n self._update_status(\"Loading {}.\".format(dataset_name))\n\n\n dataset = MapillaryDataset(data_path, augmentation, self.iaa)\n data_loader = DataLoader(dataset,\n batch_size,\n shuffle=True)\n\n self._update_status(\"{} loaded. 
({} ms)\".format(\n dataset_name.capitalize(),\n int((time.time() - start_time) * 1000)))\n\n return data_loader", "def load_dataset():\n\n\n train_dd_loader = DailyDialogLoader(PATH_TO_TRAIN_DATA, load=False)\n train_dataloader = DataLoader(train_dd_loader, batch_size=16, shuffle=True, num_workers=0,\n collate_fn=PadCollate())\n\n test_dd_loader = DailyDialogLoader(PATH_TO_TEST_DATA, load=True)\n test_dataloader = DataLoader(test_dd_loader, batch_size=1, shuffle=False, num_workers=0,\n collate_fn=PadCollate())\n\n assert train_dd_loader.vocabulary.n_words == test_dd_loader.vocabulary.n_words\n\n return train_dd_loader, train_dataloader, test_dataloader", "def load_data(data_dir):\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n\n # define your transforms for the training, validation, and testing sets\n data_transforms_training = transforms.Compose([\n transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n\n data_transforms_validation = transforms.Compose([\n transforms.Resize(224),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n\n data_transforms_test = transforms.Compose([\n transforms.Resize(224),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n\n # Load the datasets with ImageFolder\n image_datasets_training = datasets.ImageFolder(train_dir, transform=data_transforms_training)\n image_datasets_validation = datasets.ImageFolder(valid_dir, transform=data_transforms_validation)\n image_datasets_test = datasets.ImageFolder(test_dir, transform=data_transforms_test)\n\n # Using the image datasets and the trainforms, define the dataloaders\n dataloaders_training = torch.utils.data.DataLoader(image_datasets_training, shuffle=True, batch_size=128)\n dataloaders_validation = torch.utils.data.DataLoader(image_datasets_validation, shuffle=True, batch_size=128)\n dataloaders_test = torch.utils.data.DataLoader(image_datasets_test, shuffle=True, batch_size=128)\n\n return {\"training_dataloader\": dataloaders_training,\n \"validation_dataloader\": dataloaders_validation,\n \"testing_dataloader\": dataloaders_test,\n \"class_to_idx\": image_datasets_training.class_to_idx}", "def get_loader(\n data_source: Iterable[dict],\n open_fn: Callable,\n dict_transform: Callable = None,\n sampler=None,\n collate_fn: Callable = default_collate_fn,\n batch_size: int = 32,\n num_workers: int = 4,\n shuffle: bool = False,\n drop_last: bool = False,\n):\n from catalyst.data.dataset import ListDataset\n\n dataset = ListDataset(\n list_data=data_source, open_fn=open_fn, dict_transform=dict_transform,\n )\n loader = torch.utils.data.DataLoader(\n dataset=dataset,\n sampler=sampler,\n collate_fn=collate_fn,\n batch_size=batch_size,\n num_workers=num_workers,\n shuffle=shuffle,\n pin_memory=torch.cuda.is_available(),\n drop_last=drop_last,\n )\n return loader", "def data_loader(\n self, batch_size: int = 1, iter_steps: int = 0, batch_as_list: bool = True\n ) -> DataLoader:\n data = self.data\n datasets = []\n\n for _, dat in data.items():\n datasets.append(dat.dataset())\n\n if len(datasets) < 1:\n raise FileNotFoundError(\n \"no datasets available for this model to create a loader from\"\n )\n\n return DataLoader(\n *datasets,\n batch_size=batch_size,\n 
iter_steps=iter_steps,\n batch_as_list=batch_as_list,\n )", "def val_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_valid, **self.dl_kwargs)", "def _load_train_data(self):\n\n self.train_loader = data.Train_loader(self.N_max, self.n_per_conn,\n self.data_path, self.device)\n self.train_loader.load_data()\n\n # load mean and std\n scc_mean_std = np.loadtxt(\n os.path.join(self.data_path, 'scc_mean_std.csv'), delimiter=',')\n self.mean = torch.Tensor(scc_mean_std[0])\n self.std = torch.Tensor(scc_mean_std[1])", "def get_loader(dataset='train.txt', crop_size=128, image_size=28, batch_size=2, mode='train', num_workers=1): \n transform = [] \n if mode == 'train': \n transform.append(transforms.RandomHorizontalFlip()) \n transform.append(transforms.CenterCrop(crop_size)) \n transform.append(transforms.Resize(image_size)) \n transform.append(transforms.ToTensor()) \n transform.append(transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))) \n transform = transforms.Compose(transform) \n train_data=MyDataset(txt=dataset, transform=transform) \n data_loader = DataLoader(dataset=train_data, \n batch_size=batch_size, \n shuffle=(mode=='train'), \n num_workers=num_workers) \n return data_loader", "def data_loader(edges,features,y):\n\n\n edge_index = torch.tensor(edges, dtype=torch.long)\n edge_index = edge_index.t().contiguous()\n x = torch.tensor(features.todense(), dtype=torch.float)\n\n y = torch.tensor(y)\n\n data = Data(x=x, edge_index=edge_index, y = y)\n\n return data", "def load_data(batch_size=batch_size):\n trainset = LibriSpeechDataset(training_set, int(LIBRISPEECH_SAMPLING_RATE * n_seconds))\n testset = LibriSpeechDataset(validation_set, int(LIBRISPEECH_SAMPLING_RATE * n_seconds), stochastic=False)\n\n train_loader = DataLoader(trainset, batch_size=batch_size, num_workers=1, shuffle=True, drop_last=True)\n test_loader = DataLoader(testset, batch_size=1, num_workers=1, drop_last=True)\n\n return train_loader, test_loader", "def get_each_loader(data_path, batch_size, trn_negnum, shuffle=True, num_workers=0):\n \n dataset = ML_Dataset(data_path, trn_negnum)\n \n if data_path.endswith('trn') == True:\n collate = dataset.train_collate\n else:\n collate = test_collate\n\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate)\n\n return data_loader", "def get_train_loader(batch_size, train_set, train_sampler):\n train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, sampler=train_sampler, num_workers=4)\n\n return train_loader", "def make_standard_loader(self, dataset):\n return torch.utils.data.DataLoader(\n dataset,\n batch_size=self.batch_size,\n shuffle=False,\n drop_last=False,\n pin_memory=not (cfg.DEBUG > 0),\n num_workers=self.num_workers,\n )", "def load_dataset(args, corpus_type, shuffle):\n assert corpus_type in [\"train\", \"valid\", \"test\"]\n\n def _lazy_dataset_loader(pt_file, corpus_type):\n dataset = torch.load(pt_file)\n logger.info('Loading %s dataset from %s, number of examples: %d' %\n (corpus_type, pt_file, len(dataset)))\n return dataset\n\n # Sort the glob output by file name (by increasing indexes).\n pts = sorted(glob.glob(args.data_path + '.' + corpus_type + '.[0-9]*.pt'))\n if pts:\n if (shuffle):\n random.shuffle(pts)\n\n for pt in pts:\n yield _lazy_dataset_loader(pt, corpus_type)\n else:\n # Only one inputters.*Dataset, simple!\n pt = args.data_path + '.' 
+ corpus_type + '.pt'\n yield _lazy_dataset_loader(pt, corpus_type)", "def data_loaders(args):\n\n transform = transforms.Compose([\n transforms.Resize(64),\n transforms.ToTensor(),\n lambda image: (image - 0.5) * 2\n ])\n\n train_mnist = datasets.MNIST(\n root=args.database_root,\n train=True,\n download=True,\n transform=transform\n )\n train_loader = DataLoader(\n dataset=train_mnist,\n batch_size=args.train_batch_size,\n shuffle=True,\n num_workers=1,\n pin_memory=True\n )\n\n test_mnist = datasets.MNIST(\n root=args.database_root,\n train=False,\n download=True,\n transform=transform\n )\n test_loader = DataLoader(\n dataset=test_mnist,\n batch_size=args.test_batch_size,\n shuffle=True,\n num_workers=1,\n pin_memory=True\n )\n\n return train_loader, test_loader", "def prepare_data(self,d):\n train_loaders, train_iters = {}, {}\n unlabeled_loaders, unlabeled_iters = {}, {}\n for domain in opt.domains:\n #CONVERT TO FLOAT32\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))\n train = data_utils.TensorDataset(features,target)\n train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)\n train_iters[domain] = iter(train_loaders[domain])\n for domain in opt.unlabeled_domains:\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))\n uset = data_utils.TensorDataset(features,target)\n unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)\n unlabeled_iters[domain] = iter(unlabeled_loaders[domain])\n \n return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters", "def get_data_loaders(args, tokenizer):\n personachat = get_dataset(tokenizer, args.dataset_path, args.dataset_cache, args.train_lang)\n _ = personachat.pop(\"test\", None)\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": [], \"valid\": []}\n\n if args.train_lang in [\"En\", \"Fr\", \"It\", \"Id\", \"Jp\", \"Ko\", \"Zh\"]: #monolingual data\n for dataset_name, dataset in personachat.items():\n for dial in dataset[args.train_lang]: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lm_labels = True)\n datasets[dataset_name].append(instance) \n else: #multilingual data\n for dataset_name, dataset in personachat.items():\n for lang, dials in dataset.items():\n for dial in dials: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lang_id=\"<{}>\".format(lang.lower()), lm_labels = True)\n datasets[dataset_name].append(instance) #all langs together\n\n\n logger.info(\"Build train and validation dataloaders\")\n train_dataset = DatasetTrain(datasets[\"train\"])\n valid_dataset = DatasetTrain(datasets[\"valid\"])\n\n #logger.info(\"Build train and validation dataloaders\")\n #train_dataset, valid_dataset = TensorDataset(*tensor_datasets[\"train\"]), TensorDataset(*tensor_datasets[\"valid\"])\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if args.distributed else None\n valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if args.distributed else None\n train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, shuffle=(not args.distributed), collate_fn=collate_fn)\n 
valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=args.valid_batch_size, shuffle=False, collate_fn=collate_fn)\n\n # logger.info(\"Train dataset (Batch, Candidates, Seq length): {}\".format(train_dataset.tensors[0].shape))\n # #logger.info(\"Train dataset (Batch, Candidates, Seq length): {}\".format(train_dataset.tensors[1].shape))\n # logger.info(\"Valid dataset (Batch, Candidates, Seq length): {}\".format(valid_dataset.tensors[0].shape))\n logger.info(\"Train dataset length: {}\".format(len(train_dataset)))\n logger.info(\"Valid dataset length: {}\".format(len(valid_dataset)))\n return train_loader, valid_loader, train_sampler, valid_sampler", "def get_dataloader(hp: HParams) \\\n -> Tuple[torch.utils.data.DataLoader, torch.utils.data.DataLoader, int]:\n if hp.data.dataset == \"podcast\":\n dataset = podcast.PODCAST(root=hp.data.path,\n audio_folder=hp.data.audio_folder,\n text_file=hp.data.text_file)\n length = len(dataset)\n train_length = int(0.9 * length)\n train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_length,\n length - train_length])\n\n # https://towardsdatascience.com/7-tips-for-squeezing-maximum-performance-from-pytorch-ca4a40951259\n train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=hp.training.batch_size,\n shuffle=False,\n num_workers=hp.training.num_workers,\n collate_fn=collatedata.AudioCollatePodcast(),\n pin_memory=True)\n test_dataloader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=hp.training.batch_size,\n shuffle=False,\n num_workers=hp.training.num_workers,\n collate_fn=collatedata.AudioCollatePodcast(),\n pin_memory=True)\n return train_dataloader, test_dataloader, int(0.9 * length)\n\n elif hp.data.dataset == \"librispeech\":\n Path(hp.data.path).mkdir(parents=True, exist_ok=True)\n dataset = librispeech.download_data(root=hp.data.path, url=hp.data.url)\n length = len(dataset)\n train_length = int(0.9 * length)\n train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_length,\n length - train_length])\n train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=hp.training.batch_size,\n shuffle=False,\n num_workers=hp.training.num_workers,\n collate_fn=collatedata.AudioCollatePodcast(),\n pin_memory=True)\n test_dataloader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=hp.training.batch_size,\n shuffle=False,\n num_workers=hp.training.num_workers,\n collate_fn=collatedata.AudioCollatePodcast(),\n pin_memory=True)\n return train_dataloader, test_dataloader, int(0.9 * length)\n\n elif hp.data.dataset == \"ljspeech\":\n Path(hp.data.path).mkdir(parents=True, exist_ok=True)\n dataset = ljspeech.download_data(root=hp.data.path)\n length = len(dataset)\n train_length = int(0.9 * length)\n train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_length,\n length - train_length])\n train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=hp.training.batch_size,\n shuffle=False,\n num_workers=hp.training.num_workers,\n collate_fn=collatedata.AudioCollatePodcast(),\n pin_memory=True)\n test_dataloader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=hp.training.batch_size,\n shuffle=False,\n num_workers=hp.training.num_workers,\n collate_fn=collatedata.AudioCollatePodcast(),\n pin_memory=True)\n\n return train_dataloader, test_dataloader, int(0.9 * length)\n\n else:\n raise Exception(f\"Dataset {hp.data.dataset} does not exist\")", "def 
make_dataloaders(params):\r\n transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])])\r\n\r\n transform_validation = transforms.Compose([transforms.ToTensor(),\r\n transforms.Normalize([0.4914, 0.4822, 0.4465],\r\n [0.2023, 0.1994, 0.2010])])\r\n\r\n transform_validation = transforms.Compose([transforms.ToTensor()])\r\n\r\n trainset = torchvision.datasets.CIFAR10(root=params['path'], train=True, transform=transform_train)\r\n testset = torchvision.datasets.CIFAR10(root=params['path'], train=False, transform=transform_validation)\r\n\r\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, num_workers=4)\r\n testloader = torch.utils.data.DataLoader(testset, batch_size=params['batch_size'], shuffle=False, num_workers=4)\r\n return trainloader, testloader", "def dataloader(self):\n return DataLoader", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)", "def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)", "def build_training_data_loader(self) -> DataLoader:\n pass", "def get_dataloader(\n path='data/interim/data.json',\n train_test_split=0.8,\n batch_size_train=5,\n batch_size_test=5,\n shuffle_train=True,\n perm_images=True,\n transform_train=True,\n transform_test=False,\n class_names=None):\n\n # use our dataset and defined transformations\n if class_names is None:\n dataset = ProdigyDataReader(path, get_transforms(train=transform_train))\n dataset_test = ProdigyDataReader(path, get_transforms(train=transform_test))\n else:\n dataset = ProdigyDataReader(path, get_transforms(train=transform_train),\n object_categories=class_names)\n dataset_test = ProdigyDataReader(path, get_transforms(train=transform_test),\n object_categories=class_names)\n\n # split the dataset in train and test set\n if perm_images:\n indices = torch.randperm(len(dataset)).tolist()\n else:\n indices = list(range(len(dataset)))\n\n len_train = int(len(indices) * train_test_split)\n dataset = torch.utils.data.Subset(dataset, indices[:len_train])\n dataset_test = torch.utils.data.Subset(dataset_test, indices[len_train:])\n\n # define training and validation data loaders\n data_loader = torch.utils.data.DataLoader(\n dataset, batch_size=batch_size_train, shuffle=shuffle_train, num_workers=0,\n collate_fn=collate_fn)\n\n data_loader_test = torch.utils.data.DataLoader(\n dataset_test, batch_size=batch_size_test, shuffle=False, num_workers=0,\n collate_fn=collate_fn)\n\n return [data_loader, data_loader_test]", "def load_torchvision_data(data, label, test_size = 0.2, valid_size=0.1, splits=None, shuffle=True,\n stratified=False, random_seed=None, batch_size = 64,\n maxsize = None, maxsize_test=None, num_workers = 0):\n\n x_train, x_test, y_train, y_test = train_test_split(data, label, test_size = test_size, stratify=label)\n train, test = CERDataset(x_train, y_train), CERDataset(x_test, y_test)\n\n if type(train.targets) is list or type(train.targets) is np.ndarray:\n train.targets = torch.LongTensor(train.targets)\n test.targets = 
torch.LongTensor(test.targets)\n\n if not hasattr(train, 'classes') or not train.classes:\n train.classes = sorted(torch.unique(train.targets).tolist())\n test.classes = sorted(torch.unique(train.targets).tolist())\n\n ### Data splitting\n fold_idxs = {}\n if splits is None and valid_size == 0:\n ## Only train\n fold_idxs['train'] = np.arange(len(train))\n elif splits is None and valid_size > 0:\n ## Train/Valid\n train_idx, valid_idx = random_index_split(len(train), 1-valid_size, (maxsize, None)) # No maxsize for validation\n fold_idxs['train'] = train_idx\n fold_idxs['valid'] = valid_idx\n\n for k, idxs in fold_idxs.items():\n if maxsize and maxsize < len(idxs):\n fold_idxs[k] = np.sort(np.random.choice(idxs, maxsize, replace = False))\n\n sampler_class = SubsetRandomSampler if shuffle else SubsetSampler\n fold_samplers = {k: sampler_class(idxs) for k,idxs in fold_idxs.items()}\n\n ### Create DataLoaders\n dataloader_args = dict(batch_size=batch_size,num_workers=num_workers)\n\n fold_loaders = {k:dataloader.DataLoader(train, sampler=sampler,**dataloader_args)\n for k,sampler in fold_samplers.items()}\n\n if maxsize_test and maxsize_test < len(test):\n test_idxs = np.sort(np.random.choice(len(test), maxsize_test, replace = False))\n sampler_test = SubsetSampler(test_idxs) # For test don't want Random\n dataloader_args['sampler'] = sampler_test\n else:\n dataloader_args['shuffle'] = False\n test_loader = dataloader.DataLoader(test, **dataloader_args)\n fold_loaders['test'] = test_loader\n\n fnames, flens = zip(*[[k,len(v)] for k,v in fold_idxs.items()])\n fnames = '/'.join(list(fnames) + ['test'])\n flens = '/'.join(map(str, list(flens) + [len(test)]))\n\n if hasattr(train, 'data'):\n logger.info('Input Dim: {}'.format(train.data.shape[1:]))\n logger.info('Classes: {} (effective: {})'.format(len(train.classes), len(torch.unique(train.targets))))\n print(f'Fold Sizes: {flens} ({fnames})')\n\n return fold_loaders, {'train': train, 'test':test}", "def creates_data_loader():\n dataset_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=True)\n\n dataset_no_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=False)\n\n datasets_faces_split = train_val_test(dataset_faces, 0.2, 0.0)\n datasets_no_faces_split = train_val_test(dataset_no_faces, 0.2, 0.0)\n\n datasets = {}\n datasets[\"train\"] = datasets_faces_split[\"train\"] + \\\n datasets_no_faces_split[\"train\"]\n datasets[\"test\"] = datasets_no_faces_split[\"test\"]\n datasets[\"val\"] = datasets_faces_split[\"val\"] + \\\n datasets_no_faces_split[\"val\"]\n\n train_loader = DataLoader(dataset=datasets[\"train\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n\n val_loader = DataLoader(dataset=datasets[\"val\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n return train_loader, val_loader", "def prepare_demo_dataset(path, reso, batch_size=1):\r\n transform = transforms.Compose([\r\n transforms.Resize(size=(reso, reso), interpolation=3),\r\n transforms.ToTensor()\r\n ])\r\n\r\n img_datasets = DemoDataset(path, transform)\r\n dataloader = torch.utils.data.DataLoader(img_datasets, batch_size=batch_size, num_workers=8)\r\n\r\n return img_datasets, dataloader", "def to_DataLoader(self, **kwargs):\r\n return DataLoader(self, **kwargs)", "def get_test_loader(test_dataset,\n batch_size,\n num_workers=4,\n pin_memory=False):\n data_loader = torchutils.DataLoader(\n test_dataset, batch_size=batch_size, 
shuffle=True, num_workers=num_workers, pin_memory=pin_memory)\n return data_loader", "def load_data(is_train, num_par=4):\n if is_train:\n src = FLAGS.train_data_path\n else:\n src = FLAGS.dev_data_path\n\n if src is None:\n raise ValueError(\"Missing data path\")\n\n if FLAGS.dataset == \"boolq\":\n return load_boolq_file(src, num_par)\n else:\n return load_nli_file(src, num_par)", "def from_torch(\n dataset: \"torch.utils.data.Dataset\",\n) -> Dataset:\n return from_items(list(dataset))", "def make_data_loader(examples, batch_size=100, shuffle=True):\n x, y = zip(*examples) # makes lists of windows and tags\n x, y = tr.from_numpy(np.array(x)), tr.from_numpy(np.array(y))\n x, y = x.type(tr.LongTensor), y.type(tr.LongTensor) # convert lists to tensors\n train = utdata.TensorDataset(x, y)\n return utdata.DataLoader(train, batch_size, shuffle)", "def get_loader(split):\n assert split in ['train', 'val', 'trainval', 'test']\n image_feature_path = config.rcnn_trainval_path if split != 'test' else config.rcnn_test_path\n dataset = VQAFeatureDataset(\n split,\n image_feature_path,\n )\n loader = torch.utils.data.DataLoader(\n dataset,\n batch_size=512,\n shuffle=True if split not in ['val', 'test'] else False, # only shuffle the data in training\n pin_memory=True,\n num_workers=config.workers,\n )\n return loader", "def _custom_data_loader(self) -> DataLoader:\n dataloaders = DataLoader(self.dataset, batch_size=1)\n return dataloaders", "def load_data(self) -> None:", "def create_dataset_sampler_loader(file_path, cuda, batch_size, hvd):\n # When supported, use 'forkserver' to spawn dataloader workers\n # instead of 'fork' to prevent issues with Infiniband implementations\n # that are not fork-safe.\n kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}\n if (kwargs.get('num_workers', 0) > 0 and hasattr(mp, '_supports_context')\n and 'forkserver' in mp.get_all_start_methods()):\n kwargs['multiprocessing_context'] = 'forkserver'\n\n # create dataset\n dataset = MNISTDataset(file_path)\n # Horovod: use DistributedSampler to partition the training data\n sampler = Data.distributed.DistributedSampler(\n dataset, num_replicas=hvd.size(), rank=hvd.rank())\n loader = torch.utils.data.DataLoader(\n dataset, batch_size=batch_size, sampler=sampler, **kwargs)\n return dataset, sampler, loader", "def load_data(data_dir):\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n \n for directory in [train_dir, valid_dir, test_dir]:\n if not os.path.isdir(directory):\n raise IOError(\"Directory \" + directory + \" does not exist\")\n \n # Define transforms for the training, validation, and testing sets\n train_transforms = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomHorizontalFlip(),\n transforms.Resize(255),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n data_transforms = transforms.Compose([transforms.Resize(255),\n transforms.CenterCrop(224),\n transforms.ToTensor()])\n \n # Load the datasets with ImageFolder\n train_datasets = datasets.ImageFolder(train_dir, transform=train_transforms)\n valid_datasets = datasets.ImageFolder(valid_dir, transform=data_transforms)\n test_datasets = datasets.ImageFolder(test_dir, transform=data_transforms)\n \n # Using the image datasets and the trainforms, define the dataloaders\n trainloader = torch.utils.data.DataLoader(train_datasets, batch_size=64, shuffle=True)\n validloader = 
torch.utils.data.DataLoader(valid_datasets, batch_size=32, shuffle=True)\n testloader = torch.utils.data.DataLoader(test_datasets, batch_size=32, shuffle=True)\n \n return {\n 'datasets': {\n 'train': train_datasets,\n 'valid': valid_datasets,\n 'test': test_datasets\n },\n 'loader': {\n 'train': trainloader,\n 'valid': validloader,\n 'test': testloader\n }\n }", "def load_data(data_dir):\n\n logger = logging.getLogger('main.predata.load_data')\n try:\n cars_data = CarsDataset(os.path.join(data_dir, 'devkit/cars_train_annos.mat'),\n os.path.join(data_dir, 'cars_train'),\n os.path.join(data_dir, 'devkit/cars_meta.mat'),\n transform=transforms.Compose([\n transforms.Scale(250),\n transforms.RandomSizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4706145, 0.46000465, 0.45479808),\n (0.26668432, 0.26578658, 0.2706199))\n ]),\n )\n except TypeError:\n logger.exception('data error:')\n\n train_loader = DataLoader(cars_data, batch_size=1, shuffle=True)\n logger.info(' Size data: {}'.format(len(cars_data)))\n\n return train_loader", "def make_data_loader(condition, root='data', base_path='ut-zap50k-images',\n files_json_path='filenames.json',\n batch_size=64, shuffle=False, **kwargs):\n dataset = make_dataset(condition, root, base_path, files_json_path)\n loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,\n shuffle=shuffle, **kwargs)\n return loader", "def learnDataset(self, data_loader):\n print(\"learning dataset\")\n\n count = 0\n for sample in data_loader:\n input_sentence = sample[\"input\"][0]\n target_sentence = sample[\"target\"][0]\n\n prev_word = None\n for word_idx in range(1, 16):\n target_word = int(target_sentence[word_idx])\n self.model_parts[word_idx - 1].populateFactors(\n input_sentence, target_word, prev_word\n )\n prev_word = target_word\n\n print(\"{}/127940\".format(count), end = \"\\r\")\n count += 1\n print(\"127940/127940\")\n\n print(\"before fixed\", list(self.model_parts[0].factors[0].d.keys())[:10])\n for i in range(15):\n self.model_parts[i].fixed()\n print(\"after fixed\", self.model_parts[0].factors[0].keys[:10])", "def data_loaders(dataset_path):\n dataset_path = dataset_path\n news_stock_dataset = NewsStockDataLoader(dataset_path)\n \n dataset_size = len(news_stock_dataset)\n indices = list(range(dataset_size))\n training_split = int(0.8 * dataset_size)\n validation_split = int(0.9 * dataset_size)\n\n np.random.seed(96)\n np.random.shuffle(indices)\n\n train_indices = indices[:training_split]\n valid_indices = indices[training_split:validation_split]\n test_indices = indices[validation_split:]\n\n train_sampler = SubsetRandomSampler(train_indices)\n valid_sampler = SubsetRandomSampler(valid_indices)\n test_sampler = SubsetRandomSampler(test_indices)\n \n collate = PadSequence()\n\n training_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"training_batch_size\"),\n sampler = train_sampler,\n collate_fn = collate)\n\n validation_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"validation_batch_size\"),\n sampler = valid_sampler,\n collate_fn = collate)\n\n testing_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"testing_batch_size\"),\n sampler= test_sampler,\n collate_fn = collate)\n \n return training_loader, validation_loader, testing_loader", "def train(train_loader : torch.utils.data.DataLoader, model : nn.Module, criterion : nn.Module, optimizer : torch.optim.Optimizer) -> 
logger.Result:", "def get_inference_dataset(dataset_path,debug=False):\n\n if not os.path.exists(dataset_path):\n assert False, \"Couldn't find path : '{}'\".format(dataset_path)\n print(\"\\nprocessing data :'{}'\\n\".format(dataset_path))\n\n path = os.getcwd()\n os.chdir(dataset_path)\n\n dataset = []\n for file in tqdm(os.listdir('.')):\n if not file.endswith('features'):\n continue\n name = file.replace(\".features\", \"\") # removing \"features\"\n x = np.loadtxt(name + '.features')\n np.nan_to_num(x, copy=False)\n #get labels file\n if os.path.exists(name + '.test.labels'):\n labels_file = open(name + '.test.labels').readlines()\n elif os.path.exists(name + '.labels'):\n labels_file = open(name + '.labels').readlines()\n else:\n continue\n file_info = (name , float(labels_file[-2].split(' ')[-1]),\n np.fromstring(labels_file[1].strip(), sep=' ')[:2],\n float(labels_file[2]))#(file name,window_offset,(onset,offset),vot_type)\n\n dataset.append([torch.from_numpy(x).float(), file_info])\n if debug and len(dataset)>100:\n break\n os.chdir(path)\n\n return DataLoader(dataset,shuffle=False)", "def get_data_loader(target_classes, batch_size):\n classes = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n ########################################################################\n # The output of torchvision datasets are PILImage images of range [0, 1].\n # We transform them to Tensors of normalized range [-1, 1].\n transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n trainset = torchvision.datasets.CIFAR10(root='./data', train=True,\n download=True, transform=transform)\n # Get the list of indices to sample from\n relevant_train_indices = get_relevant_indices(\n trainset,\n classes,\n target_classes)\n # Split into train and validation\n np.random.seed(1000) # Fixed numpy random seed for reproducible shuffling\n np.random.shuffle(relevant_train_indices)\n split = int(len(relevant_train_indices) * 0.8)\n relevant_train_indices, relevant_val_indices = relevant_train_indices[:split], relevant_train_indices[split:]\n train_sampler = SubsetRandomSampler(relevant_train_indices)\n train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,\n num_workers=0, sampler=train_sampler)\n val_sampler = SubsetRandomSampler(relevant_val_indices)\n val_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,\n num_workers=0, sampler=val_sampler)\n testset = torchvision.datasets.CIFAR10(root='./data', train=False,\n download=True, transform=transform)\n relevant_test_indices = get_relevant_indices(testset, classes, target_classes)\n test_sampler = SubsetRandomSampler(relevant_test_indices)\n test_loader = torch.utils.data.DataLoader(testset, batch_size=batch_size,\n num_workers=0, sampler=test_sampler)\n return train_loader, val_loader, test_loader, classes", "def load_data(self,split='train'):\n return load_arrow_data(self.config,split)", "def load_data(root, num_seen, batch_size, num_workers):\n CIFAR10.init(root, num_seen)\n query_dataset = CIFAR10('query', transform=query_transform())\n seen_dataset = CIFAR10('seen', transform=train_transform())\n unseen_dataset = CIFAR10('unseen', transform=train_transform())\n retrieval_dataset = CIFAR10('retrieval', transform=train_transform())\n\n query_dataloader = DataLoader(\n query_dataset,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n seen_dataloader = DataLoader(\n seen_dataset,\n shuffle=True,\n 
batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n unseen_dataloader = DataLoader(\n unseen_dataset,\n shuffle=True,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n retrieval_dataloader = DataLoader(\n retrieval_dataset,\n shuffle=True,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n )\n\n return query_dataloader, seen_dataloader, unseen_dataloader, retrieval_dataloader", "def cycle_dataset(self, loader):\r\n if loader.training:\r\n self.actor.train()\r\n else:\r\n self.actor.eval()\r\n\r\n self._init_timing()\r\n\r\n for i, data in enumerate(loader, 1):\r\n # get inputs\r\n data = self.to_variable(data)\r\n data['epoch'] = self.epoch\r\n data['settings'] = self.settings\r\n\r\n # forward pass\r\n loss, stats = self.actor(data)\r\n\r\n # backward pass and update weights\r\n if loader.training:\r\n loss.backward()\r\n apply_collective_grads = getattr(self.actor.net,\r\n \"apply_collective_grads\", None)\r\n if callable(apply_collective_grads):\r\n apply_collective_grads()\r\n self.optimizer.minimize(loss)\r\n self.actor.net.clear_gradients()\r\n\r\n # update statistics\r\n batch_size = data['train_images'].shape[loader.stack_dim]\r\n self._update_stats(stats, batch_size, loader)\r\n\r\n self._print_stats(i, loader, batch_size)\r\n\r\n if i % loader.__len__() == 0:\r\n self.save_checkpoint()\r\n self._stats_new_epoch()\r\n self._write_tensorboard()\r\n return", "def load_data(args, dataset_str):\n\n if dataset_str == 'friendster':\n dataset = h5py.File(\"../data/friendster/friendster_25K.h5\")\n adj_list = dataset[\"adjacency\"][:] # Adjacency list\n if args.model_choice == 'gs' or args.model_choice == 'gs_rand':\n graph = defaultdict(set)\n for i in range(len(adj_list)):\n for j in adj_list[i]:\n graph[i].add(j)\n graph[j].add(i)\n adj = graph\n else:\n adj = torch.zeros((len(adj_list), len(adj_list)))\n for i in range(len(adj_list)):\n for j in adj_list[i]:\n adj[i, j] = 1\n features = dataset[\"features\"][:] # Feature matrix\n labels = np.load(\"../data/friendster/age_labels.npy\", allow_pickle=True)\n features = features[:, 1:]\n mu = features.mean(0)\n sigma = features.std(0)\n sigma[sigma == 0] = 1\n features = (features - mu) / sigma\n features = torch.FloatTensor(features)\n elif dataset_str == 'fb':\n edge_list = np.load(\"../data/fb.edgelist.npy\")\n labels = np.load(\"../data/fb.labels.npy\")\n adj = torch.zeros((len(labels)), len(labels))\n for (i,j) in edge_list:\n adj[i, j] = 1\n adj[j, i] = 1\n features = np.load(\"../data/fb.attrs.npy\")\n features = torch.FloatTensor(features)\n # print(labels)\n elif dataset_str == 'protein':\n edge_list = np.loadtxt(\"../data/proteins/edges_protein.txt\")\n labels = np.loadtxt(\"../data/proteins/labels_protein.txt\")\n features = np.load(\"../data/proteins/features_protein.npy\")\n mu = features.mean(0)\n sigma = features.std(0)\n sigma[sigma == 0] = 1\n features = (features - mu) / sigma\n features = torch.FloatTensor(features)\n if args.model_choice == 'gs_rand':\n graph = defaultdict(set)\n for (i, j) in edge_list:\n graph[i].add(j)\n graph[j].add(i)\n graph[8890].add(8890)\n graph[11963].add(11963)\n adj = graph\n\n else:\n adj = torch.zeros((len(labels)), len(labels))\n for (i, j) in edge_list:\n adj[int(i), int(j)] = 1\n adj[int(j), int(i)] = 1\n\n else:\n names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']\n objects = []\n for i in range(len(names)):\n with open(\"../data/ind.{}.{}\".format(dataset_str, names[i]), 'rb') as f:\n if sys.version_info 
> (3, 0):\n objects.append(pkl.load(f, encoding='latin1'))\n else:\n objects.append(pkl.load(f))\n\n x, y, tx, ty, allx, ally, graph = tuple(objects)\n test_idx_reorder = parse_index_file(\"../data/ind.{}.test.index\".format(dataset_str))\n test_idx_range = np.sort(test_idx_reorder)\n features = sp.vstack((allx, tx)).tolil()\n features[test_idx_reorder, :] = features[test_idx_range, :]\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))\n labels = np.vstack((ally, ty))\n labels[test_idx_reorder, :] = labels[test_idx_range, :]\n labels = torch.LongTensor(labels)\n labels = torch.max(labels, 1)[1]\n features = normalize(features)\n features = torch.FloatTensor(np.array(features.todense()))\n if not args.model_choice == 'gs' and not args.model_choice == 'gs_rand':\n # print(adj)\n adj = sp.coo_matrix(adj)\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n adj = normalize(adj + sp.eye(adj.shape[0]))\n adj = sparse_mx_to_torch_sparse_tensor(adj)\n elif args.dataset != 'friendster' and args.dataset != 'protein':\n adj = sp.coo_matrix(adj)\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n adj = np.array(adj.todense())\n graph = defaultdict(set)\n edges = set()\n for i, v in enumerate(adj):\n for j, u in enumerate(v):\n if u != 0 and frozenset([i, j]) not in edges:\n edges.add(frozenset([i, j]))\n graph[i].add(j)\n graph[j].add(i)\n adj = graph\n labels = torch.LongTensor(labels)\n if args.dataset != 'protein':\n idx_train_full = torch.from_numpy(\n np.loadtxt('../data/idx_train_' + args.dataset + '_' + str(args.trial) + '.txt')).long()\n idx_test = torch.from_numpy(\n np.loadtxt('../data/idx_test_' + args.dataset + '_' + str(args.trial) + '.txt')).long()\n idx_val_full = torch.from_numpy(\n np.loadtxt('../data/idx_val_' + args.dataset + '_' + str(args.trial) + '.txt')).long()\n\n return adj, features, labels, idx_train_full, idx_val_full, idx_test", "def load_data(self, modalities, args):\n print(\"Loading data...\")\n data_dir = os.path.abspath(args.data_dir)\n train_data = SubtitlesDataset(modalities, data_dir, mode='train',\n truncate=True, item_as_dict=True)\n test_data = SubtitlesDataset(modalities, data_dir, mode='test',\n truncate=True, item_as_dict=True)\n print(\"Done.\")\n if len(args.normalize) > 0:\n print(\"Normalizing \", args.normalize, \"...\")\n # Normalize test data using training data as reference\n test_data.normalize_(modalities=args.normalize,\n ref_data=train_data)\n # Normalize training data in-place\n train_data.normalize_(modalities=args.normalize)\n return train_data, test_data", "def load_data(self,split='train'):\n raise NotImplementedError", "def load_data(self):\n with open('data/fordTrain.csv') as f:\n data = csv.reader(f, delimiter=',')\n train = [x for i, x in enumerate(data) if i > 0] \n # Extract features and target variable separately\n trainx = [x[3:] for x in train]\n trainy = [x[2] for x in train]\n\n with open('data/fordTest.csv') as f:\n data = csv.reader(f, delimiter=',')\n testx = [x[3:] for i, x in enumerate(data) if i > 0] \n\n with open('data/Solution.csv') as f:\n data = csv.reader(f, delimiter=',')\n testy = [x[2] for i, x in enumerate(data) if i > 0] \n\n # Extract features and target variable, convert to numpy array\n trainx = np.asarray(trainx, dtype=np.float32)\n trainy = np.asarray(trainy, dtype=np.int8)\n testx = np.asarray(testx, dtype=np.float32)\n testy = np.asarray(testy, dtype=np.int8)\n\n # Return training and test sets\n trainSet = Dataset(trainx, trainy)\n testSet = Dataset(testx, 
testy)\n return trainSet, testSet", "def _load_data(self):\n if self._name in BALANCE_DATASET:\n _loader = dataset_loaders[self._name]\n xnp, y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.y_ts = y[test_idx]\n\n else:\n _loader = dataset_loaders[self._name]\n xnp, xp, y = _loader()\n # self.xnp, self.xp, self.y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.xp_tr = xp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.xp_ts = xp[test_idx]\n self.y_ts = y[test_idx]", "def get_precomp_loader(data_path, data_split, opt, batch_size=100,\n shuffle=True, num_workers=16):\n dset = PrecompDataset(data_path, data_split, opt)\n\n data_loader = torch.utils.data.DataLoader(dataset=dset,\n batch_size=batch_size,\n shuffle=shuffle,\n pin_memory=True,\n collate_fn=collate_fn,num_workers = num_workers)\n return data_loader", "def build_data_loader(dataset, micro_batch_size, num_workers, drop_last,\n task_collate_fn=None):\n\n # Sampler.\n world_size = mpu.get_data_parallel_world_size()\n rank = mpu.get_data_parallel_rank()\n sampler = torch.utils.data.distributed.DistributedSampler(\n dataset, num_replicas=world_size, rank=rank)\n\n # Data loader. Note that batch size is the per GPU batch size.\n data_loader = torch.utils.data.DataLoader(dataset,\n batch_size=micro_batch_size,\n sampler=sampler,\n shuffle=False,\n num_workers=num_workers,\n drop_last=drop_last,\n pin_memory=True,\n collate_fn=task_collate_fn)\n\n return data_loader", "def create_train_dataloader(configs):\n train_lidar_aug = OneOf([\n Random_Rotation(limit_angle=np.pi / 4, p=1.0),\n Random_Scaling(scaling_range=(0.95, 1.05), p=1.0),\n ], p=0.66)\n train_dataset = KittiDataset(configs, mode='train', lidar_aug=train_lidar_aug, hflip_prob=configs.hflip_prob,\n num_samples=configs.num_samples)\n train_sampler = None\n if configs.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, batch_size=configs.batch_size, shuffle=(train_sampler is None),\n pin_memory=configs.pin_memory, num_workers=configs.num_workers, sampler=train_sampler)\n\n return train_dataloader, train_sampler", "def train_epoch(self, data_loader):\n raise NotImplementedError", "def initialise_dataset_loader(\n self, data_param=None, task_param=None, data_partitioner=None):\n raise NotImplementedError", "def load_mnist(data_filename, batch_size):\n\n train_data, valid_data, test_data = unpickle_mnist(data_filename)\n\n train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)\n valid_loader = DataLoader(valid_data, batch_size=batch_size, shuffle=True)\n test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True)\n\n return train_loader, valid_loader, test_loader", "def DataLoader(data_place):\n # Nd = []\n # Np = []\n # Nz = []\n # channel_num = []\n # images = []\n # id_labels = []\n # pose_labels = []\n\n # mycase\n # Nz = 50\n # channel_num = 3\n # images = np.load('{}/images.npy'.format(data_place))\n # id_labels = np.load('{}/ids.npy'.format(data_place))\n # pose_labels = np.load('{}/yaws.npy'.format(data_place))\n #\n 
# Np = int(pose_labels.max() + 1)\n # Nd = int(id_labels.max() + 1)\n #\n # return [images, id_labels, pose_labels, Nd, Np, Nz, channel_num]\n\n # mycase MultiPIE\n Nz = 50\n channel_num = 3\n image_attributes_df = pd.read_csv(data_place)\n\n Nd = int(np.max(image_attributes_df['Id'])+1)\n Np = int(np.max(image_attributes_df['pose'])+1)\n Ni = int(np.max(image_attributes_df['illum'])+1)\n\n return [image_attributes_df, Nd, Np, Ni, Nz, channel_num]", "def load(self) -> None:\n self._load_data()\n self._load_poses()\n self._load_timestamps()", "def load_test_dataset(self):\n test_data_path = \"testdata\"\n root = Path(test_data_path)\n classes = sorted([j.name.split('/')[-1] for j in root.iterdir()])\n print(classes)\n\n transform = transforms.Compose([\n transforms.Resize(300),\n transforms.RandomHorizontalFlip(),\n transforms.CenterCrop(250),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.6071, 0.4828, 0.3934], std=[0.2845, 0.3187, 0.3240])\n ])\n\n dataset = datasets.ImageFolder(test_data_path, transform=transform)\n testloader = DataLoader(dataset, batch_size=4, shuffle=True)\n print(\"Loaded data\")\n return testloader", "def load_data_pickle(self, load_full=False):\n self.train = pd.read_pickle('../input/train_mod.pkl')\n self.test = pd.read_pickle('../input/test_mod.pkl')\n if load_full:\n self.train_full = pd.read_pickle('../input/train_full_mod.pkl')", "def load(self, dataset, model_dir):\n raise NotImplementedError", "def make_loader(dataset, train_batch_size, validation_split=0.2):\n # number of samples in train and test set\n train_len = int(len(dataset) * (1 - validation_split))\n test_len = len(dataset) - train_len\n train_set, test_set = torch.utils.data.random_split(dataset, [train_len, test_len])\n # create train_loader\n train_loader = torch.utils.data.DataLoader(\n train_set, batch_size=train_batch_size, shuffle=True,\n )\n # create test_loader\n test_loader = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False,)\n return train_loader, test_loader", "def get_faces_loaders(batch_size=128, test=True, data_path=\"./data/\"):\n\n dat = np.load(data_path + \"rotated_faces_data.npz\")\n train_images = torch.FloatTensor(dat['train_images'])\n train_targets = torch.FloatTensor(dat['train_angles'])\n\n traindata = torch.utils.data.TensorDataset(train_images, train_targets)\n trainloader = torch.utils.data.DataLoader(traindata, batch_size=batch_size,\n shuffle=True)\n\n if test:\n test_images = torch.FloatTensor(dat['test_images'])\n test_targets = torch.FloatTensor(dat['test_angles'])\n\n testdata = torch.utils.data.TensorDataset(test_images, test_targets)\n testloader = torch.utils.data.DataLoader(testdata, batch_size=batch_size)\n\n return trainloader, testloader\n\n return trainloader", "def init_loaders(self, *args, **kwargs):\n\n # Convert the data to Dataset\n dataset_dict = self.init_datasets(*args, **kwargs)\n\n # If the Dataset implements collate_fn, that is used. 
Otherwise, default_collate is used\n if hasattr(dataset_dict[\"train\"], \"collate_fn\") and callable(\n getattr(dataset_dict[\"train\"], \"collate_fn\")\n ):\n collate_fn = dataset_dict[\"train\"].collate_fn\n else:\n collate_fn = default_collate\n\n # If 'iters_per_epoch' is defined, then a fixed number of random sample batches from the training set\n # are drawn per epoch.\n # Otherwise, an epoch is defined by a full run through all of the data in the dataloader.\n #\n if self.config_dict.get(\"iters_per_epoch\") is not None:\n num_samples = (\n self.config_dict[\"iters_per_epoch\"] * self.config_dict[\"batch_size\"]\n )\n loaders_dict = {}\n for key in dataset_dict.keys():\n if key == \"train\":\n loaders_dict[key] = DataLoader(\n dataset_dict[key],\n batch_sampler=BatchSampler(\n RandomSampler(\n dataset_dict[key],\n replacement=True,\n num_samples=num_samples,\n ),\n batch_size=self.config_dict[\"batch_size\"],\n drop_last=False,\n ),\n collate_fn=collate_fn,\n )\n else:\n loaders_dict[key] = DataLoader(\n dataset_dict[key],\n batch_size=self.config_dict[\"batch_size\"],\n collate_fn=collate_fn,\n )\n else:\n loaders_dict = {\n key: DataLoader(\n dataset_dict[key],\n batch_size=self.config_dict[\"batch_size\"],\n collate_fn=collate_fn,\n )\n for key in data_dict.keys()\n }\n\n return loaders_dict", "def author_prediction_loader(dataset, train=True, batch_size=None):\n if batch_size is None:\n batch_size = len(dataset)\n if train:\n weights, num_samples = sampler_weights(dataset)\n sampler = WeightedRandomSampler(weights=weights, num_samples=num_samples, replacement=True)\n return DataLoader(dataset=dataset, batch_size=batch_size, sampler=sampler)\n else:\n return DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False)", "def load_data(self):", "def test_dataloader(self) -> DataLoader:\n return self._custom_data_loader()", "def get_loaders(img_size=CONFIG[\"matrix_size\"], batch_size=CONFIG[\"batch_size\"],\n used_keypoints=CONFIG[\"used_keypoints\"], interpolation_frames=CONFIG[\"interpolation_frames\"],\n noise_frames=CONFIG[\"noise_frames\"], all_data=None, all_labels=None):\n\n if all_data is None or all_labels is None:\n all_data, all_labels = load_video_data_labels(interpolation_frames, noise_frames, used_keypoints, img_size)\n\n p = np.random.permutation(len(all_data))\n train_len = int(len(p) / 80)\n others_len = int((len(p) - train_len) / 2)\n\n train_data, train_labels = all_data[p[:train_len]], all_labels[p[:train_len]]\n val_data = all_data[p[train_len:train_len + others_len]]\n val_labels = all_labels[p[train_len:train_len + others_len]]\n test_data, test_labels = all_data[p[-others_len:]], all_labels[p[-others_len:]]\n\n # Transform to tensor\n train_data_tensor, train_labels_tensor = torch.from_numpy(train_data), torch.from_numpy(train_labels)\n val_data_tensor, val_labels_tensor = torch.from_numpy(val_data), torch.from_numpy(val_labels)\n test_data_tensor, test_labels_tensor = torch.from_numpy(test_data), torch.from_numpy(test_labels)\n\n # Data Loader for easy mini-batch return in training, load the Dataset from the numpy arrays\n train_loader = DataLoader(TensorDataset(train_data_tensor, train_labels_tensor), batch_size=batch_size)\n val_loader = DataLoader(TensorDataset(val_data_tensor, val_labels_tensor), batch_size=batch_size)\n test_loader = DataLoader(TensorDataset(test_data_tensor, test_labels_tensor), batch_size=batch_size)\n\n data = {\"train_data\": train_data,\n \"train_labels\": train_labels,\n \"val_data\": val_data,\n \"val_labels\": 
val_labels,\n \"test_data\": test_data,\n \"test_labels\": test_labels,\n \"all_data\": all_data[p],\n \"all_labels\": all_labels[p]}\n\n return data, train_loader, val_loader, test_loader", "def get_test_loader(\n data_dir: Path,\n batch_size: int,\n *,\n num_workers: int = 4,\n pin_memory: bool = False,\n using_cuda: bool = True,\n ) -> torch.utils.data.DataLoader:\n # define transforms\n\n if using_cuda:\n assert num_workers == 1\n assert pin_memory == True\n\n dataset = MNISTDataset(data_dir, split=Split.Testing)\n\n data_loader = torch.utils.data.DataLoader(\n dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=num_workers,\n pin_memory=pin_memory,\n )\n\n return data_loader", "def test_dataloader(self) -> DataLoader:\n return DataLoader(\n self.test_dataset,\n batch_size=self.hparams.batch_size,\n shuffle=False,\n num_workers=multiprocessing.cpu_count(),\n )", "def get_test_loader(data_dir,\n batch_size,\n shuffle=True,\n num_workers=4,\n pin_memory=False):\n normalize = transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225],\n )\n\n # define transform\n transform = transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n\n dataset = datasets.CIFAR10(\n root=data_dir, train=False,\n download=True, transform=transform,\n )\n\n data_loader = torch.utils.data.DataLoader(\n dataset, batch_size=batch_size, shuffle=shuffle,\n num_workers=num_workers, pin_memory=pin_memory,\n )\n\n return data_loader" ]
[ "0.7888709", "0.785579", "0.7751118", "0.7633787", "0.7525572", "0.7270675", "0.7166334", "0.7075442", "0.70753306", "0.70659876", "0.69723433", "0.6967991", "0.69530183", "0.6943673", "0.6889306", "0.68799275", "0.6872247", "0.68637246", "0.6859212", "0.6836871", "0.68136996", "0.6771447", "0.6769829", "0.6766648", "0.67421895", "0.6739953", "0.6726718", "0.67138743", "0.6693839", "0.66910714", "0.6682788", "0.6678408", "0.66723806", "0.66627544", "0.6649838", "0.664451", "0.66309863", "0.6565379", "0.6540001", "0.65333843", "0.6527723", "0.6516758", "0.6508825", "0.64966327", "0.6496049", "0.64887524", "0.64787126", "0.6473412", "0.6448254", "0.64476126", "0.64375097", "0.64155066", "0.6413594", "0.63992685", "0.63953596", "0.6394254", "0.63869286", "0.63854086", "0.63305265", "0.6329684", "0.6321896", "0.631764", "0.62840766", "0.628386", "0.62821174", "0.6278158", "0.6276678", "0.6266047", "0.6241504", "0.62401646", "0.62387866", "0.6231143", "0.6227885", "0.62259066", "0.62203276", "0.6216751", "0.61869675", "0.6182924", "0.6180618", "0.6176544", "0.61675966", "0.61576647", "0.6157564", "0.61561435", "0.61533517", "0.61326176", "0.61325973", "0.6122614", "0.61166173", "0.6109179", "0.610581", "0.6102321", "0.60979545", "0.6096236", "0.6092254", "0.6088358", "0.6086105", "0.6083072", "0.6075421", "0.60659224", "0.60526806" ]
0.0
-1
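The loader snippets above share one construction pattern: wrap the raw tensors or files in a Dataset, then hand that Dataset to torch.utils.data.DataLoader with a batch size, shuffle flag, and worker count. A minimal self-contained sketch of that shared pattern follows; the tensor shapes, batch size, and other arguments are illustrative placeholders rather than values taken from any snippet.

import torch
from torch.utils.data import DataLoader, TensorDataset

# Illustrative stand-ins for real features and labels.
features = torch.randn(100, 8)
labels = torch.randint(0, 2, (100,))

dataset = TensorDataset(features, labels)

# Typical training-style loader: shuffled mini-batches, single-process loading.
loader = DataLoader(dataset, batch_size=16, shuffle=True, num_workers=0, pin_memory=False)

for batch_features, batch_labels in loader:
    # Each iteration yields one mini-batch: shapes (16, 8) and (16,), except possibly the last batch.
    pass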
Padding function for 1D sequences
def __pad__(sequence, max_l):
    if max_l - len(sequence) < 0:
        sequence = sequence[:max_l]
    else:
        sequence = np.pad(sequence, (0, max_l - (len(sequence))), 'constant', constant_values=(0))
    return sequence
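A minimal usage sketch for the __pad__ helper above. It assumes NumPy is imported as np; the sample arrays and the max_l value of 5 are illustrative only and not taken from the record.

import numpy as np

def __pad__(sequence, max_l):
    # Truncate sequences longer than max_l, zero-pad shorter ones on the right.
    if max_l - len(sequence) < 0:
        sequence = sequence[:max_l]
    else:
        sequence = np.pad(sequence, (0, max_l - (len(sequence))), 'constant', constant_values=(0))
    return sequence

short_seq = np.array([1, 2, 3])
long_seq = np.array([1, 2, 3, 4, 5, 6])

print(__pad__(short_seq, 5))  # [1 2 3 0 0] -- padded up to length 5
print(__pad__(long_seq, 5))   # [1 2 3 4 5] -- truncated down to length 5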
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pad(seq, n):\n return", "def pad_sequence(xs, length=None, padding=0):\n return PadSequence(length, padding).apply((xs))[0]", "def pad_sequence(seq):\n seq_split = seq.strip().split(\"1\")\n last = seq_split[0]\n new_seq = last + \"1\"\n inc_added = 0\n out_added = 0\n for i in range(1, len(seq_split)-1):\n current = seq_split[i]\n\n # break up the intial sequences that leak information by adding padding\n if current == last:\n if last == \"-\":\n new_seq += \"+1\"\n inc_added += 1\n last = \"+\"\n else:\n new_seq += \"-1\"\n out_added += 1\n last = \"-\"\n else:\n new_seq += current + \"1\"\n last = current\n\n # 30% chance to inject randomness\n coin = random.randint(1, 101)\n if coin <= 30:\n if coin % 2 == 0:\n new_seq += \"+1\"\n else:\n new_seq += \"-1\"\n \n # return padded sequence, original number of cells, \n # number of incoming padding cells, and number of outgoing padding cells\n return new_seq, len(seq_split), inc_added, out_added", "def conv_pad(x, ks, mode):\n\tpad = (int(np.floor((ks-1)/2)), int(np.ceil((ks-1)/2)))\n\treturn F.pad(x, (*pad, *pad), mode=mode)", "def pad_sequences(sequences):\n max_len = max(s.shape[0] for s in sequences)\n padded = []\n for seq in sequences:\n zero_pad = np.concatenate(\n [seq, np.zeros((max_len - seq.shape[0], ) + seq.shape[1:])])\n padded.append(zero_pad[np.newaxis, :])\n\n return np.concatenate(padded, axis=0)", "def pad_sequence(sequence, max_length, pad):\n padN = max(max_length - len(sequence), 0)\n result = sequence[:max_length - padN] + [pad] * padN\n return result", "def temporal_padding(x, padding=(1, 1)):\n assert len(padding) == 2\n pattern = [[0, 0], [padding[0], padding[1]], [0, 0]]\n return tf.pad(x, pattern)", "def pad_sequences_1d(sequences, max_len=None, padding='post', truncating='post', value=0.):\n return pad_sequences(sequences, maxlen=max_len, padding=padding, truncating=truncating,\n value=value)", "def pad(x, padding, fill_value=0):\n input_shape = x.shape\n output_shape = []\n indices = []\n\n for dim, pad in enumerate(padding):\n try:\n left_pad, right_pad = pad\n except TypeError:\n left_pad = right_pad = pad\n output_shape.append(left_pad + input_shape[dim] + right_pad)\n indices.append(slice(left_pad, left_pad + input_shape[dim]))\n\n if fill_value:\n out = T.ones(output_shape) * fill_value\n else:\n out = T.zeros(output_shape)\n return T.set_subtensor(out[tuple(indices)], x)", "def create_padding_mask(seq):\r\n seq = tf.cast(tf.math.equal(seq, 0), tf.float32)\r\n return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)\r", "def _pad1d(self, x: torch.Tensor, padding_left: int, padding_right: int, mode: str = \"zero\", value: float = 0.0):\n length = x.shape[-1]\n if mode == \"reflect\":\n max_pad = max(padding_left, padding_right)\n if length <= max_pad:\n x = F.pad(x, (0, max_pad - length + 1))\n return F.pad(x, (padding_left, padding_right), mode, value)", "def keypoints_sequence_padding(keypoints_sequence: np.array, output_length: int) -> np.array:\n output_sequence = np.copy(keypoints_sequence)\n input_seq_length = keypoints_sequence.shape[0]\n\n if input_seq_length < output_length:\n pad_sequence = np.zeros([output_length - input_seq_length, keypoints_sequence.shape[1], keypoints_sequence.shape[2]])\n pad_sequence[:] = keypoints_sequence[input_seq_length - 1]\n output_sequence = np.append(output_sequence, pad_sequence, axis=0)\n\n return output_sequence[:output_length]", "def _pad_sequences(sequences, pad=PAD):\n lengths = [tf.shape(x)[0] for x in sequences]\n padded_size = 
tf.reduce_max(lengths)\n padded_sequences = tf.stack([\n tf.pad(x,\n paddings=[[0, padded_size - lengths[i]]],\n mode='CONSTANT',\n constant_values=pad) for i, x in enumerate(sequences)\n ])\n return padded_sequences, lengths", "def make_padding(kernel_size, stride, dilation):\n return -((-kernel_size - (kernel_size - 1) * (dilation - 1)) // stride + 1) // 2", "def padding(a, dim):\n\n return np.pad(a, (0, dim-len(a)), 'constant', constant_values=(0))", "def _pad_data(data, pad_length, padding_type='same'):\n\n # get the sampling period (or distance between sampling points, for PLUX devices this is always 1)\n # it is assumed that the signals are equidistantly sampled therefore only the distance between to sampling points\n # is needed to calculate the sampling period\n T = data[:, 0][1] - data[:, 0][0]\n\n if padding_type == 'same':\n\n # create the 'same' padding array\n padding = np.tile(data[-1, 1:], (pad_length, 1))\n\n elif padding_type == 'zero':\n\n # get the number of columns for the zero padding\n num_cols = data.shape[1] - 1 # ignoring the time/sample column\n\n # create the zero padding array\n padding = np.zeros((pad_length, num_cols))\n\n else:\n\n IOError('The padding type you chose is not defined. Use either \\'same\\ or \\'zero\\'.')\n\n # create the time / sample axis that needs to be padded\n start = data[:, 0][-1] + T\n stop = start + (T * pad_length)\n time_pad = np.arange(start, stop, T)\n time_pad = time_pad[:pad_length] # crop the array if there are to many values\n\n # expand dimension for hstack operation\n time_pad = np.expand_dims(time_pad, axis=1)\n\n # hstack the time_pad and the zero_pad to get the final padding array\n pad_array = np.hstack((time_pad, padding))\n\n # vstack the pad_array and the new_array\n padded_data = np.vstack([data, pad_array])\n\n return padded_data", "def add_padding(x, maxlen=500):\n \n # May want to increase maxlen from 500! 
Not sure the total dist of chomragram lengths.\n\n for i in range(len(x)):\n x[i] = x[i][:,:maxlen]\n q = maxlen - x[i].shape[1]\n p = q//2\n# if q % 2 == 0:\n# x[i] = np.pad(x[i], ((p,p), (0,0)), 'constant', constant_values=(0,0))\n# else:\n# x[i] = np.pad(x[i], ((p,p+1), (0,0)), 'constant', constant_values=(0,0))\n\n print\n if q % 2 == 0:\n x[i] = np.pad(x[i], ((0,0), (p,p)), 'constant', constant_values=(0,0))\n else:\n x[i] = np.pad(x[i], ((0,0), (p,p+1)), 'constant', constant_values=(0,0))\n \n return x", "def Padding_Signal(signal,M = 10):\t\t\t\t\t\t\t\t\t\t# Function to pad a signal\n\ts = signal.shape[0]\n\tsignal_change = np.zeros(s+2*M)\n\tsignal_change[M:s+M] = signal\n\t\n\treturn signal_change", "def pad_1d(x, pad_left, pad_right, mode='constant', value=0.):\n if (pad_left >= x.shape[-1]) or (pad_right >= x.shape[-1]):\n if mode == 'reflect':\n raise ValueError('Indefinite padding size (larger than tensor).')\n res = F.pad(x.unsqueeze(2),\n (pad_left, pad_right, 0, 0),\n mode=mode, value=value).squeeze(2)\n return res", "def paddingSequence(X_train, X_test, maxLen=30):\r\n #######equalize list of seq\r\n X_train = pad_sequences(X_train, maxLen, padding='post', truncating='post')\r\n X_test = pad_sequences(X_test, maxLen, padding='post', truncating='post')\r\n return X_train, X_test", "def transform_padding(pad_width):\n num_pad_values = len(pad_width)\n onnx_pad_width = [0]*num_pad_values\n\n start_index = 0\n # num_pad_values will always be multiple of 2\n end_index = int(num_pad_values/2)\n for idx in range(0, num_pad_values):\n if idx % 2 == 0:\n onnx_pad_width[start_index] = pad_width[idx]\n start_index += 1\n else:\n onnx_pad_width[end_index] = pad_width[idx]\n end_index += 1\n\n return onnx_pad_width", "def pad(self, src):\n if(self.pre_pad):\n dst = src.new(\n src.size(0),\n src.size(1),\n src.size(2),\n src.size(3),\n 2\n ).zero_()\n dst.narrow(dst.ndimension()-1, 0, 1).copy_(\n torch.unsqueeze(src, 4)\n )\n else:\n padded = self.padding_module.updateOutput(src)\n dst = src.new(\n padded.size(0),\n padded.size(1),\n padded.size(2),\n padded.size(3),\n 2\n ).zero_()\n dst.narrow(4, 0, 1).copy_(\n torch.unsqueeze(padded, 4)\n )\n return dst", "def pad_binary_signal(x, pad_len=10):\n n = len(x)\n one_idx = np.arange(n)[x == 1]\n\n if len(one_idx) == 0:\n return x\n\n y = np.zeros(n)\n for idx in one_idx:\n start = max(idx - pad_len, 0)\n end = min(idx + pad_len + 1, n)\n y[start:end] = 1.0\n\n return y", "def add_padding(im, pad):\n\n return np.pad(im, pad_width=((pad, pad), (pad, pad), (0, 0)), mode='symmetric')", "def pad(x, system_shape, pad_size):\n res = unpad(tf.tile(x, (1,)+(3,)*len(pad_size)),\n tuple(s-p for s, p in zip(system_shape, pad_size)))\n return res", "def padding(old, l):\n new = deepcopy(old)\n for i, j in enumerate(new):\n new[i] += [0] * (l - len(j))\n new[i] = j[:l]\n return new", "def calculate_padding_to_align(length, align):\n return 0 if length % align == 0 else (align - (length % align))", "def pad_sequences(self, X):\n return pad_sequences(X, maxlen=self.pad_length)", "def pad_shift(x, shift, padv=0.0):\n if shift > 0:\n padding = torch.ones(x.size(0), shift, x.size(2)).to(x.device) * padv\n return torch.cat((padding, x[:, :-shift, :]), dim=1)\n elif shift < 0:\n padding = torch.ones(x.size(0), -shift, x.size(2)).to(x.device) * padv\n return torch.cat((x[:, -shift:, :], padding), dim=1)\n else:\n return x", "def padding(sent, sequence_len):\n if len(sent) > sequence_len:\n sent = sent[:sequence_len]\n padding = sequence_len - len(sent)\n 
sent2idx = sent + [0]*padding\n return sent2idx, len(sent)", "def padAlignment(align, applyPadding=True):\n if type(align) in [dict, np.ndarray, list]:\n align = pd.Series(align)\n\n \"\"\"Replace * and # with - and - \"\"\"\n for ind in align.index:\n if '*' in align[ind]:\n align[ind] = align[ind].replace('*', '-')\n if '#' in align[ind]:\n align[ind] = align[ind].replace('#', '-')\n \"\"\"Pad with gaps if the lengths are all the same\"\"\"\n if applyPadding:\n L = align.map(len).unique()\n if len(L) > 1:\n #print 'Sequences have different lengths (pading with gaps): %s' % L\n L = L.max()\n for ind in align.index:\n if len(align[ind]) < L:\n align[ind] = align[ind].ljust(L, '-')\n else:\n L = L.max()\n return align", "def pad_to_max_length(self, sequence):\n sequence = sequence[:self.max_seq_length]\n n = len(sequence)\n #return sequence + ['[PAD]'] * (self.max_seq_length - n)\n return sequence + [0] *(self.max_seq_length - n)", "def pad_and_onehot(data, pad_len=None, extra_padding=200):\n if pad_len is None:\n pad_len = max(len(x) for x in data) + extra_padding\n data = [\n onehot(np.pad(trace, (0, pad_len - len(trace)), mode=\"constant\"))\n for trace in data\n ]\n return pad_len, np.array(data)", "def pad_same(width, kernel, slide):\n res = (width - kernel) / slide + 1\n pad = (width - res) / 2\n return pad", "def add_periodic_padding(X, pad_size):\n\tpad_size = np.array(pad_size)\n\tn_duplicates = tuple([int(x) for x in np.ceil(pad_size/np.array(X.shape))*2 + 1])\n\tX_out = np.tile(X, n_duplicates)\n\tn_dlt = [int(x) for x in (np.array(X.shape) - np.mod(pad_size, np.array(X.shape)))]\n\tX_out = X_out[:-n_dlt[0], :]\n\tX_out = X_out[:, :-n_dlt[1]]\n\tX_out = X_out[n_dlt[0]:, :]\n\tX_out = X_out[:, n_dlt[1]:]\n\treturn X_out", "def pad_labellings(labels):\n target_length = max([len(labels) for labels in labels])\n padded = []\n\n for label in labels:\n padding_size = target_length - len(label)\n\n padded_label = label + [0] * padding_size\n\n assert len(padded_label) > 0\n\n padded.append(padded_label)\n\n return padded", "def _pad_feature_sequences(sequences, pad=PAD, feature_dims=768):\n lengths = [tf.shape(x)[0] for x in sequences]\n padded_size = tf.reduce_max(lengths)\n padded_sequences = tf.stack([\n tf.pad(x,\n paddings=[[0, padded_size - lengths[i]], [0, 0]],\n mode='CONSTANT',\n constant_values=pad) for i, x in enumerate(sequences)\n ])\n return padded_sequences, lengths", "def padding_tensor(sequences, max_length=1000000):\n # get the number of sequences\n num = len(sequences)\n # get the maximum length (clip too long sequences)\n max_len = min(max([s.shape[0] for s in sequences]), max_length)\n # define new output dimensions\n out_dims = (num, max_len, *sequences[0].shape[1:])\n # create output_tensor with new dimensionality\n out_tensor = sequences[0].data.new(*out_dims).fill_(0)\n # create new mask_tensor with the corresponding mask\n mask = sequences[0].data.new(*out_dims).fill_(0)\n # iterate over the sequences\n logger.info('Start padding breaths....')\n with tqdm(\n total=len(sequences),\n bar_format=\"{desc:<5.5}{percentage:3.0f}%|{bar:100}{r_bar}\",\n ascii=True\n ) as pbar:\n for i, tensor in enumerate(sequences):\n # get the length of the current breath\n length = min(tensor.size(0), max_len)\n # add all valid breaths\n print(tensor)\n input('before')\n out_tensor[i, :length] = tensor[:length, :]\n # for the breaths that are \"too short\" padd with last value\n out_tensor[i, length:] = 0\n print(out_tensor)\n input('after')\n # create mask\n mask[i, :length] = 1\n 
# update progressbar\n pbar.update(1)\n\n # return result\n return max_len, out_tensor, mask", "def _padding(self, x, shape, value=0):\n row_padding = shape[0] - x.shape[0]\n col_padding = shape[1] - x.shape[1]\n return np.pad(x, [[0, row_padding], [0, col_padding]], mode=\"constant\", constant_values=value)", "def fixed_padding(inputs, kernel_size, data_format='channels_last'):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n return _padding(inputs, (pad_beg, pad_end), data_format)", "def Batch_Padding(batch, batch_first, pad_token):\n padded_batch = torch.nn.utils.rnn.pad_sequence([torch.tensor(x) for x in batch], batch_first=True, padding_value=pad_token)\n return padded_batch", "def add_padding(*data, value, maxlen=250, padding=\"post\"):\n return [keras.preprocessing.sequence.pad_sequences(\n d, value=value, padding=padding,\n maxlen=maxlen) for d in data]", "def pad_sequences_2d(sequences, max_len_1=None, max_len_2=None, dtype='int32', padding='post',\n truncating='post', value=0.):\n lengths_1, lengths_2 = [], []\n for s in sequences:\n lengths_1.append(len(s))\n for t in s:\n lengths_2.append(len(t))\n if max_len_1 is None:\n max_len_1 = np.max(lengths_1)\n if max_len_2 is None:\n max_len_2 = np.max(lengths_2)\n\n num_samples = len(sequences)\n x = (np.ones((num_samples, max_len_1, max_len_2)) * value).astype(dtype)\n for i, s in enumerate(sequences):\n if not len(s):\n continue # empty list was found\n\n if truncating == 'pre':\n s = s[-max_len_1:]\n elif truncating == 'post':\n s = s[:max_len_1]\n else:\n raise ValueError('Truncating type \"%s\" not understood' % truncating)\n\n y = (np.ones((len(s), max_len_2)) * value).astype(dtype)\n for j, t in enumerate(s):\n if not len(t):\n continue\n\n if truncating == 'pre':\n trunc = t[-max_len_2:]\n elif truncating == 'post':\n trunc = t[:max_len_2]\n else:\n raise ValueError('Truncating type \"%s\" not understood' % truncating)\n\n trunc = np.asarray(trunc, dtype=dtype)\n\n if padding == 'post':\n y[j, :len(trunc)] = trunc\n elif padding == 'pre':\n y[j, -len(trunc):] = trunc\n else:\n raise ValueError('Padding type \"%s\" not understood' % padding)\n\n if padding == 'post':\n x[i, :y.shape[0], :] = y\n elif padding == 'pre':\n x[i, -y.shape[0]:, :] = y\n else:\n raise ValueError('Padding type \"%s\" not understood' % padding)\n\n return x", "def apply_padding(self, batch_list):\n max_len = max([len(idx_seq) for idx_seq in batch_list])\n padded = [idx_seq + [self.vocab.pad_id] * (max_len - len(idx_seq)) for idx_seq in batch_list]\n return padded", "def _pad(seqs, dtype=torch.float32):\n assert len(seqs) > 0 and all(x.shape[1:] == seqs[0].shape[1:] for x in seqs)\n lens = torch.LongTensor([len(x) for x in seqs])\n max_seq_len = torch.max(lens)\n\n # padded_seq_dims: (batch, max_seq_len, ...).\n padded_seq_dims = (len(seqs), max_seq_len,) + seqs[0].shape[1:]\n res = torch.zeros(padded_seq_dims, dtype=dtype)\n for i, seq in enumerate(seqs):\n src_len = lens[i]\n res[i, :src_len] = torch.Tensor(seq)\n return res, lens", "def pad_lr(x, fsize, fshift):\n M = num_frames(len(x), fsize, fshift)\n pad = (fsize - fshift)\n T = len(x) + 2 * pad\n r = (M - 1) * fshift + fsize - T\n return pad, pad + r", "def _pad(x, depth=4):\n divisor = np.power(2, depth)\n remainder = x.shape[0] % divisor\n\n # no padding because already of even shape\n if remainder == 0:\n return x\n # add zero rows after 1D feature\n elif len(x.shape) == 2:\n return np.pad(x, [(0, divisor - remainder), (0, 0)], \"constant\")\n # add 
zero columns and rows after 2D feature\n elif len(x.shape) == 3:\n return np.pad(x, [(0, divisor - remainder), (0, divisor - remainder),\n (0, 0)], \"constant\")", "def pad(size, value):\n return (value + size - 1)/size*size", "def pad_with_zero(list, max_length, pad_type):\n padded_list = pad_sequences(list, maxlen=max_length, padding=pad_type, truncating='post')\n return padded_list", "def pad(input, pad, mode='constant', value=0):\n ndim = input.ndimension()\n pads_begin, pads_end = [0] * ndim, [0] * ndim\n for i in range(len(pad) // 2):\n pads_begin[ndim - 1 - i] = pad[i * 2]\n pads_end[ndim - 1 - i] = pad[i * 2 + 1]\n mode = {'constant': 'CONSTANT', 'reflect': 'REFLECT',\n 'replicate': 'EDGE', 'circular': 'EDGE'}[mode]\n return FunctionLib.apply(\n 'Pad', input.device, [input], mode=mode, value=float(value),\n ndim=ndim, pads=pads_begin + pads_end)", "def pad_list(xs, pad_value=0.0, pad_left=False):\n bs = len(xs)\n max_time = max(x.size(0) for x in xs)\n xs_pad = xs[0].new_zeros(bs, max_time, * xs[0].size()[1:]).fill_(pad_value)\n for b in range(bs):\n if len(xs[b]) == 0:\n continue\n if pad_left:\n xs_pad[b, -xs[b].size(0):] = xs[b]\n else:\n xs_pad[b, :xs[b].size(0)] = xs[b]\n return xs_pad", "def pad_list(xs, pad_value=0.0, pad_left=False):\n bs = len(xs)\n max_time = max(x.size(0) for x in xs)\n xs_pad = xs[0].new_zeros(bs, max_time, *xs[0].size()[1:]).fill_(pad_value)\n for b in range(bs):\n if len(xs[b]) == 0:\n continue\n if pad_left:\n xs_pad[b, -xs[b].size(0):] = xs[b]\n else:\n xs_pad[b, :xs[b].size(0)] = xs[b]\n return xs_pad", "def pad(self, minibatch):\n minibatch = list(minibatch)\n# if not self.sequential:\n# return minibatch\n if self.fix_length is None:\n max_len = max(len(x) for x in minibatch)\n else:\n max_len = self.fix_length + (\n self.init_token, self.eos_token).count(None) - 2\n padded, lengths = [], []\n\n for x in minibatch:\n if self.pad_first:\n padded.append(\n [self.pad_token] * max(0, max_len - len(x))\n + ([] if self.init_token is None else [self.init_token])\n + list(x[-max_len:] if self.truncate_first else x[:max_len])\n + ([] if self.eos_token is None else [self.eos_token]))\n else:\n padded.append(\n ([] if self.init_token is None else [self.init_token])\n + list(x[-max_len:] if self.truncate_first else x[:max_len])\n + ([] if self.eos_token is None else [self.eos_token])\n + [self.pad_token] * max(0, max_len - len(x)))\n lengths.append(len(padded[-1]) - max(0, max_len - len(x)))\n if self.include_lengths:\n return (padded, lengths)\n return padded", "def padding(img, n):\n img = np.pad(img, [(n, n), (n, n)], mode='constant', constant_values=0)\n\n return img", "def _pad(self, array, sentinel, max_len=None):\n # Compute max length.\n maxlen = 0\n for seq in array:\n maxlen = max(maxlen, len(seq))\n\n if max_len is not None:\n maxlen = max(maxlen, max_len)\n\n # Pad.\n padded = []\n lens = []\n for seq in array:\n padding = maxlen - len(seq)\n padded.append(seq + [sentinel] * padding)\n lens.append(len(seq))\n\n return padded, lens", "def pad(self, length):\n\n if length > self.event_roll.shape[0]:\n padding = numpy.zeros((length-self.event_roll.shape[0], self.event_roll.shape[1]))\n self.event_roll = numpy.vstack((self.event_roll, padding))\n\n elif length < self.event_roll.shape[0]:\n self.event_roll = self.event_roll[0:length, :]\n\n return self.event_roll", "def pad_sequences(sequences, maxlen, nb_sequences, dtype='int32', value=-1):\n\n x = (numpy.ones((nb_sequences, maxlen)) * value).astype(dtype)\n for idx, s in enumerate(sequences):\n trunc 
= s[:maxlen]\n\n x[idx, :len(trunc)] = trunc\n\n return x", "def padding(sentence_list):\n max_len = max([sentence.size(0) for sentence in sentence_list])\n pad_sen = [sen.tolist() + [pad_index] * max(0, max_len - len(sen))\n for sen in sentence_list]\n return torch.LongTensor(pad_sen).transpose(0, 1) # shape of (T, B)", "def pad_with_border(x, n_pad):\n x_pad_list = [x[0:1]] * int(n_pad) + [x] + [x[-1:]] * int(n_pad)\n return np.concatenate(x_pad_list, axis=0)", "def pad_sequences(sequences, pad_func, maxlen = None):\n ret = []\n\n # Determine the maxlen\n max_value = max(map(len, sequences))\n if maxlen is None:\n maxlen = max_value\n\n # Pad / truncate (done this way to deal with np.array)\n for sequence in sequences:\n cur_seq = list(sequence[:maxlen])\n cur_seq.extend([pad_func()] * (maxlen - len(sequence)))\n ret.append(cur_seq)\n return ret", "def padding(image, padded_size):\n image_row, image_col = image.shape #asigna alto y ancho de la imagen \n\n padded_image = np.zeros((image_row + padded_size*2, image_col + padded_size*2)) #matriz de imagen con padding en zeros\n print(\"Padded image zeros:\")\n print(padded_image)\n\n padded_image[padded_size:padded_size + image_row, padded_size:padded_size + image_col] = image #matriz de imagen con padding\n print(\"Padded image:\")\n print(padded_image)\n\n \n return padded_image", "def pad(data, *args, **kwargs): # pragma: no cover\n raise NotImplementedError()", "def pad_sequences_1d(sequences, dtype=torch.long, device=torch.device(\"cpu\"), fixed_length=None):\n if isinstance(sequences[0], list):\n if \"torch\" in str(dtype):\n sequences = [torch.tensor(s, dtype=dtype, device=device) for s in sequences]\n else:\n sequences = [np.asarray(s, dtype=dtype) for s in sequences]\n\n extra_dims = sequences[0].shape[1:] # the extra dims should be the same for all elements\n lengths = [len(seq) for seq in sequences]\n if fixed_length is not None:\n max_length = fixed_length\n else:\n max_length = max(lengths)\n if isinstance(sequences[0], torch.Tensor):\n assert \"torch\" in str(dtype), \"dtype and input type does not match\"\n padded_seqs = torch.zeros((len(sequences), max_length) + extra_dims, dtype=dtype, device=device)\n mask = torch.zeros((len(sequences), max_length), dtype=torch.float32, device=device)\n else: # np\n assert \"numpy\" in str(dtype), \"dtype and input type does not match\"\n padded_seqs = np.zeros((len(sequences), max_length) + extra_dims, dtype=dtype)\n mask = np.zeros((len(sequences), max_length), dtype=np.float32)\n\n for idx, seq in enumerate(sequences):\n end = lengths[idx]\n padded_seqs[idx, :end] = seq\n mask[idx, :end] = 1\n return padded_seqs, mask # , lengths", "def pad_sentences(sentences, padding_word=\"<PAD/>\",sequence_length = 0):\n if sequence_length == 0:\n sequence_length = max(len(x) for x in sentences)\n padded_sentences = []\n for i in range(len(sentences)):\n sentence = sentences[i]\n num_padding = sequence_length - len(sentence)\n new_sentence = sentence + [padding_word] * num_padding\n padded_sentences.append(new_sentence)\n return padded_sentences", "def pad_sequences(self,sequences, pad_func, maxlen = None):\n ret = []\n\n # Determine the maxlen\n max_value = max(map(len, sequences))\n if maxlen is None:\n maxlen = max_value\n\n # Pad / truncate (done this way to deal with np.array)\n for sequence in sequences:\n cur_seq = list(sequence[:maxlen])\n cur_seq.extend([pad_func()] * (maxlen - len(sequence)))\n ret.append(cur_seq)\n return ret", "def wrap_pad(input, size):\n M1 = tf.concat([input[:, :, 
-size[1]:, :], input, input[:, :, 0:size[1], :]], 2)\n M1 = tf.concat([M1[:, -size[0]:, :, :], M1, M1[:, 0:size[0], :, :]], 1)\n return M1", "def pad_left(x, block_size=3, fill=0):\n if len(x) > block_size:\n return x\n else:\n right = np.array(list(str(x)))\n left = np.repeat(str(fill), block_size - right.size )\n return \"\".join(np.concatenate([left, right]))", "def add_padding(img, x_padding):\n w = img.shape[1] + x_padding * 2\n img_with_padding = np.zeros((img.shape[0], w, 3), dtype=img.dtype)\n img_with_padding[:, x_padding:img.shape[1] + x_padding] = img\n return img_with_padding", "def _pad_shorter(sequence: str) -> str:\n return sequence.ljust(3, \"X\")", "def pad(x, pad_left=0, pad_right=0, to_complex=True):\n output = pad_1d(x, pad_left, pad_right, mode='reflect')\n if to_complex:\n output = torch.stack((output, torch.zeros_like(output)), dim=-1)\n return output", "def int_padding(length, val, direction=\">\"):\n return '{0:0{direction}{fill}}'.format(val, direction=direction, fill=length)", "def padding(src, min_size):\n # pad before put into convolutional layer\n src_dim = src.dim()\n if src_dim[0][1] >= min_size:\n return src\n pad_size = min_size - src_dim[0][1]\n channels = src_dim[0][2] if len(src_dim[0]) >= 3 else 1\n if pad_size == 1:\n return dy.concatenate([src, dy.zeroes((src_dim[0][0], 1, channels))], d=1)\n else:\n left_border = int(pad_size) / 2\n right_border = (int(pad_size)+1) / 2\n return dy.concatenate([dy.zeroes((src_dim[0][0], left_border, channels)), src, dy.zeroes((src_dim[0][0], right_border, channels))], d=1) # do concatenate along cols", "def padding(self):\n pad = self.ntiles - self.windowsize\n return (int((pad - 1)/2.), int((pad + 1)/2.))", "def pad(input, pad_size):\n if not pad_size:\n return input\n return tf.pad(input, [[0,0],[pad_size, pad_size],[pad_size, pad_size],[0,0]], 'REFLECT')", "def _prepare_onnx_paddings__tensorrt(g, input, pad):\n ctx = FUNCTION_REWRITER.get_context()\n torch_version = version_parse(torch.__version__)\n if torch_version.major == 1 and torch_version.minor < 10:\n return ctx.origin_func(g, input, pad)\n # The desired order of paddings is\n # dim_0_begin, dim_1_begin, ... , dim_0_end, ..., dim_n_end.\n # n is the dimension of input.\n # Assume zero-dimensions in the beginning, pad the \"pad\" sequence with\n # zeros in the beginning\n pad_len = torch.onnx.symbolic_opset9.size(\n g, pad, g.op('Constant', value_t=torch.tensor([0])))\n # Set extension = [0] * (dim * 2 - len(pad))\n rank = sym_help._get_tensor_rank(input)\n if rank is None:\n rank = g.op('Size', g.op('Shape', input))\n else:\n rank = g.op('Constant', value_t=torch.tensor(rank, dtype=torch.int64))\n extension = g.op(\n 'Sub',\n g.op('Mul', rank,\n g.op('Constant', value_t=torch.tensor(2, dtype=torch.int64))),\n pad_len)\n # Concat pad with extension: paddings = [dim_n_begin, dim_n_end,\n # dim_n-1_begin, dim_n-1_end, 0, 0, ... 
]\n # Currently ONNX only supports int64 type for Pad\n pad = g.op('Cast', pad, to_i=sym_help.cast_pytorch_to_onnx['Long'])\n paddings = g.op(\n 'Concat',\n pad,\n g.op(\n 'ConstantOfShape',\n extension,\n value_t=torch.tensor([0], dtype=torch.int64)),\n axis_i=0)\n # Reshape and reverse order and collate first beginnings and then ends\n # paddings = [[..., 0, dim_n-1_begin, dim_n_begin],\n # [..., 0, dim_n-1_end, dim_n_end]]\n # Reshape back to 1-D paddings = [..., 0, dim_n - 1_begin, dim_n_begin,\n # ..., 0, dim_n - 1_end, dim_n_end]\n\n # replace original Constant-Transpose-Constant with Slices and Concat.\n paddings = torch.onnx.symbolic_opset10.flip(g, paddings, [0])\n begins = sym_help._slice_helper(\n g, paddings, axes=[0], starts=[1], ends=[0xffff], steps=[2])\n ends = sym_help._slice_helper(\n g, paddings, axes=[0], starts=[0], ends=[0xffff], steps=[2])\n paddings = g.op('Concat', begins, ends, axis_i=0)\n padding_c = g.op(\n 'Cast', paddings, to_i=sym_help.cast_pytorch_to_onnx['Long'])\n return padding_c", "def pad_tensor(tensor, length, padding_index=PADDING_INDEX):\n n_padding = length - tensor.shape[0]\n assert n_padding >= 0\n if n_padding == 0:\n return tensor\n padding = tensor.new(n_padding, *tensor.shape[1:]).fill_(padding_index)\n return torch.cat((tensor, padding), dim=0)", "def pad_list(xs: torch.Tensor, pad_value: int):\n n_batch = len(xs)\n max_len = max(x.size(0) for x in xs)\n pad = xs[0].new(n_batch, max_len, *xs[0].size()[1:]).fill_(pad_value)\n\n for i in range(n_batch):\n pad[i, : xs[i].size(0)] = xs[i]\n\n return pad", "def _fixed_padding(inputs, kernel_size, *args, mode='CONSTANT', **kwargs):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]], mode=mode)\n return padded_inputs", "def padding_input(sents, pad_token=\"<pad>\", tgt_len=-1):\n if tgt_len == -1:\n tgt_len = max(len(s) for s in sents)\n batch_size = len(sents)\n seqs = []\n for i in range(batch_size):\n seqs.append(sents[i][0:tgt_len] + [pad_token] * (tgt_len - len(sents[i])))\n return seqs", "def pad(arr, n):\n m = len(arr)\n if m < n:\n return arr + type(arr)(ZERO for _ in range(n - m))\n else:\n return arr", "def channel_padding(x):\n #keras.backend.concatenate([x, tf.zeros_like(x)], axis=-1)\n x0=keras.layers.Activation('sigmoid')(x)\n return keras.backend.concatenate([x, x0], axis=-1)", "def pad_sequences(sequences, maxlen=None, dtype='int32',\n padding='pre', truncating='pre', value=0.):\n if not hasattr(sequences, '__len__'):\n raise ValueError('`sequences` must be iterable.')\n lengths = []\n for x in sequences:\n if not hasattr(x, '__len__'):\n raise ValueError('`sequences` must be a list of iterables. 
'\n 'Found non-iterable: ' + str(x))\n lengths.append(len(x))\n\n num_samples = len(sequences)\n if maxlen is None:\n maxlen = np.max(lengths)\n\n # take the sample shape from the first non empty sequence\n # checking for consistency in the main loop below.\n sample_shape = tuple()\n for s in sequences:\n if len(s) > 0:\n sample_shape = np.asarray(s).shape[1:]\n break\n\n is_dtype_str = np.issubdtype(dtype, np.str_) or np.issubdtype(dtype, np.unicode_)\n if isinstance(value, str) and dtype != object and not is_dtype_str:\n raise ValueError(\"`dtype` {} is not compatible with `value`'s type: {}\\n\"\n \"You should set `dtype=object` for variable length strings.\"\n .format(dtype, type(value)))\n\n x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype)\n for idx, s in enumerate(sequences):\n if not len(s):\n continue # empty list/array was found\n if truncating == 'pre':\n trunc = s[-maxlen:]\n elif truncating == 'post':\n trunc = s[:maxlen]\n else:\n raise ValueError('Truncating type \"%s\" '\n 'not understood' % truncating)\n\n # check `trunc` has expected shape\n trunc = np.asarray(trunc, dtype=dtype)\n if trunc.shape[1:] != sample_shape:\n raise ValueError('Shape of sample %s of sequence at position %s '\n 'is different from expected shape %s' %\n (trunc.shape[1:], idx, sample_shape))\n\n if padding == 'post':\n x[idx, :len(trunc)] = trunc\n elif padding == 'pre':\n x[idx, -len(trunc):] = trunc\n else:\n raise ValueError('Padding type \"%s\" not understood' % padding)\n return x", "def pad_tweets(tweets, padding_word=\"<PAD/>\", sequence_length=None):\n if sequence_length is None:\n sequence_length = max(len(x) for x in tweets)\n padded_tweets = []\n for i in range(len(tweets)):\n tweet = tweets[i]\n num_padding = sequence_length - len(tweet)\n padded = tweet + [padding_word] * num_padding\n padded_tweets.append(padded)\n return padded_tweets", "def pad_prediction(prediction, input_size, pad_with=-1.0):\n pad_pred = pad_with * np.ones(input_size).astype(float)\n pred_size = prediction.shape\n D = ((input_size[0]-pred_size[0])//2, (input_size[1]-pred_size[1])//2)\n pad_pred[D[0]:D[0]+pred_size[0], D[1]:D[1]+pred_size[1]] = prediction\n return pad_pred", "def pad_zeros(n, before, after):\n pad_1_dim = (before, after)\n pad_2_dim = ((before, after), (0, 0))\n m = before + n + after\n def times(x):\n return jnp.pad(x, pad_1_dim)\n def trans(x):\n return x[before:before+n]\n return Operator(times=times, trans=trans, shape=(m,n), matrix_safe=False)", "def pad_sentences(sentences, padding_word=\"<PAD/>\"):\n # !!! 
一定要注意这里会影响数据的形状,要与代码内的 sequence length 保持一致 !!!\n sequence_length = 30\n # sequence_length = max(len(x) for x in sentences)\n padded_sentences = []\n for i in range(len(sentences)):\n sentence = sentences[i][:sequence_length]\n num_padding = sequence_length - len(sentence)\n new_sentence = sentence + [padding_word] * num_padding\n padded_sentences.append(new_sentence)\n return padded_sentences", "def test_pad6():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = (2, 1, 3, 0, 2, 0)\n mode = \"replicate\"\n data_format = \"NDHWC\"\n res = np.array(\n [\n [\n [\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n ],\n [\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n ],\n [\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n ],\n ]\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, data_format=data_format)", "def pad_seq_list(array, sentinel):\n # Compute max length.\n maxlen = 0\n for seq in array:\n maxlen = max(maxlen, len(seq))\n\n # Pad.\n padded = []\n lens = []\n for seq in array:\n padding = maxlen - len(seq)\n padded.append(seq + [sentinel] * padding)\n lens.append(len(seq))\n\n return padded, lens", "def pad1D(X, pad, kernel_width=None, stride=None, dilation=0):\n\tX_pad = None\n\tp = pad\n\tif isinstance(p, int):\n\t\tp = (p, p)\n\tif isinstance(p, tuple):\n\t\tX_pad = np.pad(\n\t\t\tX, \n\t\t\tpad_width=((0, 0), (p[0], p[1]), (0, 0)),\n\t\t\tmode='constant',\n\t\t\t# constant_value = 0,\n\t\t\t)\n\n\t# compute the correct padding dims for a 'same' or 'causal' convolution\n\tif p in ['same', 'causal'] and kernel_width and stride:\n\t\tcausal = p == 'causal'\n\t\tp = calc_pad_dims_1D(\n\t\t\tX.shape, X.shape[1], kernel_width, stride, causal=causal, dilation=dilation\n\t\t\t)\n\t\tX_pad, p = pad1D(X, p)\n\n\treturn X_pad, p", "def pad_to_length(word_embeddings, length, padding):\n\n for sentence in word_embeddings:\n num_to_append = length - len(sentence)\n assert num_to_append >= 0\n for _ in range(num_to_append):\n sentence.append(padding)", "def _padding(inputs, paddings, data_format):\n if data_format == 'channels_first':\n padded_inputs = tf.pad(\n inputs, [[0, 0], [0, 0], paddings, paddings])\n else:\n padded_inputs = tf.pad(\n inputs, [[0, 0], paddings, paddings, [0, 0]])\n return padded_inputs", "def pad(x, n=3, padding='valid'):\r\n assert isinstance(x, np.ndarray)\r\n padding_valid = 'valid'\r\n padding_same_real = 'same_real'\r\n assert padding in {padding_valid, padding_same_real}\r\n\r\n l = n//2\r\n r = n-l-1\r\n\r\n if padding == padding_same_real:\r\n x = np.append(x[1+r:1+r+l], x, axis=0)\r\n x = np.append(x, x[-l-1-r:-l-1], axis=0)\r\n\r\n if 
r:\r\n fitted_x = x[l:-r]\r\n else:\r\n fitted_x = x[l:]\r\n return x, fitted_x", "def pad_sentences(sentences, padding_word=\"<PAD/>\"):\n sequence_length = max(len(x) for x in sentences)\n padded_sentences = []\n for i in range(len(sentences)):\n sentence = sentences[i]\n num_padding = sequence_length - len(sentence)\n new_sentence = sentence + [padding_word] * num_padding\n padded_sentences.append(new_sentence)\n return padded_sentences", "def lengths_to_encoder_padding_mask(lengths, batch_first=False):\n max_lengths = torch.max(lengths).item()\n bsz = lengths.size(0)\n encoder_padding_mask = torch.arange(\n max_lengths\n ).to( # a (T, ) tensor with [0, ..., T-1]\n lengths.device\n ).view( # move to the right device\n 1, max_lengths\n ).expand( # reshape to (1, T)-shaped tensor\n bsz, -1\n ) >= lengths.view( # expand to (B, T)-shaped tensor\n bsz, 1\n ).expand(\n -1, max_lengths\n )\n if not batch_first:\n return encoder_padding_mask.t(), max_lengths\n else:\n return encoder_padding_mask, max_lengths", "def pad_zeros(x, total):\n num_pad = total - len(x)\n for idx in range(num_pad):\n x = '0' + x\n return x", "def pad_sentences(sentences, sequence_length=0, padding_word=\"<PAD/>\"):\n if sequence_length == 0:\n sequence_length = max(len(sent) for sent in sentences)\n\n padded_sentences = []\n for sent in sentences:\n if len(sent) < sequence_length:\n num_padding = sequence_length - len(sent)\n new_sentence = sent + [padding_word] * num_padding\n else:\n new_sentence = sent[:sequence_length]\n padded_sentences.append(new_sentence)\n return padded_sentences", "def __Pad(self, data):\n pad = self.block_size - len(data) % self.block_size\n return data + pad * chr(pad)", "def find_padding(known, iter_len=1):\n pad = None\n starting_length = oracle(known)\n for i in range(32):\n test_pad = random_nonb64_string(i)\n padded_length = oracle(known + test_pad)\n if padded_length != starting_length:\n pad = test_pad[:-iter_len]\n break\n return pad", "def pad_periodic(\n tensor: tf.Tensor, paddings: Sequence[Sequence[int]],\n) -> tf.Tensor:\n result = tensor\n for axis, padding in enumerate(paddings):\n result = _pad_periodic_by_axis(result, padding, axis)\n return result", "def test_pad_8():\n paddle.disable_static()\n x = np.array([[[[1.0, 3.0], [-3.0, 1.0]]]])\n pad = [1, 1, 1, 2]\n mode = \"constant\"\n value = np.array(2.0)\n data_format = \"NCHW\"\n res = np.array(\n [\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 1.0, 3.0, 2.0],\n [2.0, -3.0, 1.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ]\n ]\n ]\n )\n exp = paddle.nn.functional.pad(\n x=paddle.to_tensor(x), pad=pad, mode=mode, value=paddle.to_tensor(value), data_format=data_format\n )\n assert np.allclose(exp.numpy(), res)" ]
[ "0.77374345", "0.76362944", "0.737829", "0.7223377", "0.7206336", "0.7144797", "0.710272", "0.70930755", "0.70851046", "0.705326", "0.7034367", "0.700346", "0.6976378", "0.6973453", "0.69480914", "0.6854283", "0.68475085", "0.6833989", "0.6814044", "0.6806954", "0.67819375", "0.6776993", "0.6755386", "0.67440826", "0.67197275", "0.67130977", "0.6712591", "0.6702856", "0.66825294", "0.668056", "0.6679206", "0.6673303", "0.6672354", "0.6669649", "0.666471", "0.6662811", "0.66585124", "0.6648335", "0.66450554", "0.66432726", "0.66291547", "0.66088635", "0.6590588", "0.6589459", "0.6523849", "0.65093946", "0.65078", "0.6494904", "0.6493582", "0.6491751", "0.6484764", "0.6482597", "0.6479896", "0.6471845", "0.6462175", "0.64574", "0.6440348", "0.6436221", "0.64329296", "0.64297444", "0.6426313", "0.642594", "0.6411806", "0.64015293", "0.6394917", "0.6394635", "0.6394187", "0.63854057", "0.63788474", "0.63743436", "0.6361307", "0.635929", "0.6359258", "0.63482094", "0.6346661", "0.6344278", "0.6338399", "0.6336217", "0.633265", "0.6321197", "0.6308231", "0.6306832", "0.630039", "0.6295963", "0.629548", "0.62929505", "0.62892604", "0.6275764", "0.6272828", "0.6265032", "0.62630606", "0.6260662", "0.6259451", "0.6259418", "0.625789", "0.6253875", "0.6252393", "0.6251009", "0.6229764", "0.62294537" ]
0.6773792
22
Embed words in a sequence using GloVe model
def __glove_embed__(sequence, model):
    embedded = []
    for word in sequence:
        embedded.append(model[word])
    return embedded
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def embed(self, sequence):\n words = sequence.split(' ')\n vecs = [self._E[self._w2i[i]] if i in self._w2i else self._E[self._w2i[\"UNK\"]]\n for i in words]\n return vecs", "def embed(self, loader, model):\n print(\" ** Embedding words\")\n\n words = loader.words\n vectors = [model.get_word_vector(word) for word in words]\n\n return [(w, *v) for w, v in zip(words, vectors)]", "def generateByWord(model, voc, maxlen=20, diversity=0.5, numwords=42):\n\n text, sym_indices, indices_sym = voc\n syms = set(text)\n start_index = random.randint(0, len(text) - maxlen - 1) \n generated = ''\n sentence = text[start_index: start_index + maxlen]\n \n #generated += sentence\n generated += ' '.join(sentence)\n print('----- Generating with seed: \"' + ' '.join(sentence) + '\"')\n sys.stdout.write(generated)\n\n for i in range(numwords):\n x = np.zeros((1, maxlen, len(syms)))\n for t, sym in enumerate(sentence):\n x[0, t, sym_indices[sym]] = 1.\n \n preds = model.predict(x, verbose=0)[0]\n next_index = sample(preds, diversity)\n next_sym = indices_sym[next_index]\n generated += ' '+next_sym\n sentence.append(next_sym)\n tmpsentence = sentence[1:]\n sentence = tmpsentence\n sys.stdout.write(next_sym+' ')\n sys.stdout.flush()\n print()", "def glove_embedding(self, texts, file):\n self.embedding_dict = dict()\n glove_file = open(file, encoding='utf-8')\n for line in glove_file:\n word_vector = line.split()\n word = word_vector[0]\n word_vector_arr = np.asarray(word_vector[1:], dtype='float32')\n self.embedding_dict[word] = word_vector_arr\n glove_file.close()\n \n i = 0\n with pgb.ProgressBar(max_value=len(texts)) as bar:\n for text in texts:\n vec = []\n text = text.split()\n for t in text:\n try:\n vec.append(self.embedding_dict[t.lower()])\n except KeyError:\n pass\n ## There are no matched words\n if len(vec) == 0:\n print(\"len 0 vec\")\n self.word_vec.append(np.zeros((100)))\n else:\n #print(np.array(vec))\n #print(np.array(vec).shape)\n sentence = self.sentence_vec(np.array(vec))\n #print(sentence)\n #print(sentence.shape)\n self.word_vec.append(sentence)\n i += 1\n bar.update(i)\n self.word_vec = np.array(self.word_vec)\n print(self.word_vec.shape)", "def generate_sentence_embeddings():\n generate_embeddings_sentence(\"Data/en-train.json\", \"Data_Sent_Embds/en_sent.pkl\")\n generate_embeddings_sentence(\"Data/es-train.json\", \"Data_Sent_Embds/es_sent.pkl\")\n generate_embeddings_sentence(\"Data/pr-train.json\", \"Data_Sent_Embds/pr_sent.pkl\")", "def generate_conll2003_embeddings():\n glove_embedding = get_glove_embedding()\n\n word2index = {}\n idx2word = {}\n embed_array = []\n\n word2index[\"<pad>\"] = 1\n embed_array.append(init_embedding())\n\n word2index[\"<unk>\"] = 0\n embed_array.append(init_embedding())\n\n data = []\n with open(TRAIN_DATA_PATH, \"r\") as f:\n for line in f:\n data.append(json.loads(line))\n\n idx = 2\n\n for sample in tqdm(data, total=len(data)):\n words = sample[\"tokens\"]\n\n for w in words:\n w = w.lower()\n\n # if word is not present in dictionary, add to dictionary and append embedding vector\n if w not in word2index.keys():\n word2index[w] = idx\n idx += 1\n if w not in glove_embedding.keys():\n ev = init_embedding()\n else:\n ev = glove_embedding[w]\n\n embed_array.append(ev)\n\n else:\n continue\n\n # save embeddings\n embed_array = np.vstack(embed_array)\n np.save(EMBD_OUTPUT_PATH, embed_array)\n\n # save dictionary\n print(\"Dicitionary Size: \", len(word2index))\n with open(DICTIONARY_OUTPUT_PATH, \"w\") as f:\n json.dump(word2index, f)", "def embed(text: 
str) -> np.ndarray:\n n = nlp(text)\n return n.vector", "def input_new_phrase(self, text):\n \n x_new_tokens = [word_idx[word] for word in text.split()]\n \n pred = self.model.predict(np.array([x_new_tokens]))\n pred = np.argmax(pred, axis=-1)[0]\n \n return [[word_list[w], tags[pred]] for (w, pred) in zip(range(len(x_new)), pred)]", "def get_glove_embedding():\n embedding = {}\n N = 400_000\n print(\"Reading glove embedding...\")\n with open(GLOVE_EMBD_PATH, \"rb\") as f:\n for line in tqdm(f, total=N):\n line = line.decode().split()\n word = line[0].lower()\n vector = np.array(line[1:]).astype(np.float32)\n embedding[word] = vector\n\n return embedding", "def embed(raw_seq, index_dict):\n return np.asarray([index_dict[word.lower()]\n if word.lower() in index_dict\n else index_dict[OOV_TOKEN] for word in raw_seq])", "def build_seq_embeddings(self):\n with tf.variable_scope(\"seq_embedding\"), tf.device(\"/cpu:0\"):\n embedding_map = tf.get_variable(\n name=\"map\",\n shape=[self.config.vocab_size, self.config.word_embedding_size],\n initializer=self.initializer)\n \n # We need to store the normalized lookup table for efficient mapping of embedding vectors to closest words\n self.normed_embedding_map = tf.nn.l2_normalize(embedding_map, dim=1)\n \n seq_embeddings = tf.nn.embedding_lookup(embedding_map, self.input_seqs) \n # seq_embeddings has the shape (batch_size, sequence_length, sentence_length, embedding_size)\n # meaning, for each index in input_seqs (with shape (batch_size, sequence_length, sentence_length)) it stores an embedding vector\n\n #print('Shape seq_embeddings: ' + str(seq_embeddings.get_shape()))\n\n self.seq_embeddings = seq_embeddings", "def generate_text(model, w2vmodel, nb_epoch, length=75, max_seq_length=20, seed=\"Rain drop drop top\"):\n global sample\n generated = ''\n sequences = seed\n\n generated += seed\n\n #clean seed\n seed=re.sub(r'<[^<]+?>', '', seed)\n #remove encoding characters like \\x86\n seed=re.sub(r'[^\\x00-\\x7f]','',seed)\n seed=re.sub(r'\\#','',seed)\n #remove punctuation\n seed=re.sub(r'[^A-Za-z0-9\\s]','',seed)\n\n #shorten if longer than max_seq_length\n seed = seed.split(' ')[:max_seq_length]\n\n word_ix_list = []\n for word in seed:\n try:\n word = word_to_ix(word,w2vmodel)\n except:\n #since we're using -1 as a null word (why we also pad with the not in vocab index), we'll use that for words that aren't in the word2vec model\n print('Warning: {0} not contained in training vocabulary. 
It will be ignored when computing output.'.format(word))\n word = word_to_ix('_UNSEEN_',w2vmodel)\n word_ix_list.append(word)\n\n #pad word_list with the unseen word2vec if shorter than max_seq_length\n word_ix_list = [word_to_ix('_UNSEEN_',w2vmodel)] * (max_seq_length-len(word_ix_list)) + word_ix_list\n\n for temp in [0.2, 0.5, .75, 1.0]:\n print('temperature: ', temp)\n for word in range(length):\n #reshape wordlist\n word_ix_list = np.asarray(word_ix_list).reshape(1,max_seq_length)\n\n #prediction = model.predict(x=word_ix_list)\n #next_ix = np.argmax(prediction)\n prediction = model.predict(x=word_ix_list,verbose=0)[0]\n next_ix = sample(prediction, temp)\n predicted_word = ix_to_word(next_ix,w2vmodel)\n\n generated += (' ' + predicted_word) #add predicted word to the generated output\n\n #remove first word from the word list to reduce the array for the max sequence length for the model\n word_ix_list = np.append(word_ix_list,next_ix)\n word_ix_list.shape\n word_ix_list = np.delete(word_ix_list,0,0)\n print(generated)\n print('-----')\n #print(generated)\n return", "def generate_sentence(model, opener_words):\n\n sentence=[]\n #sentences between 3 and 15 words\n length= random.randint(3,6)\n keys=model.keys()\n bigram=random.choice(list(keys))\n\n #choose a first word that can be a starter word\n while bigram[0] not in opener_words:\n bigram=random.choice(list(keys))\n #iterate until sentence is correct length\n for i in range(0,length):\n matches=[]\n found=False\n while not found:\n\n #search in keys for key[0] to match the bigram[1]\n for key in keys:\n regex=re.compile(r\"\\b%s\\b\"%bigram[1])\n result=regex.match(key[0])\n if result:\n matches.append(key)\n found=True\n if not found:\n matches=[]\n i=0\n bigram=random.choice(list(keys))\n sentence.pop()\n\n #add first member of bigram to sentence list\n sentence.append(bigram[1])\n #choose next bigram from the list of matches\n bigram=random.choice(matches)\n\n #combine strings from list\n return \" \".join(sentence)", "def Emojify_V2(input_shape, word_to_vec_map, word_to_index):\n \n ### START CODE HERE ###\n # Define sentence_indices as the input of the graph.\n # It should be of shape input_shape and dtype 'int32' (as it contains indices, which are integers).\n sentence_indices = Input(shape = input_shape, dtype = 'int32')\n \n # Create the embedding layer pretrained with GloVe Vectors (≈1 line)\n # def pretrained_embedding_layer(word_to_vec_map, word_to_index): # return embedding_layer\n embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)\n \n # Propagate sentence_indices through your embedding layer\n # (See additional hints in the instructions).\n embeddings = embedding_layer(sentence_indices) \n \n # Propagate the embeddings through an LSTM layer with 128-dimensional hidden state\n # The returned output should be a batch of sequences.\n X = LSTM(units = 128, return_sequences = True)(embeddings)\n # Add dropout with a probability of 0.5\n X = Dropout(rate = 0.5)(X)\n # Propagate X trough another LSTM layer with 128-dimensional hidden state\n # The returned output should be a single hidden state, not a batch of sequences.\n X = LSTM(units = 128, return_sequences = False)(X)\n # Add dropout with a probability of 0.5\n X = Dropout(rate = 0.5)(X) \n # Propagate X through a Dense layer with 5 units\n X = Dense(units = 5)(X)\n # Add a softmax activation\n X = Activation(activation = 'softmax')(X)\n \n # Create Model instance which converts sentence_indices into X.\n model = Model(inputs = sentence_indices, 
outputs = X)\n \n ### END CODE HERE ###\n \n return model", "def generate_words_greedily(self, model, session, X, words_to_idx):\n \n Xorig_clean = self.cleanOutput(X, words_to_idx)\n \n for i in range(len(X)):#iterate over allscentences\n #set eos pointer to eos index\n p_eos = np.argwhere(np.array(X[i])==words_to_idx['<eos>'])[0][0] # 2 is eos but would be better using the dict\n while True:\n #compute predictions\n feed_dict = {self.input_x: np.array(X[i]).reshape((1,29)),\n self.input_y: np.array(X[i]).reshape((1,29))} # input_y is not needed\n \n prediction, sentence_probability = session.run([self.predictions, self.sentence_probability], feed_dict)\n \n lastpred = prediction[0,p_eos-1]\n X[i][p_eos]=lastpred\n \n p_eos += 1\n if lastpred == words_to_idx['<eos>'] or p_eos==29: break\n \n #postprocess X\n Xclean = self.cleanOutput(X, words_to_idx)\n self.create_submission_file(Xorig_clean, task='originalX')\n self.create_submission_file(Xclean, task='continuation')", "def glove_embedding(self, word_index, padded, embedding_matrix, part='q'):\n print(\"*\" * 50, \"Start Glove embedding process\", \"*\" * 50)\n start_time = time()\n\n\n MAX_SEQ_LEN = None\n if part == 'q':\n MAX_SEQ_LEN = self.MAX_Q_SEQ_LEN\n elif part == 'a':\n MAX_SEQ_LEN = self.MAX_A_SEQ_LEN\n else:\n print(f\"Please indicate you want embedding question part or answer part\")\n\n\n input_layer = Input(shape=(MAX_SEQ_LEN,), dtype='int32')\n embedding_layer = Embedding(input_dim = len(word_index) + 1,\n output_dim = self.EMBEDDING_DIM,\n weights=[embedding_matrix],\n input_length=MAX_SEQ_LEN,\n trainable=False)(input_layer)\n # (number of sample, MAX_SEQ_LEN, EMBEDING_DIM)\n model = Model(inputs=input_layer, outputs=embedding_layer)\n model.compile('rmsprop', 'mse')\n output_array = model.predict(padded)\n\n cost_time = round((time() - start_time), 4)\n print(\"*\" * 40, \"End Glove embedding() with {} seconds\".format(cost_time), \"*\" * 40, end='\\n\\n')\n\n return output_array, embedding_layer", "def forward(self, doc):\n out = torch.tensor([]).float().to(self.device)\n\n for i in range(len(doc)):\n sentences_raw = sentencesplit(cleantxt(doc[i]))\n sentences_ready = torch.tensor([]).float().to(self.device)\n for sentence in sentences_raw:\n sentence = sentence.split()\n if sentence == []:\n continue\n lookup_tensor = torch.tensor([]).long().to(self.device)\n for word in sentence:\n if word in self.embedd_dict:\n lookup_tensor = torch.cat((lookup_tensor,\n torch.LongTensor([self.embedd_dict[word]])), 0)\n else:\n lookup_tensor = torch.cat((lookup_tensor, torch.LongTensor([0])), 0)\n # Word embedding\n xw = self.word_embedding(lookup_tensor).view(1, -1, self.embedding_dim).to(self.device)\n # Word GRU\n self.hidden_gru_words = self.init_hidden_words()\n hw, self.hidden_gru_words = self.gru_word(xw, self.hidden_gru_words)\n # Word MLP\n uw = nn.Tanh()(self.MLP_word(hw)).to(self.device)\n # Word attention\n attention_score = torch.matmul(uw, self.attention_word).squeeze().to(self.device)\n attention_score = F.softmax(attention_score, dim=0).view(uw.size(0), uw.size(1), 1).to(self.device)\n scored_x = (hw * attention_score).to(self.device)\n s = torch.sum(scored_x, dim=1).to(self.device)\n #collecting sentences\n sentences_ready = torch.cat((sentences_ready, s), 0)\n # Sentence GRU\n if len(sentences_ready) == 0:\n out = torch.cat((out,\n torch.randn(1, self.number_cat).to(self.device)), 0).to(self.device)\n continue\n sentences_ready_gru = sentences_ready.view(1, -1, self.embedding_dim).to(self.device)\n 
self.hidden_gru_sentences = self.init_hidden_sentences()\n hs, self.hidden_gru_sentences = self.gru_sentence(torch.tensor(sentences_ready_gru), self.hidden_gru_sentences)\n # SENTENCE MLP\n us = nn.Tanh()(self.MLP_sentence(hs)).to(self.device)\n # Sentence attention\n attention_score = torch.matmul(us, self.attention_sentence).squeeze().to(self.device)\n attention_score = F.softmax(attention_score, dim=0).view(us.size(0), us.size(1), 1).to(self.device)\n scored_x = (hs * attention_score).to(self.device)\n v = torch.sum(scored_x, dim=1).to(self.device)\n # classification\n p = self.MLP_classification(v).to(self.device)\n out = torch.cat((out, p.float()), 0).float().to(self.device)\n return out", "def create_embedding(skills):\n corpus = list(skills[\"description\"].values)\n embedder = SentenceTransformer(config[\"sentence_transformer\"][\"model\"])\n embedding = embedder.encode(corpus, show_progress_bar=True)\n return embedding", "def embedding_sentence_with_model(input_file, save_path, max_length, model_path):\n # load glove model\n model = gensim.models.KeyedVectors.load_word2vec_format(model_path)\n lines = _read_csv(input_file)\n split_lines = []\n label_list = []\n for line in lines:\n split_lines.append(sentence_split(line[1], max_length))\n label_list.append(int(line[2]))\n del lines\n\n writer = tf.python_io.TFRecordWriter(save_path)\n for index, line in enumerate(split_lines):\n bytes_words = []\n for word in line:\n if word in model:\n bytes_words.extend(model[word])\n else:\n bytes_words.extend([0] * 300)\n example = tf.train.Example(features=tf.train.Features(feature={\n \"label\":\n tf.train.Feature(int64_list=tf.train.Int64List(value=[label_list[index]])),\n \"features\":\n tf.train.Feature(float_list=tf.train.FloatList(value=bytes_words))\n }))\n writer.write(example.SerializeToString())", "def lemmatize_verbs(self):\n lemmas = []\n # lemmas = \"\"\n for word in self.words:\n lemma = wn.lemmatize(word, pos='v')\n lemmas.append(lemma)\n # lemmas += f\"{lemma} \"\n self.words = lemmas\n return self", "def build_bilstm(self, verbose=True):\r\n word_ids = Input(batch_shape=(None, None), dtype='int32', name='word_input')\r\n inputs = [word_ids]\r\n\r\n if self._params.use_pretrain_embedding:\r\n if verbose: logging.info(\"initial word embedding with pretrained embeddings\")\r\n if self._params.word_embedding_dim == 100:\r\n glove_file = self._params.data_dir + '/glove.6B.100d.txt'\r\n elif self._params.word_embedding_dim == 300:\r\n glove_file = self._params.data_dir + '/glove.42B.300d.txt'\r\n else:\r\n logging.error(\"we only support glove embedding with dimension 100 or 300\")\r\n raise ValueError(\"unmatch word dimension, we only support glove embedding with dimension 100 or 300\")\r\n glove_embedding_index = load_glove(glove_file, self._params.word_embedding_dim)\r\n word_vocab = self.input_processor.word_vocab.vocab\r\n glove_embeddings_matrix = np.zeros([len(word_vocab), self._params.word_embedding_dim])\r\n for word, i in word_vocab.items():\r\n vector = glove_embedding_index.get(word)\r\n if vector is not None:\r\n glove_embeddings_matrix[i] = vector\r\n \r\n word_embeddings = Embedding(input_dim=glove_embeddings_matrix.shape[0],\r\n output_dim=glove_embeddings_matrix.shape[1],\r\n trainable=False,\r\n mask_zero=True,\r\n weights=[glove_embeddings_matrix],\r\n name='word_embedding')(word_ids)\r\n else:\r\n word_embeddings = Embedding(input_dim=self._params.word_vocab_size,\r\n output_dim=self._params.word_embedding_dim,\r\n mask_zero=True,\r\n 
name='word_embedding')(word_ids)\r\n\r\n input_embeddings = [word_embeddings]\r\n if self._params.use_char:\r\n char_ids = Input(batch_shape=(None, None, None), dtype='int32', name='char_input')\r\n inputs.append(char_ids)\r\n if self._params.char_feature == \"lstm\":\r\n char_embeddings = Embedding(input_dim=self._params.char_vocab_size,\r\n output_dim=self._params.char_embedding_dim,\r\n mask_zero=True,\r\n name='char_embedding')(char_ids)\r\n if verbose: logging.info(\"using charcter level lstm features\")\r\n char_feas = TimeDistributed(Bidirectional(LSTM(self._params.char_lstm_size)), name=\"char_lstm\")(char_embeddings)\r\n elif self._params.char_feature == \"cnn\":\r\n # cnn do not support mask\r\n char_embeddings = Embedding(input_dim=self._params.char_vocab_size,\r\n output_dim=self._params.char_embedding_dim,\r\n name='char_embedding')(char_ids)\r\n if verbose: logging.info(\"using charcter level cnn features\")\r\n char_feas = char_cnn_encode(char_embeddings, self._params.n_gram_filter_sizes, self._params.n_gram_filter_nums)\r\n else:\r\n raise ValueError('char feature must be lstm or cnn')\r\n\r\n input_embeddings.append(char_feas)\r\n\r\n if self._params.use_pos:\r\n if verbose: logging.info(\"use pos tag features\")\r\n pos_ids = Input(batch_shape=(None, None), dtype='int32', name='pos_input')\r\n inputs.append(pos_ids)\r\n\r\n\r\n pos_embeddings = Embedding(input_dim=self._params.pos_vocab_size,\r\n output_dim=self._params.pos_embedding_dim,\r\n mask_zero=True,\r\n name='pos_embedding')(pos_ids)\r\n input_embeddings.append(pos_embeddings)\r\n\r\n if self._params.use_dict:\r\n if verbose: logging.info(\"use user dict features\")\r\n dict_ids = Input(batch_shape=(None, None), dtype='int32', name='dict_input')\r\n inputs.append(dict_ids)\r\n\r\n dict_embeddings = Embedding(input_dim=self._params.dict_vocab_size,\r\n output_dim=self._params.dict_embedding_dim,\r\n mask_zero=True,\r\n name='dict_embedding')(dict_ids)\r\n input_embeddings.append(dict_embeddings)\r\n\r\n input_embedding = Concatenate(name=\"input_embedding\")(input_embeddings) if len(input_embeddings)>1 else input_embeddings[0]\r\n input_embedding_ln = LayerNormalization(name='input_layer_normalization')(input_embedding)\r\n #input_embedding_bn = BatchNormalization()(input_embedding_ln)\r\n input_embedding_drop = Dropout(self._params.dropout, name=\"input_embedding_dropout\")(input_embedding_ln)\r\n\r\n z = Bidirectional(LSTM(units=self._params.main_lstm_size, return_sequences=True, dropout=0.2, recurrent_dropout=0.2),\r\n name=\"main_bilstm\")(input_embedding_drop)\r\n z = Dense(self._params.fc_dim, activation='tanh', name=\"fc_dense\")(z)\r\n\r\n if self._params.use_crf:\r\n if verbose: logging.info('use crf decode layer')\r\n crf = CRF(self._params.num_labels, sparse_target=False,\r\n learn_mode='marginal', test_mode='marginal', name='crf_out')\r\n loss = crf.loss_function\r\n pred = crf(z)\r\n else:\r\n loss = 'categorical_crossentropy'\r\n pred = Dense(self._params.num_labels, activation='softmax', name='softmax_out')(z)\r\n\r\n model = Model(inputs=inputs, outputs=pred)\r\n model.summary(print_fn=lambda x: logging.info(x + '\\n'))\r\n model.compile(loss=loss, optimizer=self._params.optimizer)\r\n\r\n self.model = model", "def translate_beam_search(source_sentence: List[int], model: Seq2SeqAttentionModel,\n beam_width: int, max_length=10) -> Tuple[List[int], float]:\n encoder_hiddens = encode_all(source_sentence, model)\n beam_elems = []\n # stack x hid_dim\n prev_hidden = encoder_hiddens[-1]\n prev_context = 
torch.zeros(model.hidden_dim)\n\n beam_elems= [([SOS_token], float(0), prev_hidden, prev_context)]\n candidate_translations = []\n available_width = beam_width\n\n for i in range(max_length):\n if available_width >0:\n candidate_beam_elems = []\n for b in range(len(beam_elems)):\n prev_predict, prev_log_prob, prev_hidden, prev_context = beam_elems[b]\n probs, prev_hidden, prev_context, _ = decode(prev_hidden, encoder_hiddens, prev_context,\n prev_predict[-1], model)\n log_probs = torch.log(probs)\n top_log_probs, top_preds = torch.topk(log_probs,available_width)\n for k in range(len(top_log_probs)):\n curr_log_prob = prev_log_prob + top_log_probs[k].item()\n curr_pred_list = prev_predict + [top_preds[k].item()]\n candidate = (curr_pred_list, curr_log_prob, prev_hidden, prev_context)\n candidate_pos = -1\n for pos in range(len(candidate_beam_elems)):\n if curr_log_prob > candidate_beam_elems[pos][1]:\n candidate_pos = pos\n if not candidate_pos == -1:\n candidate_beam_elems.insert(candidate_pos+1, candidate)\n elif len(candidate_beam_elems) < available_width:\n candidate_beam_elems.append(candidate)\n if len(candidate_beam_elems) > available_width:\n candidate_beam_elems.pop()\n\n beam_elems = []\n for candidate in candidate_beam_elems:\n if candidate[0][-1] == EOS_token or i==(max_length-1):\n candidate_translations.append(candidate)\n available_width -= 1\n else:\n beam_elems.append(candidate)\n\n max_prob = -math.inf\n best_elem = -1\n for pos in range(len(candidate_translations)):\n norm_prob = candidate_translations[pos][1]/len(candidate_translations[pos][0])\n if norm_prob > max_prob:\n max_prob = norm_prob\n best_elem = pos\n\n # remove SOS token from the beginning\n del candidate_translations[best_elem][0][0]\n\n return candidate_translations[best_elem][0], candidate_translations[best_elem][1]", "def text2vec(doc_tok, model, dim=300):\n doc_embedding = np.zeros(dim)\n valid_words = 0\n for word in doc_tok:\n if word in model:\n valid_words += 1\n doc_embedding += model.query(word)\n else:\n continue\n if valid_words > 0:\n return doc_embedding / valid_words\n else:\n return doc_embedding", "def _add_seq2seq(self):\n hps = self._hps\n vsize = self._vocab.size() # size of the vocabulary\n \n with tf.variable_scope('seq2seq'):\n # Some initializers\n self.rand_unif_init = tf.random_uniform_initializer(-hps.rand_unif_init_mag, hps.rand_unif_init_mag, seed=123)\n self.trunc_norm_init = tf.truncated_normal_initializer(stddev=hps.trunc_norm_init_std)\n\n\n with tf.variable_scope('embedding'):\n if hps.pretrained_embeddings:\n word2vec = load_embeddings(hps.embeddings_path, self._vocab.word2id, hps.rand_unif_init_mag)\n self.embedding = tf.get_variable('embedding', [vsize, hps.emb_dim],\n dtype=tf.float32, initializer=tf.constant_initializer(word2vec))\n # self.assign_embedding = tf.assign(self.embedding, word2vec)\n else:\n self.embedding = tf.get_variable('embedding', [vsize, hps.emb_dim],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n if hps.mode==\"train\": self._add_emb_vis(self.embedding) # add to tensorboard\n\n # tensor with shape (batch_size, max_enc_steps, emb_size)\n emb_enc_inputs = tf.nn.embedding_lookup(self.embedding, self._enc_batch)\n if self._hps.hier:\n enc_batch_sections = tf.unstack(self._enc_batch_sections, axis=1)\n sec_emb_enc_inputs = [tf.nn.embedding_lookup(self.embedding, section)\n for section in enc_batch_sections]\n # list length max_dec_steps containing shape (batch_size, emb_size)\n emb_dec_inputs = [tf.nn.embedding_lookup(self.embedding, x)\n for x 
in tf.unstack(self._dec_batch, axis=1)]\n\n\n # Hierarchical attention model\n if self._hps.hier:\n with tf.variable_scope('encoder'), tf.device(self._next_device()):\n sec_enc_outs = []\n states_fw = []\n states_bw = []\n states = []\n\n # level 1, encode words to sections\n with tf.variable_scope(\"word_level_encoder\", reuse=tf.AUTO_REUSE) as scope:\n encoder_outputs_words = []\n cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n fw_st, bw_st = None, None\n if self._hps.use_do: # DropOut\n cell_fw = tf.contrib.rnn.DropoutWrapper(cell_fw, output_keep_prob=1.0 - self._hps.do_prob)\n cell_bw = tf.contrib.rnn.DropoutWrapper(cell_bw, output_keep_prob=1.0 - self._hps.do_prob)\n for i in range(self._hps.num_sections):\n encoder_tmp_output, (fw_st, bw_st) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, inputs=sec_emb_enc_inputs[i], dtype=tf.float32,\n sequence_length=self._batch_sections_len[:,i], swap_memory=True, initial_state_bw=bw_st, initial_state_fw=fw_st)\n # concatenate the forwards and backwards states\n encoder_tmp_output = tf.concat(axis=2, values=encoder_tmp_output) #shape=[batch x seq_len x hidden_size]\n \n encoder_outputs_words.append(encoder_tmp_output)\n # instead of concating the fw and bw states, we use a ff network\n combined_state = self._reduce_states(fw_st, bw_st)\n states.append(combined_state)\n scope.reuse_variables()\n \n # level 2, encode sections to doc\n encoder_outputs_words = tf.stack(encoder_outputs_words, axis=1) # shape [batch x num_sections x seq_len x hidden_size]\n shapes = encoder_outputs_words.shape\n encoder_outputs_words = tf.reshape(encoder_outputs_words, (shapes[0].value, -1, shapes[-1].value)) #shape=[batch x (seq_len * num_sections) x hidden_size]\n\n doc_sections_h = tf.stack([s.h for s in states], axis=1) # [batch x num_sections x hidden_size]\n doc_sections_c = tf.stack([s.c for s in states], axis=1) # [batch x num_sections x hidden_size]\n\n with tf.variable_scope(\"section_level_encoder\"):\n if FLAGS.section_level_encoder == 'RNN':\n cell_fw_1 = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw_1 = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n if self._hps.use_do:\n cell_fw_1 = tf.contrib.rnn.DropoutWrapper(cell_fw_1, output_keep_prob=1.0 - self._hps.do_prob)\n cell_bw_1 = tf.contrib.rnn.DropoutWrapper(cell_bw_1, output_keep_prob=1.0 - self._hps.do_prob)\n encoder_output_sections, (fw_st_2, bw_st_2) =\\\n tf.nn.bidirectional_dynamic_rnn(cell_fw_1, cell_bw_1, inputs=doc_sections_h, sequence_length=self._doc_sec_lens, dtype=tf.float32, swap_memory=True)\n encoder_output_sections = tf.concat(axis=2, values=encoder_output_sections)\n doc_sections_state = self._reduce_states(fw_st_2, bw_st_2)\n else:\n if FLAGS.section_level_encoder == 'AVG': # average section cells\n doc_sections_state_h = tf.reduce_mean(doc_sections_h, axis=1)\n doc_sections_state_c = tf.reduce_mean(doc_sections_c, axis=1)\n elif FLAGS.section_level_encoder == 'FF': # use a feedforward network to combine section cells\n doc_sections_state_h = tf.reshape([doc_sections_h.shape[0].eval(), -1])\n doc_sections_state_h = tf.layers.dense(\n inputs=doc_sections_state_h,\n units=self._hps.hidden,\n activation=tf.nn.relu) \n doc_sections_state_c = tf.reshape([doc_sections_c.shape[0].eval(), -1])\n 
doc_sections_state_c = tf.layers.dense(\n inputs=doc_sections_state_c,\n units=self._hps.hidden,\n activation=tf.nn.relu)\n else:\n raise AttributeError('FLAGS.section_level_encoder={} is not a valid option'.format(FLAGS.section_level_encoder))\n doc_sections_state = tf.contrib.rnn.LSTMStateTuple(doc_sections_state_c, doc_sections_state_h)\n encoder_output_sections = doc_sections_h \n \n elif not self._hps.multi_layer_encoder:\n with tf.variable_scope('encoder'):\n with tf.variable_scope('word_level_encoder'):\n cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n (encoder_outputs, (fw_st, bw_st)) =\\\n tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs=emb_enc_inputs, dtype=tf.float32, sequence_length=self._enc_lens, swap_memory=True)\n # concatenate the forwards and backwards states\n encoder_outputs = tf.concat(axis=2, values=encoder_outputs)\n \n # stack n layers of lstms for encoder\n elif self._hps.multi_layer_encoder:\n # TODO: check\n for layer_i in xrange(self._hps.enc_layers):\n with tf.variable_scope('encoder%d'%layer_i), tf.device(\n self._next_device()):\n cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n if self._hps.use_do: # add dropout\n cell_fw = tf.contrib.rnn.DropoutWrapper(cell_fw, output_keep_prob=1.0 - self._hps.do_prob)\n cell_bw = tf.contrib.rnn.DropoutWrapper(cell_bw, output_keep_prob=1.0 - self._hps.do_prob)\n emb_enc_inputs, (fw_st, bw_st) =\\\n tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs=emb_enc_inputs, dtype=tf.float32, sequence_length=self._enc_lens, swap_memory=True)\n emb_enc_inputs = tf.concat(axis=2, values=emb_enc_inputs)\n encoder_outputs = emb_enc_inputs\n \n if self._hps.hier:\n self._enc_sec_states = encoder_output_sections\n self._enc_states = encoder_outputs_words \n else:\n self._enc_states = encoder_outputs\n self._enc_sec_states = None\n \n # convert the encoder bidirectional hidden state to the decoder state\n # (unidirectional) by an MLP\n if self._hps.hier:\n self._dec_in_state = doc_sections_state\n else:\n with tf.variable_scope('encoder'):\n with tf.variable_scope('word_level_encoder'):\n self._dec_in_state = self._reduce_states(fw_st, bw_st) \n \n # Add the decoder\n\n with tf.variable_scope('decoder'), tf.device(self._next_device()):\n cell = tf.contrib.rnn.LSTMCell(\n self._hps.hidden_dim,\n state_is_tuple=True,\n initializer=self.rand_unif_init)\n \n # We need to pass in the previous step's coverage vector each time\n prev_coverage = self.prev_coverage\\\n if hps.mode==\"decode\" and self._hps.coverage \\\n else None \n \n \n if self._hps.hier:\n decoder_outputs, self._dec_out_state, self.attn_dists, self.p_gens, self.coverage, self.attn_dists_sec =\\\n self.attn_decoder(emb_dec_inputs,\n self._dec_in_state,\n self._enc_states,\n cell,\n self._enc_sec_states,\n num_words_section=self._batch_sections_len,\n enc_padding_mask=self._enc_padding_mask,\n enc_section_padding_mask=self._enc_section_padding_mask,\n initial_state_attention=(self._hps.mode==\"decode\"),\n pointer_gen=self._hps.pointer_gen,\n use_coverage=self._hps.coverage,\n prev_coverage=prev_coverage,\n temperature=self._hps.temperature\n )\n \n else:\n decoder_outputs, self._dec_out_state, self.attn_dists, self.p_gens, 
self.coverage, _ =\\\n self.attn_decoder(emb_dec_inputs,\n self._dec_in_state,\n self._enc_states,\n cell,\n encoder_section_states=None,\n num_words_section=None,\n enc_padding_mask=self._enc_padding_mask,\n initial_state_attention=(self._hps.mode==\"decode\"),\n pointer_gen=self._hps.pointer_gen,\n use_coverage=self._hps.coverage,\n prev_coverage=prev_coverage,\n ) \n \n\n # Project decoder output to vocabulary\n with tf.variable_scope('output_projection'), tf.device(self._next_device()):\n if self._hps.output_weight_sharing:\n # share weights of embedding layer with projection\n # self.embedding is in shape [vsize, hps.emb_dim]\n w_proj = tf.get_variable('w_proj', [self._hps.emb_dim, self._hps.hidden_dim],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n w = tf.tanh(tf.transpose(tf.matmul(self.embedding, w_proj))) # shape = [vsize, hps.hidden_dim]\n \n # w_t = tf.transpose(w)\n b = tf.get_variable('b', [vsize],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n else: \n w = tf.get_variable('w', [self._hps.hidden_dim, vsize],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n # w_t = tf.transpose(w)\n b = tf.get_variable('b', [vsize],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n # vocabulary score at each decoder step\n vocab_scores = []\n for i,output in enumerate(decoder_outputs):\n if i > 0:\n tf.get_variable_scope().reuse_variables()\n vocab_scores.append(tf.nn.xw_plus_b(output, w, b)) # apply the linear layer\n\n # the final vocab distribution for each decoder time step\n # shape of each element is [batch_size, vsize]\n vocab_dists = [tf.nn.softmax(s) for s in vocab_scores] \n\n \n # pointing / generating\n if FLAGS.pointer_gen:\n final_dists = self._calc_final_dist(vocab_dists, self.attn_dists)\n# log_dists = [tf.log(dist) for dist in final_dists]\n else:\n# log_dists = [tf.log(dist) for dist in vocab_dists]\n final_dists = vocab_dists\n \n\n # Calculate Losses:\n \n if self._hps.mode in ['train', 'eval']:\n # Calculate the loss\n with tf.variable_scope('loss'), tf.device(self._next_device()):\n if FLAGS.pointer_gen:\n # Calculate the loss per step\n # This is fiddly; we use tf.gather_nd to pick out the gold target words\n # will be list length max_dec_steps containing shape (batch_size)\n loss_per_step = [] \n batch_nums = tf.range(0, limit=hps.batch_size) # shape (batch_size)\n for dec_step, dist in enumerate(final_dists):\n # The indices of the target words. shape (batch_size)\n targets = self._target_batch[:,dec_step] \n indices = tf.stack( (batch_nums, targets), axis=1) # shape (batch_size, 2)\n # shape (batch_size). 
loss on this step for each batch\n gold_probs = tf.gather_nd(dist, indices)\n losses = -tf.log(gold_probs)\n loss_per_step.append(losses)\n\n # Apply dec_padding_mask mask and get loss\n self._loss = _mask_and_avg(loss_per_step, self._dec_padding_mask)\n \n\n else: # baseline model\n # this applies softmax internally\n self._loss = tf.contrib.seq2seq.sequence_loss(\n tf.stack(vocab_scores, axis=1), self._target_batch, self._dec_padding_mask) # this applies softmax internally\n\n tf.summary.scalar('loss', self._loss)\n\n # Calculate coverage loss from the attention distributions\n if self._hps.coverage:\n with tf.variable_scope('coverage_loss'):\n self._coverage_loss = _coverage_loss(self.attn_dists, self._dec_padding_mask)\n tf.summary.scalar('coverage_loss', self._coverage_loss)\n self._total_loss = self._loss + self._hps.cov_loss_wt * self._coverage_loss\n tf.summary.scalar('total_loss', self._total_loss)\n \n # ---------------------------/\n\n\n if self._hps.mode == \"decode\":\n assert len(final_dists) == 1 # final_dists is a singleton list containing shape (batch_size, extended_vsize)\n final_dists = final_dists[0]\n topk_probs, self._topk_ids = tf.nn.top_k(final_dists, hps.batch_size*2) # take the k largest probs. note batch_size=beam_size in decode mode\n self._topk_log_probs = tf.log(topk_probs)", "def embed(self, word: Any) -> dy.Expression:\n raise NotImplementedError('embed must be implemented in Embedder subclasses')", "def generate_sentence(word1, word2, length, vocab, model):\n reverse_vocab = {idx: word for word, idx in vocab.items()}\n output_string = np.zeros((1, length), dtype=np.int)\n output_string[:, 0: 2] = vocab[word1], vocab[word2]\n\n for end in range(2, length):\n start = end - 2\n output_string[:, end] = np.argmax(model(output_string[:, start:end]), axis=1)\n text = [reverse_vocab[i] for i in list(output_string[0])]\n \n print(\" \".join(text))", "def make_text(markov_chains):\n\n random_num = generate_random_number(markov_chains.keys())\n\n random_text = []\n\n start_words = generate_start_words(random_num, markov_chains.keys())\n \n random_text.extend(start_words)\n\n\n for i in range(500):\n word_tuple = (random_text[-2],random_text[-1])\n next_word = add_next_word(word_tuple, markov_chains)\n random_text.append(next_word)\n\n return random_text", "def generate_text(session, model, config, starting_text='<eos>',\n stop_length=100, stop_tokens=None, temp=1.0):\n state = model.initial_state.eval()\n # Imagine tokens as a batch size of one, length of len(tokens[0])\n tokens = [model.vocab.encode(word) for word in starting_text.split()]\n for i in xrange(stop_length):\n ### YOUR CODE HERE\n #print tokens\n feed = {}\n #x = np.array([tokens[-1]])\n #x.reshape(1,1)\n feed[model.input_placeholder] = [[tokens[-1]]]\n feed[model.dropout_placeholder] = 1\n feed[model.initial_state] = state\n y_pred, state = session.run([model.predictions[-1], model.final_state], feed_dict=feed)\n ### END YOUR CODE\n next_word_idx = sample(y_pred[0], temperature=temp)\n tokens.append(next_word_idx)\n if stop_tokens and model.vocab.decode(tokens[-1]) in stop_tokens:\n break\n output = [model.vocab.decode(word_idx) for word_idx in tokens]\n return output", "def create_text_sequence_feature(fl, sentence, sentence_len, vocab):\n sentence_transformed = transform_sentence(sentence, vocab)\n for word_id in sentence_transformed:\n fl.feature.add().int64_list.value.extend([word_id])\n return fl", "def embed_seq(self,X_seq,Y_seq):\n X_embed = 
-tr.ones(len(X_seq),self.og_signal_dim+self.og_noise_dim)\n # find trials of corresponding types\n pm_trials_bool = X_seq >= self.ntokens_og\n pm_trials = np.where(pm_trials_bool)\n og_trials = np.where(np.logical_not(pm_trials_bool))\n # take signal_dim (time,edim_signal_dim)\n pm_embeds = self.emat_pm[X_seq[pm_trials] - self.ntokens_og] \n og_embeds = self.emat_og[X_seq[og_trials]] \n # make noise (time,edim_noise)\n pm_noise = tr_noise_pm([len(pm_embeds),self.pm_noise_dim])\n og_noise = tr_noise_og([len(og_embeds),self.og_noise_dim])\n # cat signal_dim and noise (time,edim)\n pm_embeds = tr.cat([pm_embeds,pm_noise],-1)\n og_embeds = tr.cat([og_noise,og_embeds],-1)\n # put into respective positions\n X_embed[pm_trials] = pm_embeds\n X_embed[og_trials] = og_embeds \n # include batch dim \n X_embed = tr.unsqueeze(X_embed,1)\n Y_embed = tr.unsqueeze(tr.LongTensor(Y_seq),1)\n return X_embed,Y_embed", "def sample_beam(model, input_embedding, char2idx, idx2char, k=5, maxlen=30,\n start='START', use_head=True):\n with torch.no_grad():\n device = input_embedding.device\n softmax = nn.Softmax(dim=1)\n if use_head:\n input_embedding = input_embedding.view(1, -1)\n\n inp = [torch.LongTensor([char2idx[start]]).to(device)]\n inp = nn.utils.rnn.pack_sequence(inp)\n out, hidden = model(input_embedding, inp, use_head=use_head)\n\n out = softmax(out.data).view(-1).cpu().numpy()\n max_k = np.argsort(out)[-k:][::-1]\n oldprobs = out[max_k]\n words = [[i] for i in max_k]\n inp = pack([torch.LongTensor([j]).to(device) for j in max_k])\n\n if model.mode == 'LSTM':\n hidden0 = torch.cat([hidden[0] for i in range(k)], dim=1)\n hidden1 = torch.cat([hidden[1] for i in range(k)], dim=1)\n hidden = hidden0, hidden1\n else:\n hidden = torch.cat([hidden for i in range(k)], dim=1)\n WORDS = []\n for c in range(maxlen):\n out, hidden = model(hidden, inp, use_head=False)\n out = softmax(out.data).cpu().numpy()\n\n #print(out.shape)\n inpnp = inp.data.detach().cpu().numpy()\n done = np.where(inpnp == char2idx['END'])\n out[done] = 0\n if len(out[done]) != 0:\n #print(out[done].shape)\n for d in done[0]:\n out[d][char2idx['END']] = 1\n #print(done)\n\n #print(out)\n #print(out[done])\n out = (oldprobs.reshape(-1, 1)*out)\n max_k = np.argsort(out)[:, -k:][:, ::-1]\n\n #print(max_k)\n probs = np.array([out[i][max_k[i]] for i in range(k)])\n #print(probs)\n flat = probs.reshape(-1)\n max_k2 = np.argsort(flat)[::-1][:k]\n word_inds = max_k2//k\n next_chars_inds = max_k2%k\n\n oldprobs = flat[max_k2]\n #print(oldprobs)\n\n new_words = []\n new_inp = []\n for i, word_ind in enumerate(word_inds):\n next_char = max_k[word_ind][next_chars_inds[i]]\n if next_char == char2idx['END']:\n #print(\"HIT AN END at word {}\".format(word_ind))\n WORDS.append((words[word_ind], oldprobs[i]))\n #the_word = words[word_ind]\n #return ''.join([idx2char[i] for i in the_word])\n new_inp.append(torch.LongTensor([next_char]).to(device))\n word = words[word_ind][:]\n word = word + [next_char]\n new_words.append(word)\n words = new_words[:]\n\n if model.mode == 'LSTM':\n h1, h2 = hidden\n h1, h2 = h1[0][word_inds].view(1, k, -1), h2[0][word_inds].view(1, k, -1)\n hidden = h1, h2\n else:\n hidden = hidden[0][word_inds].view(1, k, -1)\n\n\n inp = pack(new_inp)\n\n return [''.join([idx2char[i] for i in word if i != char2idx['END']]) for word in words], oldprobs", "def forward(self, words):\n seq_lengths = words.data.ne(0).long().sum(dim=1)\n seq_lengths, perm_idx = seq_lengths.sort(0, descending=True)\n _, rev_perm_idx = perm_idx.sort(0)\n seq_tensor = 
words[perm_idx]\n\n embeds = self.embeddings(seq_tensor)\n packed_input = pack_padded_sequence(embeds, list(seq_lengths), batch_first=True)\n packed_output, (ht, ct) = self.char_lstm(packed_input)\n concatenated_hiddens = torch.cat([ht[0], ht[1]], dim=-1)\n return concatenated_hiddens[rev_perm_idx]", "def set_glove_embedding(self,fpath,embedding_dim):\n\t\temb = np.random.randn(self._count,embedding_dim)\n#\ttf.logging.info(emb[0])\n\t\twith open(fpath) as f: #python 3.x support \n\t\t\tfor k,line in enumerate(f):\n\t\t\t\tfields = line.split()\n\t\t\t\tif len(fields) - 1 != embedding_dim:\n\t\t\t\t\t# Sometimes there are funny unicode parsing problems that lead to different\n\t\t\t\t\t# fields lengths (e.g., a word with a unicode space character that splits\n\t\t\t\t\t# into more than one colum n). We skip those lines. Note that if you have\n\t\t\t\t\t# some kind of long header, this could result in all of your lines getting\n\t\t\t\t\t# skipped. It's hard to check for that here; you just have to look in the\n\t\t\t\t\t# embedding_misses_file and at the model summary to make sure things look\n\t\t\t\t\t# like they are supposed to.\n\t\t\t\t\t#logger.warning(\"Found line with wrong number of dimensions (expected %d, was %d): %s\",\n\t\t\t\t\t\t\t# embedding_dim, len(fields) - 1, line)\n\t\t\t\t\traise Exception(\"Found line with wrong number of dimensions (expected %d, was %d): %s\",\n\t\t\t\t\t\t\t\t\t\t\t embedding_dim, len(fields) - 1, line)\n\t\t\t\t\tcontinue\n\t\t\t\tword = fields[0]\n\t\t\t\tif word in self._word_to_id:\n\t\t\t\t\tvector = np.asarray(fields[1:], dtype='float32')\n\t\t\t\t\temb[self._word_to_id[word]] = vector\n#\t\tif k%1000 == 0:\n#\t\t tf.logging.info('glove : %d',k)\n\t\tself.glove_emb = emb", "def makeD2VLabels(sequences, **kargs): # refactored from seqAnalzyer \n # from collections import namedtuple # can customize your own attributes (instead of using gensim's attributes such as words and tags)\n import gensim\n def index_label(i): \n return '%s_%s' % (label_prefix, i)\n\n # [params] redundant? \n # cohort_name = kargs.get('cohort', 'diabetes')\n # seq_ptype = kargs.get('seq_ptype', 'regular') # values: regular, random, diag, med, lab ... 
default: regular\n\n # attributes = D2V.label_attributes # ['codes', 'labels', ] \n\n # [old] use gensim.models.doc2vec.TaggedDocument\n # LabelDoc = namedtuple('LabelDoc', attributes) # a namedtuple with 2 attributes words and tags\n # LabelDoc = namedtuple('LabelDoc', ['words', 'labels'])\n label_prefix = seqparams.TDoc.doc_label_prefix \n exclude = set(string.punctuation)\n all_docs = []\n\n # [input]\n assert sequences is not None and len(sequences) > 0\n\n labels = kargs.get('labels', []) # precomputed sentence labels \n if not labels: \n # df_ldoc = labelDoc(sequences, load_=load_label, seqr='full', sortby='freq', seq_ptype=seq_ptype)\n raise ValueError, \"No user-defined labels given.\"\n \n # [note] below is for generating surrogate class labels \n # labeling_routine = kargs.get('labeler', labelDocByFreqDiag) # any labelDoc*\n # assert hasattr(labeling_routine, '__call__'), \"Invalid labeler: %s\" % labeling_routine\n # labels = mlabels = labeling_routine(sequences, **kargs)\n # labelx = labelize()\n else: \n assert len(labels) == len(sequences)\n\n # label normalization: ensure that each label is a list \n labelx = TDocTag.labelAsIs(labels) # TDocTag.canonicalize(labels)\n print('makeD2VLabels> doc tag examples:\\n%s\\n' % labelx[:10])\n # each element in tagx should be a list\n\n for i, sen in enumerate(sequences):\n if isinstance(sen, str): \n word_list = sen.split() \n else: \n word_list = sen # split is already done\n\n # For every sentences, if the length is less than 3, we may want to discard it\n # as it seems too short. \n # if len(word_list) < 3: continue # filter short sentences\n \n tagl = labelx[i] # condition tagl is in the list (multilabel) format\n assert isinstance(tagl, list)\n if isinstance(sen, str): \n sen = ''.join(ch for ch in sen if ch not in exclude) # filter excluded characters\n\n all_docs.append(gensim.models.doc2vec.TaggedDocument(sen.split(), tagl))\n # all_docs.append(LabelDoc(sen.split(), tagl)) # format: sequence (list of tokens) + labels (a list of labels)\n else: \n\n all_docs.append(gensim.models.doc2vec.TaggedDocument(sen, tagl))\n # all_docs.append(LabelDoc(sen, tagl)) # assuming unwanted char already filetered \n\n # Print out a sample for one to view what the structure is looking like \n # print all_docs[0:10]\n for i, doc in enumerate(all_docs[0:5]+all_docs[-5:]): \n print('> doc #%d: %s' % (i, doc))\n # [log] e.g. doc #3: LabelDoc(words=['583.81', '250.41', 'V45.81', ... , '48003'], tags=['362.01_599.0_250.51'])\n\n return all_docs", "def _add_pre_trained_embedding(self):\n\n if self.embedding_type['type'] == 'glove':\n self.logging.info('use pre-trained glove word2vec')\n # a. load pre trained glove\n GLOVE_DIR = '../data/glove_pretrained/glove.6B'\n glove_suffix_name = 'glove.6B.' + str(self.embedding_size) + 'd.txt'\n import os\n import numpy as np\n\n embeddings_index = {}\n f = open(os.path.join(GLOVE_DIR, glove_suffix_name)) # 'glove.6B.100d.txt'))\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n\n self.logging.info('')\n self.logging.info('Found %s word vectors.' % len(embeddings_index))\n\n # b. 
compute embedding matrix\n embedding_matrix = np.zeros((len(self.word_index) + 1, self.embedding_size))\n cnt = 0\n for word, i in self.word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector # words not found in embedding index will be all-zeros.\n else:\n # self.logging.info('token in train missing in word2vec: ' + str(word))\n cnt += 1\n self.logging.info('total tokens missing: ' + str(cnt) + ' / ' + str(len(self.word_index)))\n\n # c. build embedding layer\n from keras.layers import Embedding\n embedding_layer = Embedding(len(self.word_index) + 1,\n self.embedding_size,\n weights=[embedding_matrix],\n input_length=self.maxlen,\n trainable=False)\n\n elif self.embedding_type['type'] == 'gensim':\n self.logging.info('use pre-trained gensim word2vec')\n\n import gzip\n import gensim\n from keras.layers import Embedding\n import numpy as np\n\n # fname = '../data/word2vec_pretrained/motors/d_300_k_712904_w_6_e_60_v_motors'\n # fname = '../data/word2vec_pretrained/fashion/d_300_k_1341062_w_6_e_70_v_fashion'\n\n self.logging.info('load word2vec path: ' + str(self.embedding_type['path']))\n model = gensim.models.Word2Vec.load(self.embedding_type['path'])\n pretrained_weights = model.wv.syn0\n vocab_size, vector_dim = pretrained_weights.shape\n\n method = 3\n if method == 1:\n self.logging.info('word2vec attempt to fit into embedding layer - middle complex')\n # convert the wv word vectors into a numpy matrix that is suitable for insertion\n # into our TensorFlow and Keras models\n\n embedding_matrix = np.zeros((len(model.wv.vocab), vector_dim))\n for i in range(len(model.wv.vocab)):\n embedding_vector = model.wv[model.wv.index2word[i]]\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n embedding_layer = Embedding(input_dim=embedding_matrix.shape[0],\n output_dim=embedding_matrix.shape[1],\n # input_length=self.maxlen,\n weights=[embedding_matrix],\n trainable=False)\n elif method == 2:\n self.logging.info('word2vec simple embedding matching - simple complex')\n embedding_layer = Embedding(input_dim=vocab_size,\n output_dim=vector_dim,\n input_length=self.maxlen,\n weights=[pretrained_weights],\n trainable=False)\n elif method == 3:\n\n self.logging.info('word2vec match using word_index from keras tokenizer - as used in glove match above')\n # b. compute embedding matrix\n\n # sd = 1 / np.sqrt(len(self.word_index) + 1)\n # embedding_matrix = np.random.normal(0, scale=sd, size=(len(self.word_index) + 1, self.embedding_size))\n\n embedding_matrix = np.zeros((len(self.word_index) + 1, self.embedding_size))\n cnt = 0\n for word, i in self.word_index.items():\n if word in model.wv:\n embedding_vector = model.wv[word]\n embedding_matrix[i] = embedding_vector\n else:\n # self.logging.info('token in train missing in word2vec: ' + str(word))\n cnt += 1\n self.logging.info('total tokens missing: ' + str(cnt))\n\n\n # c. 
build embedding layer\n from keras.layers import Embedding\n embedding_layer = Embedding(len(self.word_index) + 1,\n self.embedding_size,\n weights=[embedding_matrix],\n input_length=self.maxlen,\n trainable=False)\n else:\n raise ValueError('unknown method value')\n\n else:\n raise ValueError('unknown embedding type')\n self.logging.info('create glove pre-trained embedding: ' + str(self.embedding_size))\n return embedding_layer", "def pretrained_embedding_layer(word_to_vec_map, word_to_index):\n \n vocab_size = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)\n any_word = list(word_to_vec_map.keys())[0]\n emb_dim = word_to_vec_map[any_word].shape[0] # define dimensionality of your GloVe word vectors (= 50)\n \n ### START CODE HERE ###\n # Step 1\n # Initialize the embedding matrix as a numpy array of zeros.\n # See instructions above to choose the correct shape.\n emb_matrix = np.zeros((vocab_size, emb_dim))\n \n # Step 2\n # Set each row \"idx\" of the embedding matrix to be \n # the word vector representation of the idx'th word of the vocabulary\n for word, idx in word_to_index.items():\n emb_matrix[idx, :] = word_to_vec_map[word]\n\n # Step 3\n # Define Keras embedding layer with the correct input and output sizes\n # Make it non-trainable.\n embedding_layer = tensorflow.keras.layers.Embedding(input_dim = vocab_size, output_dim = emb_dim, trainable = False)\n ### END CODE HERE ###\n\n # Step 4 (already done for you; please do not modify)\n # Build the embedding layer, it is required before setting the weights of the embedding layer. \n embedding_layer.build((None,)) # Do not modify the \"None\". This line of code is complete as-is.\n \n # Set the weights of the embedding layer to the embedding matrix. Your layer is now pretrained.\n embedding_layer.set_weights([emb_matrix])\n \n return embedding_layer", "def load_glove_embeddings():\n data = open(\"glove.6B.50d.txt\",'r',encoding=\"utf-8\")\n embeddings = []\n word_index_dict = {'UNK':0}\n index = 1\n for lines in data:\n wordVector = lines.split(\" \")\n if(wordVector[0] in string.punctuation or any(char.isdigit() for char in wordVector[0])):\n continue\n embeddings.append(wordVector[1:-1])\n word_index_dict[wordVector[0]] = index\n index+=1\n print(\"done\")\n\n return embeddings, word_index_dict", "def generate_title(model, tokenizer, photo, max_length):\n in_text = \"startseq\"\n vocab = len(tokenizer.word_index) + 1\n prev_word = \"\"\n\n for i in range(max_length):\n sequence = tokenizer.texts_to_sequences([in_text])[0]\n sequence = pad_sequences([sequence], maxlen=max_length)\n yhat = model.predict([photo, sequence], verbose=0)\n yhat = random.choice(list(range(vocab)), 1, p=yhat[0])\n # yhat = argmax(yhat)\n word = word_for_id(yhat, tokenizer)\n\n if word is None:\n break\n\n if word == prev_word:\n pass\n\n in_text += \" \" + word\n\n prev_word = word\n\n if word == \"endseq\":\n break\n\n return in_text", "def preprocess(self, sequence, word2id, trg=True):\r\n if trg:\r\n story = [word2id[word] if word in word2id else UNK_token for word in sequence.split(' ')] + [EOS_token]\r\n else:\r\n story = []\r\n for i, word_triple in enumerate(sequence):\r\n story.append([])\r\n for ii, word in enumerate(word_triple):\r\n temp = word2id[word] if word in word2id else UNK_token\r\n story[i].append(temp)\r\n try:\r\n story = torch.Tensor(story)\r\n except:\r\n print(sequence)\r\n print(story)\r\n # print('111111111111111111111111')\r\n return story", "def make_embedding(path, words, indices):\n #root = 
'/'.join(path.split('/')[0:-1])\n #all_paths = [root+'/'+x for x in os.listdir(root)] #'/'.join(path.split('/')[0:-1]))\n #for path in all_paths:\n vec_path = 'data/'+path.split('/')[-1]+'_'+mode\n print(vec_path)\n if os.path.exists(vec_path+'.npy'):\n np_vecs = np.load(vec_path+'.npy')\n else:\n words_len = len(words)\n vecs = []\n if mode == 'word':\n f = load_model('wiki.en.bin')\n for i, w in enumerate(words):\n if mode == 'word':\n vec = f.get_word_vector(w)\n else:\n vec = eye[indices[w]]\n vecs.append(vec) \n if i % 10000 == 0:\n print(\"{} / {}\".format(i, words_len))\n np_vecs = np.asarray(vecs, dtype=np.int8)\n np.save(vec_path, np_vecs)\n return np_vecs", "def set_embeddings(self):", "def one_reference_text_pipeline(translation):\n yield translation\n tokens_list = hparams[\"tokenizer\"].encode_as_ids(translation)\n yield tokens_list\n tokens_bos = torch.LongTensor([hparams[\"bos_index\"]] + (tokens_list))\n yield tokens_bos\n tokens_eos = torch.LongTensor(tokens_list + [hparams[\"eos_index\"]])\n yield tokens_eos\n tokens = torch.LongTensor(tokens_list)\n yield tokens", "def embed_sent(self, x: Any) -> expression_seqs.ExpressionSequence:\n # single mode\n if not batchers.is_batched(x):\n expr = expression_seqs.ExpressionSequence(expr_list=[self.embed(word) for word in x])\n # minibatch mode\n elif type(self) == LookupEmbedder:\n embeddings = []\n for word_i in range(x.sent_len()):\n batch = batchers.mark_as_batch([single_sent[word_i] for single_sent in x])\n embeddings.append(self.embed(batch))\n expr = expression_seqs.ExpressionSequence(expr_list=embeddings, mask=x.mask)\n else:\n assert type(x[0]) == sent.SegmentedSentence, \"Need to use CharFromWordTextReader for non standard embeddings.\"\n embeddings = []\n all_embeddings = []\n for sentence in x:\n embedding = []\n for i in range(sentence.len_unpadded()):\n embed_word = self.embed(sentence.words[i])\n embedding.append(embed_word)\n all_embeddings.append(embed_word)\n embeddings.append(embedding)\n # Useful when using dy.autobatch\n dy.forward(all_embeddings)\n all_embeddings.clear()\n # Pad the results\n expr = batchers.pad_embedding(embeddings)\n\n return expr", "def main():\n logging.basicConfig(level=logging.DEBUG)\n custom_embedding = True\n\n # Download embeddings'\n if custom_embedding:\n embedding_path = '../data/custom_embedding.pkl'\n embedding_index_path = '../data/custom_vocab_index.pkl'\n logging.info('Pulling custom embedding from: {}, and custom vocab from: {}'.format(embedding_path, embedding_index_path))\n embedding_matrix = pickle.load(open(embedding_path, 'rb'))\n embedding_index_lookup = pickle.load(open(embedding_index_path, 'rb'))\n\n else:\n logging.warning('Downloading embedding. 
If downloading for the first time, this make take 5-10 minutes.')\n embedding_url = 'https://s3.amazonaws.com/dl4j-distribution/GoogleNews-vectors-negative300.bin.gz'\n embedding_path = '~/nlp_example/'\n embedding_filename = 'GoogleNews-vectors-negative300.bin.gz'\n lib.download_file(embedding_url, embedding_path, embedding_filename)\n\n # Unpack embedding\n model = gensim.models.KeyedVectors.load_word2vec_format(embedding_path + '/' + embedding_filename, binary=True)\n embedding_matrix = model.syn0\n embedding_index_lookup = dict([(k, v.index) for k, v in model.vocab.items()])\n\n # Create thesaurus\n thesaurus = Thesaurus(embedding_matrix, embedding_index_lookup)\n\n # Find nearest neighbors for examples\n print(thesaurus.synonyms('day'))\n print(thesaurus.synonyms('top'))\n print(thesaurus.synonyms('bottom'))\n print(thesaurus.synonyms('cat'))\n print(thesaurus.synonyms('grown'))\n\n\n pass", "def pretrained(name=\"glove_100d\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(WordEmbeddingsModel, name, lang, remote_loc)", "def embed_sentence(self, sentence:List[str]):\n # prepare the input that can be fed to bert model\n encoded_sentence, indices_subwords = self._prepare_input(sentence[0])\n with torch.no_grad():\n bert_output = self.bert_model.forward(input_ids=encoded_sentence)\n \n # take the sequence of the last four hidden states (the last element of the tuple returned by the bert model)\n # list of tensors (batch_size x num_of_splitted_words x embedding_dim)\n bert_output = list(bert_output[-1][-4:])\n bert_output.reverse()\n \n # stack the hidden states in a tensor (4 x batch_size x num_of_splitted_words x embedding_dim)\n hidden_states = torch.stack(bert_output, axis=0)\n # sum the hidden states (batch_size x num_of_splitted_words x embedding_dim)\n sum_hidden_states = torch.sum(hidden_states, axis=0)\n # merge the words splitted in subwords by the tokenizer (batch_size x sentence_length x embedding_dim)\n embed_output = self._merge_embeddings(sum_hidden_states[0], indices_subwords)\n return embed_output", "def pretrained_embedding_layer(word_to_vec_map, word_to_index):\n vocab_len = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)\n emb_dim = word_to_vec_map[\"cucumber\"].shape[0] # define dimensionality of your GloVe word vectors (= 50)\n emb_matrix = np.zeros((vocab_len, emb_dim)) # Initialize the embedding matrix as a numpy array of zeros of shape (vocab_len, dimensions of word vectors = emb_dim)\n for word, index in word_to_index.items(): # Set each row \"index\" of the embedding matrix to be the word vector representation of the \"index\"th word of the vocabulary\n emb_matrix[index, :] = word_to_vec_map[word]\n embedding_layer = Embedding(vocab_len, emb_dim, trainable = False) # Define Keras embedding layer with the correct output/input sizes, make it trainable. Use Embedding(...). Make sure to set trainable=False. \n embedding_layer.build((None,)) # Build the embedding layer, it is required before setting the weights of the embedding layer. Do not modify the \"None\".\n embedding_layer.set_weights([emb_matrix]) # Set the weights of the embedding layer to the embedding matrix. 
Your layer is now pretrained.\n return embedding_layer", "def compute_user_input_embedding(txt, model):\r\n embeddings = []\r\n tokens = txt.split(\" \")\r\n for word in tokens:\r\n embeddings.append(model.wv[word])\r\n sentence_embedding = compute_average(embeddings)\r\n return sentence_embedding", "def embedding_sentence(input_file, save_path, max_length):\n lines = _read_csv(input_file)\n split_lines = []\n label_list = []\n for line in lines:\n split_lines.append(sentence_split(line[1], max_length))\n label_list.append(int(line[2]))\n del lines\n\n writer = tf.python_io.TFRecordWriter(save_path)\n for index, line in enumerate(split_lines):\n bytes_words = []\n for word in line:\n bytes_words.append(str.encode(word))\n example = tf.train.Example(features=tf.train.Features(feature={\n \"label\":\n tf.train.Feature(int64_list=tf.train.Int64List(value=[label_list[index]])),\n \"features\":\n tf.train.Feature(bytes_list=tf.train.BytesList(value=bytes_words))\n }))\n writer.write(example.SerializeToString())", "def Aut(A):\n return Embeddings(A,A)", "def generate_analogy(word1, word2, word3, model):\n LoM = model.most_similar(positive=[word2, word3], negative=[word1], topn=100)\n return LoM", "def gen_embedding(text, model, tokenizer):\n ### Tokenize the texts\n encoded_input = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors='pt')\n \n ### Encode the tokenized data with model\n with torch.no_grad():\n model_output = model(**encoded_input)\n \n ### Pool the outputs into a single vector\n sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])\n return sentence_embeddings", "def _gen_words(sentence, labels):\r\n word = \"\"\r\n words = []\r\n for token, label in zip(sentence, labels):\r\n word += token\r\n if label in [1, 3]:\r\n words.append(word)\r\n word = \"\"\r\n return words", "def embed(self, lang, seq, mask):\n inputs = []\n batch_size, seq_len = seq.size()\n inputs.append(self.embedder(seq))\n if self.args.use_cluster_features:\n H = self.cluster_typology(lang)\n H = H.view(1, 1, -1).expand(batch_size, seq_len, H.numel())\n inputs.append(H)\n if self.args.use_hand_features:\n H = self.hand_typology(lang)\n H = H.view(1, 1, -1).expand(batch_size, seq_len, H.numel())\n inputs.append(H)\n if self.args.use_neural_features:\n N = self.neural_typology(seq, mask)\n N = N.view(1, 1, -1).expand(batch_size, seq_len, N.numel())\n inputs.append(N)\n return torch.cat(inputs, dim=-1)", "def translate_greedy_search(source_sentence: List[int],\n model: Seq2SeqAttentionModel, max_length=10) -> (List[int], torch.tensor):\n encoder_hiddens = encode_all(source_sentence, model)\n # stack x hid_dim\n prev_hidden = encoder_hiddens[-1]\n prev_context = torch.zeros(model.hidden_dim)\n decode_in = SOS_token\n translate_out = []\n attention_wt_list = []\n for i in range(max_length):\n probs, prev_hidden, prev_context, attention_weights = decode(prev_hidden, encoder_hiddens, prev_context, decode_in, model)\n log_probs = torch.log(probs)\n decode_in = int(torch.argmax(log_probs).item())\n translate_out.append(decode_in)\n attention_wt_list.append(attention_weights)\n if decode_in == EOS_token:\n break\n\n return translate_out, torch.stack(attention_wt_list)", "def preproc_user_input(txt, model):\r\n txt = pre_process(txt)\r\n txt_tokenized = [word for word in txt.split(\" \") if word in model.wv.vocab]\r\n return \" \".join(txt_tokenized)", "def fit(model: Word2Vec, sequences: List):\n logger.info(\"Building item vocabulary for training.\")\n 
model.build_vocab(sequences, progress_per=1000, update=False)\n logger.info(\"Fitting Embedding Neural Network model.\")\n model.train(sequences, epochs=model.epochs, total_examples=model.corpus_count)\n training_loss = model.get_latest_training_loss()\n logger.info(f\"Final model training loss: {training_loss}\")\n return model", "def embed_xseq(self,xseq):\n xseq_embed = self.emat[xseq]\n return xseq_embed", "def getEmbeddings(model, words):\n\tembeddings = {}\n\tfor word in words:\n\t\tembeddings[word] = model[word]\n\treturn embeddings", "def glove(data_fname='glove.840B.300d.txt', out_fname='glove.pkl'):\n words, U, dim = [], [], None\n with open(DATA_DIR + data_fname, 'rb') as f:\n for j, line in enumerate(f):\n x = line.strip().split()\n word, vector, d = x[0], np.ravel(x[1:]), len(x) - 1\n if dim is None: dim = d\n elif d != dim: raise Exception('{0}: {1}!={2}'.format(j, dim, d))\n U.append(vector)\n words.append(word)\n U = np.array(U)\n print \"Found {0} words\".format(len(words))\n print \"Found {0}x{1} embedding matrix\".format(*U.shape)\n with open(DATA_DIR + out_fname, 'wb') as f:\n cPickle.dump((words, U), f)", "def get_embeddings(vectors, text, generate_missing=False, k=300):\r\n embeddings = text.apply(lambda x: get_average_vec(x, vectors, generate_missing=generate_missing, k=k))\r\n return list(embeddings)", "def transform_sentence(sequence, vocab_processor):\n return next(vocab_processor.transform([sequence])).tolist()", "def read_old_glove(filepath):\n print('reading glove files:', filepath)\n\n word2idx = {}\n word_embed = [['0'] * 300] # word_embed[0] = [0] * 300, represent the <PAD>\n\n with open(filepath, 'r') as f:\n for idx, line in enumerate(f):\n line_list = line.split()\n word = ' '.join(line_list[: len(line_list)-300])\n embed = [num for num in line_list[len(line_list)-300:]]\n\n word2idx[word] = idx + 1\n word_embed.append(embed)\n\n return word2idx, word_embed", "def process_glove_data(filename):\r\n\r\n word_list = []\r\n embed_list = []\r\n with open(filename,encoding=\"utf8\") as file:\r\n lines = file.readlines()\r\n for line in lines:\r\n toks = line.split(' ')\r\n word_list.append(toks[0])\r\n vec = [float(tok) for tok in toks[1:]]\r\n embed_list.append(vec)\r\n \r\n embed = np.array(embed_list,dtype=float)\r\n embed_df = pd.DataFrame(embed,index=word_list)\r\n embed_df.index = embed_df.index.str.lower()\r\n \r\n return embed_df", "def generate_language(sent, vocab, model, end_tok=END_TOK):\n sent = [vocab[w] if w in vocab else vocab[\"<UNK>\"] for w in sent.split(' ')]\n ix = 0\n ix2vocab = {vocab[w]: w for w in vocab}\n gen_s = deepcopy(sent)\n while ix != 10:\n inp = np.array(sent).reshape(1, -1)\n probs = model(inp)\n # Sample from the model\n sample = np.random.multinomial(100, probs.flatten(), size=1)\n pred = np.argmax(sample)\n sent = sent[1:] + [pred]\n gen_s.append(pred)\n ix += 1\n if ix2vocab[pred] == end_tok:\n break\n return ' '.join([ix2vocab[jx] for jx in gen_s])", "def preprocess_seq(self, sequence, word2idx):\n story = []\n for value in sequence:\n #v = [word2idx[word] if word in word2idx else UNK_token for word in value.split()] + [EOS_token]\n story.append(word2idx[value] if value in word2idx else UNK_token)\n story = torch.Tensor(story)\n return story", "def load_word2vec_en_pretrained():\r\n log.info(\"Load W2V Model\")\r\n model = api.load(\"glove-wiki-gigaword-50\")\r\n return PreTrainedGensimEN(model)", "def embed(self, word: str) -> list:\n return self.emb_matrix[self.wordmapper[word]]", "def build_sentence(length: int, 
input=None):\n global other_time, database_time\n # print(other_time)\n # print(database_time)\n # print()\n while True:\n start = time.time()\n try:\n last_word = __choose_last_word()\n sentence = []\n i = 0\n while i < length:\n sentence.append(\"\")\n i += 1\n\n if input != None:\n last_word = input\n\n # start of the sentence\n sentence[length - 1] = last_word\n\n # find markov data from last word\n a = get_relation_dict(last_word, -1)\n\n second_to_last_word = __probability_roll(a)\n sentence[length - 2] = second_to_last_word\n i = length - 3\n while i >= 0:\n word_1 = sentence[i + 2]\n word_2 = sentence[i + 1]\n\n # words 2 steps away and one step away respectively\n prev_words_2 = get_relation_dict(word_1, 2)\n prev_words_1 = get_relation_dict(word_2, 1)\n prev_word_list = [prev_words_1, prev_words_2]\n\n if (i + 3) < length:\n word_0 = sentence[i + 3]\n prev_words_3 = get_relation_dict(word_0, 3)\n prev_word_list.append(prev_words_3)\n\n if (i + 4) < length:\n word_00 = sentence[i + 4]\n prev_words_4 = get_relation_dict(word_00, 4)\n prev_word_list.append(prev_words_4)\n\n try:\n # print(sentence)\n potential_words = __find_union(prev_word_list)\n # print(len(potential_words))\n sentence[i] = __probability_roll(potential_words)\n j = 0\n while j < 3:\n # print(\"sentence i, i+1: {}, {}\".format(sentence[i],sentence[i+1]))\n if sentence[i] == sentence[1 + 1]:\n sentence[i] = __probability_roll(potential_words)\n j += 1\n # print(\"Union of {} spaces\".format(str(len(prev_word_list))))\n except IndexError:\n sentence[i] = __probability_roll(prev_words_1)\n # print(\"Dice Roll\")\n i -= 1\n duplicate_count = count_duplicates(sentence)\n if duplicate_count > 0:\n raise ValueError\n print(sentence)\n return (sentence)\n except KeyError:\n print(\"keyerror\")\n print(sentence)\n\n except TypeError:\n print(\"typeERROR\")\n except ValueError:\n print(\"too many duplicates\")", "def generate(model, voc, maxlen=20, diversity=0.5, numchars=100):\n\n text, char_indices, indices_char = voc\n chars = set(text)\n start_index = random.randint(0, len(text) - maxlen - 1) \n generated = ''\n sentence = text[start_index: start_index + maxlen]\n #print(\"Insert text to start from [min 20 chars]:\")\n #sentence = str(raw_input())\n #sentence = sentence[:maxlen]\n generated += sentence\n print('----- Generating with seed: \"' + sentence + '\"')\n sys.stdout.write(generated)\n\n for i in range(numchars):\n x = np.zeros((1, maxlen, len(chars)))\n for t, char in enumerate(sentence):\n x[0, t, char_indices[char]] = 1.\n \n preds = model.predict(x, verbose=0)[0]\n next_index = sample(preds, diversity)\n next_char = indices_char[next_index]\n generated += next_char\n sentence = sentence[1:] + next_char\n sys.stdout.write(next_char)\n sys.stdout.flush()\n print()", "def explode(self):\n return \"...it's a glove.\"", "def forward_a_sentence(self, inputs, hidden=None):\n embedded = self.embedding(inputs)\n outputs, hidden = self.gru(embedded, hidden)\n return outputs, hidden", "def gen_embedding(path):\r\n word_emb = {}\r\n with open(path, encoding='utf-8') as f:\r\n for line in tqdm(f):\r\n values = line.split()\r\n word_emb[values[0]] = np.asarray(values[1:], dtype='float32')\r\n return word_emb", "def word2vec(self, words):\n with torch.no_grad():\n words = torch.LongTensor(self.doc2token(words))\n result = self.model.embedding(words).numpy()\n return result", "def train_sentence_dbow(model, sentence, lbls, alpha, work=None, train_words=True, train_lbls=True):\n neg_labels = []\n if model.negative:\n # 
precompute negative labels\n neg_labels = zeros(model.negative + 1)\n neg_labels[0] = 1.0\n\n for label in lbls:\n if label is None:\n continue # OOV word in the input sentence => skip\n for word in sentence:\n if word is None:\n continue # OOV word in the input sentence => skip\n train_sg_pair(model, word, label, alpha, neg_labels, train_words, train_lbls)\n\n return len([word for word in sentence if word is not None])", "def load_glove_data():\n glove_path = path.join('..', 'data', 'glove', 'glove.twitter.27B.200d.txt')\n f = open(glove_path,'r')\n \n model = {}\n for line in f:\n splitLine = line.split()\n word = splitLine[0]\n embedding = np.array([float(val) for val in splitLine[1:]])\n model[word] = embedding\n \n return model", "def sample(args):\n with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:\n saved_args = cPickle.load(f)\n with open(os.path.join(args.save_dir, 'words_vocab.pkl'), 'rb') as f:\n words, vocab = cPickle.load(f)\n tf.reset_default_graph()\n model = Model(saved_args, True)\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n saver = tf.train.Saver(tf.global_variables())\n ckpt = tf.train.get_checkpoint_state(args.save_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n for _ in range(args.count):\n output = model.sample(sess, words, vocab, args.n, args.prime,\n args.sample, args.pick,\n args.width, args.quiet)\n score, matches = eval_str(output)\n print(\"===== Before GTranslate Smoothing. Grammar Score = %i\" %score)\n print(output)\n gtranslate_output = translate(output)\n new_score, new_matches = eval_str(gtranslate_output)\n print(\"===== After GTranslate Smoothing. Grammar Score = %i\" %new_score)\n print(translate(gtranslate_output))\n if args.show_grammar:\n for err in matches:\n print(\"---\")\n print(err)", "def creating_sentence(self, length = 10):\n #Edit so it adds periodss and not spaces at the end of a sentence.\n created_sentence = \"\"\n adding_word = self.dictionary_histogram.sample()\n created_sentence += adding_word+\" \"\n length = length - 1\n\n last_word = adding_word\n\n while length > 0:\n next_word_for = self[adding_word].sample()\n created_sentence += next_word_for+\" \"\n adding_word = next_word_for\n length -= 1\n\n\n return created_sentence", "def embedding(self, seqs):\n batch_size, seqlen = seqs.shape\n seqs = np.reshape(seqs, (-1)) # convert to 1-d indexes [(batch_sz*seqlen)]\n embs = self.word2vec[seqs] # lookup [(batch_sz*seqlen) x emb_sz]\n embs = np.reshape(embs, (batch_size, seqlen, -1)) # recover the shape [batch_sz x seqlen x emb_sz]\n return embs", "def augment_sentence(self):\n # Initialize a tracker to ensure we don't make more than the desired number of changes\n changes = 0\n # Make a queue for later\n queue = [self.sentence]\n\n # While we haven't made too many changes and we still have stuff to change, do work!\n while changes < self.max_changes and len(queue) > 0:\n # Take a sentence from the queue and blast it apart into a POS-tagged list\n current_sentence = queue.pop(0)\n tokenized_sentence = nltk_methods.string_to_pos_tagged_list(current_sentence)\n sentence_length = len(tokenized_sentence)\n # Now modify it according to the variation rate\n for i in range(self.variation_rate):\n # Set variable for tracking a change\n has_changed = False\n attempts = 0\n # Keep trying to make a change until either:\n # 1) You've made a change, OR\n # 2) You've tried to make a change for half the words in the sentence with no success\n while 
has_changed is not True and attempts <= sentence_length/2:\n syn_sent = tokenized_sentence\n swap_sent = tokenized_sentence\n insert_sent = tokenized_sentence\n del_sent = tokenized_sentence\n successful_changes = 0\n # Hand the sentence off to the specific augmentation methods\n # Note that these methods can all return empty strings, so make sure to handle that\n synonym_replaced_sentence = self.__synonym_replacement(syn_sent)\n if synonym_replaced_sentence is not \"\":\n queue.append(synonym_replaced_sentence)\n self.augmented_sentence_list.append(synonym_replaced_sentence)\n successful_changes += 1\n\n swapped_sentence = self.__swap_two_random_words(swap_sent)\n if swapped_sentence is not \"\":\n queue.append(swapped_sentence)\n self.augmented_sentence_list.append(swapped_sentence)\n successful_changes += 1\n\n inserted_sentence = self.__insert_random_synonym(insert_sent)\n if inserted_sentence is not \"\":\n queue.append(inserted_sentence)\n self.augmented_sentence_list.append(inserted_sentence)\n successful_changes +=1\n\n # We don't want to delete the sentence into oblivion, so have a threshold for smallest possible sentence\n if len(del_sent) >= 15:\n deleted_word_sentence = self.__delete_random_word(del_sent)\n if deleted_word_sentence is not \"\":\n queue.append(deleted_word_sentence)\n self.augmented_sentence_list.append(deleted_word_sentence)\n successful_changes += 1\n \n # Now update the while loop flags\n if successful_changes >= 4:\n has_changed = True\n attempts += 1\n changes += 2", "def compute_bigrams(line: str) -> Sequence[str]:\n return DatasetLSTM.pairwise(chain(line, [\"</s>\"]))", "def generate_similar_sentences(sentence,w2v,percentage_to_replace=1,max_syn=10,num_outputs=50):\n\n list_of_sentences = []\n\n words = pairwise_tokenize(sentence,w2v,remove_stopwords=False) #This has combined any compound words found in word2vec\n\n #if word contains underscore don't sub in synonyms\n dont_sub_idx = []\n compound_word_idx = []\n deleted_idx = []\n for idx,word in enumerate(words):\n if \"_\" in word or word in stopwords.words('english'):\n dont_sub_idx.append(idx)\n if \"_\" in word:\n compound_word_idx.append(idx)\n deleted_idx.append(idx+1)\n if not word:\n dont_sub_idx.append(idx)\n\n pattern = re.compile('[\\W_]+') \n sentence = pattern.sub(\" \",sentence).lower().strip()\n tagged = pos_tag(sentence.split(\" \")) #Pos_tag needs to use the original sentence to tag parts of speech, we will now delete indices that correspond to words that no longer exist b/c of compound\n \n for idx in reversed(compound_word_idx):\n tagged.pop(idx+1)\n \n for tag in tagged:\n if tag[1] == 'NNP':\n dont_sub_idx.append(idx)\n \n for i in xrange(num_outputs):\n new_words = words\n mask = np.random.random_sample(len(words))\n for j in xrange(len(words)):\n if mask[j] < .5 and j not in dont_sub_idx:\n pos = wordnet_pos_code(tagged[j][1])\n synonyms = get_synonyms(words[j],w2v,pos,max=max_syn)\n if len(synonyms) != 0:\n new_words[j] = synonyms[np.random.randint(0,min(max_syn,len(synonyms)))]\n list_of_sentences.append(\" \".join(new_words))\n\n list_of_sentences = set(list_of_sentences)\n return list(list_of_sentences)", "def explode(self):\n\n return \"...it's a glove.\"", "def get_sentence(self):", "def build_sense_embedding(target_sense_to_id, word_freq, EMBEDDING_DIM):\r\n res = {}\r\n wordvecs = load_glove(EMBEDDING_DIM)\r\n \r\n for target_sense_list in target_sense_to_id:\r\n for key, _ in target_sense_list.items():\r\n sense_vector = np.zeros(EMBEDDING_DIM)\r\n senses = 
key.split(',')\r\n n = 0\r\n for sensekey in senses:\r\n #print(sensekey) \r\n if '/' in sensekey:\r\n continue\r\n sense_synset = sc2ss(sensekey)\r\n if sense_synset:\r\n sense_vector += build_sense_vector(sense_synset, word_freq, wordvecs)\r\n n += 1\r\n if n != 0:\r\n res[key] = sense_vector/n\r\n return res", "def generate_sentence():\n markov_chain = makeMarkovDict(\"text.txt\")\n\n # Pick a random word to begin with.\n first_word = random.choice(markov_chain.keys()) # Illegall\n\n # print first_word\n # random_choice = random.randint(0, len(markov_chain.keys()))\n # index = 0\n # first_word = \"\"\n # for word in markov_chain:\n # print word\n # if index == random_choice:\n # first_word = word\n # break\n # index += 1\n\n # Based on that word, call function to chose the next word.\n # print markov_chain[first_word]\n # print word_selection(markov_chain[first_word])\n\n lenght_of_sentence = 10\n sentence = [first_word] # First word already in there\n for i in range(lenght_of_sentence):\n sentence.append(word_selection(markov_chain[sentence[i]]))\n # Sentence after loop: ['fish', 'red', 'fish', 'two', 'fish', 'red', 'fish', 'red', 'fish', 'two', 'fish']\n\n # Cap with letter and add period at the end.\n final_sentece = \" \".join(sentence) + \".\"\n return final_sentece.capitalize()", "def chunk_seq(f, text, order=3, length=50):\n next_chunk = text[-1]\n chunklist = next_chunk.split()\n\n while len(text) * order < length:\n candidates = gather_next_chunks(f, chunklist, order) #1\n candidates_edited = edit_chunks(candidates)\n #print candidates_edited\n next_chunk = pick_next_chunk(candidates_edited, next_chunk)\n text.append(next_chunk)\n\n markov = ' '.join(text)\n return markov", "def pass_word(word, model, input_embedding, char2idx, device, use_head=True):\n inp = torch.LongTensor([char2idx['START']] + [char2idx[c] for c in word]).to(device)\n inp = pack([inp])\n out, hidden = model(input_embedding.unsqueeze(0), inp, use_head=use_head)\n return out, hidden", "def __call__(self, text):\n if not text:\n return Doc(self.vocab)\n elif text.isspace():\n return Doc(self.vocab, words=[text], spaces=[False])\n\n snlp_doc = self.snlp(text)\n text = snlp_doc.text\n snlp_tokens, snlp_heads = self.get_tokens_with_heads(snlp_doc)\n words = []\n spaces = []\n pos = []\n tags = []\n morphs = []\n deps = []\n heads = []\n lemmas = []\n offset = 0\n token_texts = [t.text for t in snlp_tokens]\n is_aligned = True\n try:\n words, spaces = self.get_words_and_spaces(token_texts, text)\n except ValueError:\n words = token_texts\n spaces = [True] * len(words)\n is_aligned = False\n warnings.warn(\n \"Due to multiword token expansion or an alignment \"\n \"issue, the original text has been replaced by space-separated \"\n \"expanded tokens.\",\n stacklevel=4,\n )\n offset = 0\n for i, word in enumerate(words):\n if word.isspace() and (\n i + offset >= len(snlp_tokens) or word != snlp_tokens[i + offset].text\n ):\n # insert a space token\n pos.append(\"SPACE\")\n tags.append(\"_SP\")\n morphs.append(\"\")\n deps.append(\"\")\n lemmas.append(word)\n\n # increment any heads left of this position that point beyond\n # this position to the right (already present in heads)\n for j in range(0, len(heads)):\n if j + heads[j] >= i:\n heads[j] += 1\n\n # decrement any heads right of this position that point beyond\n # this position to the left (yet to be added from snlp_heads)\n for j in range(i + offset, len(snlp_heads)):\n if j + snlp_heads[j] < i + offset:\n snlp_heads[j] -= 1\n\n # initial space tokens are 
attached to the following token,\n # otherwise attach to the preceding token\n if i == 0:\n heads.append(1)\n else:\n heads.append(-1)\n\n offset -= 1\n else:\n token = snlp_tokens[i + offset]\n assert word == token.text\n\n pos.append(token.upos or \"\")\n tags.append(token.xpos or token.upos or \"\")\n morphs.append(token.feats or \"\")\n deps.append(token.deprel or \"\")\n heads.append(snlp_heads[i + offset])\n lemmas.append(token.lemma or \"\")\n\n doc = Doc(\n self.vocab,\n words=words,\n spaces=spaces,\n pos=pos,\n tags=tags,\n morphs=morphs,\n lemmas=lemmas,\n deps=deps,\n heads=[head + i for i, head in enumerate(heads)],\n )\n ents = []\n for ent in snlp_doc.entities:\n ent_span = doc.char_span(ent.start_char, ent.end_char, ent.type)\n ents.append(ent_span)\n if not is_aligned or not all(ents):\n warnings.warn(\n f\"Can't set named entities because of multi-word token \"\n f\"expansion or because the character offsets don't map to \"\n f\"valid tokens produced by the Stanza tokenizer:\\n\"\n f\"Words: {words}\\n\"\n f\"Entities: {[(e.text, e.type, e.start_char, e.end_char) for e in snlp_doc.entities]}\",\n stacklevel=4,\n )\n else:\n doc.ents = ents\n\n if self.svecs is not None:\n doc.user_token_hooks[\"vector\"] = self.token_vector\n doc.user_token_hooks[\"has_vector\"] = self.token_has_vector\n return doc", "def build_new_text(tgram, start_words, max_words):\n out_words = []\n for i in range(max_words - 2):\n if start_words in tgram:\n next_word = random.choice(tgram[start_words])\n out_words.append(next_word)\n start_words = start_words.split()\n start_words = start_words[1] + \" \" + next_word\n else:\n break\n out_words = \" \".join(out_words)\n return out_words", "def TF_cross_val_predict_using_embeddings(glovefile, nlp_column, modeltype, X, y, test=\"\",epochs=50,loc_flag=True):\r\n X = copy.deepcopy(X)\r\n NLP = NLP_Pipeline(glovefile)\r\n train_emb = NLP.fit_transform(X, nlp_column)\r\n print('Train embeddings shape = ', train_emb.shape)\r\n if not isinstance(test, str):\r\n test_emb = NLP.transform(test)\r\n print(' Test embeddings shape = ', test_emb.shape)\r\n ### this contains only the embeddings and hence you must only use it for Emebedding layer in keras\r\n print('NLP.embedding_matrix.shape = ',NLP.embedding_matrix.shape)\r\n print('NLP.vocab_size = ', NLP.vocab_size)\r\n print('NLP.max_length = ', NLP.max_length)\r\n print('NLP.glove_dimension = ',NLP.glove_dimension)\r\n #### now perform the model generation here using embeddings #####\r\n test = test.copy(deep=True)\r\n KF = KFold(n_splits=5)\r\n train_copy = copy.deepcopy(train_emb)\r\n y = copy.deepcopy(y)\r\n if modeltype != 'Regression':\r\n #You need to convert y since y happens to be a binary or multi-class variable\r\n yten, idex = tf.unique(y)\r\n rev_dicti = dict(zip(range(len(yten)),yten.numpy()))\r\n dicti = dict([(v,k) for (k,v) in rev_dicti.items()])\r\n num_predicts = len(yten)\r\n else:\r\n num_predicts = 1\r\n best_pipe = get_keras_model(modeltype, num_predicts, NLP)\r\n model2 = best_pipe\r\n nlp_col = 'cross_val_predictions_glove'\r\n if not loc_flag:\r\n train_emb[nlp_col] = 0\r\n for fold, (t_, v_) in enumerate(KF.split(train_emb,y)):\r\n if loc_flag:\r\n trainm = train_copy.loc[t_]\r\n y_train_copy = y[t_]\r\n testm = train_copy.loc[v_]\r\n else:\r\n trainm = train_copy.iloc[t_]\r\n y_train_copy = y[t_]\r\n testm = train_copy.iloc[v_]\r\n #### Now let us do the model fitting ##### \r\n if modeltype != 'Regression':\r\n y_train_copy = pd.Series(y_train_copy).map(dicti).values\r\n 
best_pipe.fit(trainm, y_train_copy, epochs=epochs, steps_per_epoch=10, verbose=1)\r\n if modeltype == 'Regression':\r\n testm = best_pipe.predict(testm).ravel()\r\n else:\r\n testm = best_pipe.predict(testm).argmax(axis=1).astype(int)\r\n if loc_flag:\r\n train_emb.loc[v_, nlp_col] = testm\r\n else:\r\n train_emb.iloc[v_, -1] = testm\r\n print(' Predictions for fold %d completed' %(fold+1))\r\n ## This is where we apply the transformer on train data and test ##\r\n if modeltype == 'Regression':\r\n y_train = train_emb[nlp_col].values\r\n else:\r\n y_train = train_emb[nlp_col].map(rev_dicti).values\r\n tf.keras.backend.clear_session()\r\n if not isinstance(test, str):\r\n print(' Returning predictions on test data...')\r\n if modeltype == 'Regression':\r\n model2.fit(train_copy, y)\r\n y_pred = model2.predict(test_emb).ravel()\r\n test[nlp_col] = y_pred\r\n else:\r\n y = pd.Series(y).map(dicti).values\r\n model2.fit(train_copy, y)\r\n y_pred = model2.predict(test_emb).argmax(axis=1).astype(int)\r\n test[nlp_col] = y_pred\r\n y_pred = test[nlp_col].map(rev_dicti)\r\n else:\r\n y_pred = \"\"\r\n test = \"\"\r\n print('cross val predictions train and test completed')\r\n return y_train, y_pred", "def generate_sentence(self):\n if self.word_to_index is None:\n self.log.error(\"Need to load a model or data before this step.\")\n return []\n # Start sentence with the start token\n sentence = [self.word_to_index[self.sentence_start_token]]\n # Predict next word until end token is received\n while not sentence[-1] == self.word_to_index[self.sentence_end_token]:\n next_word_probs = self.forward_propagate(sentence)\n sampled_word = self.word_to_index[self.unknown_token]\n # We don't want the unknown token to appear in the sentence\n while sampled_word == self.word_to_index[self.unknown_token]:\n samples = np.random.multinomial(1, next_word_probs[-1])\n sampled_word = np.argmax(samples)\n sentence.append(sampled_word)\n sentence_str = [self.index_to_word[word] for word in sentence[1:-1]]\n return sentence_str", "def train(self, examples):\n print(examples)\n # first we will do gensim to get word embeddings\n tokens = []\n for example in examples:\n for tuple in example:\n tokens.append([tuple[0]])\n self.model = Word2Vec(tokens, min_count=1, size=100).wv\n # shuffle the examples so that they are gone through 'randomly'\n #print(examples)\n random.shuffle(examples)\n #print(examples)\n # iterate through our examples\n for j in range(len(examples)):\n # the stored label for the previous token\n prev_label = None\n prev_word = None\n # iterate through our tokens for the example\n for i in range(len(examples[j])):\n # store our token and its label\n token = examples[j][i][0]\n y = examples[j][i][1]\n # get the features for our current token\n next_word = None\n if i <= (len(examples)-1):\n next_word = examples[j][i+1][0]\n features = self.featurize(prev_label, prev_word, token, next_word)\n # set our previous label to our current since\n # we are done featurizing and need to store it for\n # the next iteration\n prev_label = y\n # a dictionary that will store our z values\n z = {}\n # calculate our z value for every state for\n # the example we are on\n # z(state) = features * weights\n # z[state] = np.dot(features, weights[state])\n for state in self.states:\n z[state] = np.dot(features, self.weights[state])\n # store our max\n max = -1\n # store our y_hat\n y_hat = None\n # store our probabilities\n prob = {}\n # this runs softmax on our z's\n # y_hat = softmax(z)\n denom = 
sum(np.exp(np.array(list(z.values()))))\n for state in self.states:\n # softmax = p(state) = e^z[state] / (sum[e^z for all z's)\n # making sure this works the way I want it to, should\n # be three values\n #print(np.array(list(z.values())))\n #print(np.exp(np.array(list(z.values()))))\n prob[state] = np.exp(z[state]) / denom\n # if our current prob is greater than the others then it is our boy\n if prob[state] > max:\n # save the new prob as the max\n max = prob[state]\n # save the state as our prediction y_hat\n y_hat = state\n # this will hold our gradients for all the states\n gradients = {}\n for state in self.states:\n # gradient[state] = ((y_hat == state) - prob[state]) * features\n gradients[state] = ((y_hat == state) - prob[state]) * features\n # weights[state] -= loss * gradients\n self.weights[state] -= self.loss * gradients[state]", "def generate_initial_embs(emb_type):\n def _get_emb_avg(g, lang):\n \"\"\"Compute the embedding of g as the average of its word embeddings\n :param g: the input genre\n :param lang: language\n :return: the embedding and if all words of this genre are known\n \"\"\"\n emb = np.zeros(emb_dims[lang])\n known_words_count = 0\n words = g.split()\n for w in words:\n if w in models[lang]:\n emb += models[lang][w]\n known_words_count += 1\n emb /= len(words)\n return emb, known_words_count > 0\n\n def _get_emb_wavg(g, lang, a=0.001):\n \"\"\"Compute the embeddings of g with a sentence embedding algorithm (average weighted by the word estimated frequencies)\n :param g: the input genre\n :param lang: language\n :param a: a model hyper-parameter (see Arora et al. in the paper)\n :return: the embedding and if all words of this genre are known\n \"\"\"\n emb = np.zeros(emb_dims[lang])\n known_words_count = 0\n words = g.split()\n for w in words:\n if w in models[lang]:\n emb += a / (a + word_freqs[lang][w]) * models[lang][w]\n known_words_count += 1\n emb /= len(words)\n return emb, known_words_count > 0\n\n def _remove_pc(df_embs, npc=1):\n \"\"\"Remove the pc (see Arora at el. in the paper)\n :param df_embs: the input embeddings\n :return: the normalized embeddings\n \"\"\"\n pc = _compute_pc(df_embs, npc)\n if npc == 1:\n df_embs_out = df_embs - df_embs.dot(pc.transpose()) * pc\n else:\n df_embs_out = df_embs - df_embs.dot(pc.transpose()).dot(pc)\n return df_embs_out\n\n def _compute_pc(df_embs, npc=1):\n \"\"\"Compute the pc (see Arora at el. 
in the paper)\n :param df_embs: the input embeddings\n :return: the principal component\n \"\"\"\n svd = TruncatedSVD(n_components=npc, n_iter=7, random_state=0)\n svd.fit(df_embs)\n return svd.components_\n\n embs = {}\n known = {}\n for g in G.nodes:\n lang = g[:2]\n norm_g = TagManager.normalize_tag_wtokenization(g, tries[lang], prefixed=True)\n if emb_type == 'avg':\n embs[g], known[g] = _get_emb_avg(norm_g, lang)\n else:\n embs[g], known[g] = _get_emb_wavg(norm_g, lang)\n\n embs = pd.DataFrame(embs).T # the embeddings are columns\n if emb_type == 'sif': # the algorithm imposes a normalization\n norm_embs = _remove_pc(embs.to_numpy())\n embs = pd.DataFrame(norm_embs, columns=embs.columns, index=embs.index)\n return embs, known", "def construct_embedding(self):\n i = 0\n self.load_dicts()\n embedding_shape = (max(self.word2idx.values()) + 1,\n self.embedding_size)\n self.embedding = np.zeros(embedding_shape)\n\n with open(self.config.word_vec_fi_glove, 'r') as fi:\n for line in fi:\n word_vec = line.split(\" \")[1:]\n self.embedding[i, :] = np.array(word_vec, dtype=np.float32)\n i += 1\n\n self.write_embedding()", "def build_seq_embeddings(self, input_seqs):\n with tf.variable_scope(\"seq_embedding\"), tf.device(\"/cpu:0\"):\n embedding_map = tf.get_variable(\n name=\"map\",\n shape=[self.config.vocab_size, self.config.embedding_size],\n initializer=self.initializer)\n seq_embeddings = tf.nn.embedding_lookup(embedding_map, input_seqs)\n\n return seq_embeddings", "def hook(self, sentence, words):\n pass", "def new_sentence():\n #Get the arguments from the get request\n seed = str(request.args.get(\"seed\"))\n message = str(request.args.get(\"message\"))\n try:\n size = int(request.args.get(\"n\"))\n except ValueError:\n size = len(seed)\n\n #Generate the markov model\n model = markov.make_model(message, size)\n\n #Return a json dictionary, containing the next seed and sentence\n return json.dumps({\"seed\":markov.random_seed(message, size), \"next_sentence\":markov.next_sentence(model, seed)})", "def build_sentence_encoders(tparams, options):\n opt_ret = dict()\n trng = RandomStreams(1234)\n\n #xs, masks, sents_all = [], [], []\n in_outs = []\n\n langs = options['langs']\n for lang in langs:\n # description string: #words x #samples\n # forward\n x = tensor.matrix('x_%s'%lang, dtype='int64')\n mask = tensor.matrix('x_mask_%s'%lang, dtype='float32')\n\n n_timesteps = x.shape[0]\n n_samples = x.shape[1]\n\n # Word embedding (forward)\n emb = tparams['Wemb_%s'%lang][x.flatten()].reshape([n_timesteps, n_samples, options['dim_word']])\n\n if options['bidirectional_enc']:\n # backward RNN\n x_r = x[::-1]\n mask_r = mask[::-1]\n emb_r = tparams['Wemb_%s'%lang][x_r.flatten()].reshape([n_timesteps, n_samples, options['dim_word']])\n\n if options['use_dropout']:\n retain_probability_emb = 1-options['dropout_embedding']\n retain_probability_hidden = 1-options['dropout_hidden']\n retain_probability_source = 1-options['dropout_source']\n rec_dropout = theano.shared(numpy.array([retain_probability_hidden]*2, dtype='float32'))\n rec_dropout_r = theano.shared(numpy.array([retain_probability_hidden]*2, dtype='float32'))\n emb_dropout = theano.shared(numpy.array([retain_probability_emb]*2, dtype='float32'))\n emb_dropout_r = theano.shared(numpy.array([retain_probability_emb]*2, dtype='float32'))\n source_dropout = theano.shared(numpy.float32(retain_probability_source))\n emb *= source_dropout\n if options['bidirectional_enc']:\n embr *= source_dropout\n else:\n rec_dropout = 
theano.shared(numpy.array([1.]*2, dtype='float32'))\n rec_dropout_r = theano.shared(numpy.array([1.]*2, dtype='float32'))\n emb_dropout = theano.shared(numpy.array([1.]*2, dtype='float32'))\n emb_dropout_r = theano.shared(numpy.array([1.]*2, dtype='float32'))\n\n # Encode sentences\n if options['encoder_%s'%lang] == 'bow':\n sents = (emb * mask[:,:,None]).sum(0)\n else:\n # iteratively push input from first hidden layer until the last\n for i in range(int(options['n_enc_hidden_layers'])):\n layer_name_prefix='encoder_%s_%i'%(lang,i)\n # if first layer input are wembs, otherwise input will be output of last hidden layer\n layer_below=emb if i==0 else layer_below[0]\n layer_below=get_layer(options['encoder_%s'%lang])[1](tparams,\n layer_below, options, None, prefix=layer_name_prefix, mask=mask,\n emb_dropout=emb_dropout, rec_dropout=rec_dropout)\n\n if i==int(options['n_enc_hidden_layers'])-1:\n # sentence embeddings (projections) are the output of the last hidden layer\n proj = layer_below\n\n if options['bidirectional_enc']:\n for i in range(int(options['n_enc_hidden_layers'])):\n layer_name_prefix='encoder_%s_r_%i'%(lang,i)\n # if first layer input are wembs, otherwise input will be output of last hidden layer\n layer_below=emb_r if i==0 else layer_below[0]\n layer_below=get_layer(options['encoder_%s'%lang])[1](tparams,\n layer_below, options, None, prefix=layer_name_prefix, mask=mask_r,\n emb_dropout=emb_dropout_r, rec_dropout=rec_dropout_r)\n\n if i==int(options['n_enc_hidden_layers'])-1:\n # sentence embeddings (projections) are the output of the last hidden layer\n proj_r = layer_below\n\n # use last hidden state of forward and backward RNNs\n sents = concatenate([proj[0][-1],proj_r[0][-1]], axis=proj[0].ndim-2)\n else:\n sents = proj[0][-1]\n\n if options['use_dropout']:\n sents *= shared_dropout_layer((n_samples, options['dim']), use_noise, trng, retain_probability_hidden)\n\n # project sentences into multimodal space\n sents_mm = get_layer('ff')[1](tparams, sents, options, prefix='ff_sentence_mm', activ='linear')\n if not 'attention_type' in options or options['attention_type'] == 'dot':\n sents_mm = l2norm(sents_mm)\n\n if options['use_dropout']:\n sents_mm *= shared_dropout_layer((n_samples, options['dim_multimodal']), use_noise, trng, retain_probability_hidden)\n\n # outputs per language\n in_outs.append(([x, mask], sents_mm))\n\n return trng, in_outs", "def add_sentence(self, sentence):\n for word in sentence.split(' '):\n self.add_word(word)" ]
[ "0.6890587", "0.6337464", "0.6256114", "0.6213781", "0.61906195", "0.6124841", "0.6122943", "0.5976476", "0.5967347", "0.59281605", "0.5920156", "0.59157413", "0.591105", "0.5867188", "0.58647937", "0.5861155", "0.58420885", "0.58300006", "0.58148724", "0.5812145", "0.5800837", "0.5772145", "0.5754972", "0.5753906", "0.5750292", "0.5739473", "0.57206994", "0.57160264", "0.57013386", "0.56924766", "0.56801057", "0.5675863", "0.56756306", "0.5674888", "0.5663197", "0.5663059", "0.5660208", "0.56545544", "0.56498903", "0.5643705", "0.5632515", "0.5625474", "0.5622835", "0.56214875", "0.5619631", "0.5617193", "0.561563", "0.5608252", "0.5596952", "0.55915433", "0.55782485", "0.55663085", "0.55527145", "0.5548177", "0.55359983", "0.5533284", "0.5528568", "0.55228174", "0.55183786", "0.55146277", "0.5509986", "0.5508808", "0.55033404", "0.5481082", "0.5457967", "0.545753", "0.54520327", "0.54506606", "0.54462177", "0.5441898", "0.54374236", "0.54315996", "0.54300594", "0.5429292", "0.54283977", "0.5427934", "0.54265326", "0.54240936", "0.5423596", "0.5413395", "0.54126346", "0.5406384", "0.54017836", "0.53996634", "0.5381537", "0.53773904", "0.53709674", "0.5365613", "0.5363548", "0.5357049", "0.53534317", "0.53513545", "0.53468263", "0.5342187", "0.5340122", "0.5336984", "0.5335056", "0.53344727", "0.5332242", "0.53302056" ]
0.80186236
0
Load embeddings either from cache or from scratch
def load_embeddings(config, name, vocab, training_generator, validation_generator):

    # Pickle embeddings should be AGNOSTIC to the name. This is because each pickled embedding is specific to the dataset and transformer.
    # Applies down the road when/if we attempt active learning

    data_name = config['train_file'].split('/')[-1][:-4]  # retrieve file name without the extension

    train_embed_pkl_f = os.path.join(config['cache'], data_name + '_' + config['embedding_type'] + '_training_embeddings.p')
    valid_embed_pkl_f = os.path.join(config['cache'], data_name + '_' + config['embedding_type'] + '_validation_embeddings.p')

    if os.path.exists(train_embed_pkl_f):
        with open(train_embed_pkl_f, 'rb') as cache:
            train_embeddings = pickle.load(cache)
        with open(valid_embed_pkl_f, 'rb') as cache:
            valid_embeddings = pickle.load(cache)
    else:
        # get embeddings from scratch
        tokenizer = AutoTokenizer.from_pretrained(vocab)
        embedding_model = AbstractBert(vocab)

        if torch.cuda.device_count() > 1:
            print("GPUs Available: ", torch.cuda.device_count())
            embedding_model = torch.nn.DataParallel(embedding_model, device_ids=[0, 1, 2])

        use_cuda = torch.cuda.is_available()
        device = torch.device("cuda:0" if use_cuda else "cpu")

        embedding_model.eval().to(device)

        logger.info(' Getting BERT/ROBERTA embeddings...')

        train_embeddings = _get_bert_embeddings(training_generator, embedding_model, config["metadata"])
        valid_embeddings = _get_bert_embeddings(validation_generator, embedding_model, config["metadata"])

        # save embeddings
        pickle.dump(train_embeddings, open(train_embed_pkl_f, 'wb'))
        pickle.dump(valid_embeddings, open(valid_embed_pkl_f, 'wb'))

        logger.info(' Saved full BERT/ROBERTA embeddings.')

    embedding_shape = train_embeddings['embeddings'][1].shape[0]

    return embedding_shape, train_embeddings, valid_embeddings
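The document above is essentially a cache-or-compute wrapper around pickle: reuse serialized embeddings when the cache file already exists, otherwise run the transformer and write the pickle for the next run. A minimal, self-contained sketch of that pattern follows; the helper name load_or_compute, the example cache path, and the compute_fn callable are illustrative assumptions and are not part of the dataset record.

import os
import pickle

def load_or_compute(cache_path, compute_fn):
    # Reuse the pickled result if the cache file already exists.
    if os.path.exists(cache_path):
        with open(cache_path, 'rb') as fp:
            return pickle.load(fp)
    # Otherwise compute from scratch and cache the result for later runs.
    result = compute_fn()
    os.makedirs(os.path.dirname(cache_path) or '.', exist_ok=True)
    with open(cache_path, 'wb') as fp:
        pickle.dump(result, fp)
    return result

# Hypothetical usage (cache file name and payload are placeholders):
# train_embeddings = load_or_compute('cache/imdb_bert_training_embeddings.p',
#                                    lambda: {'embeddings': []})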
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_embeddings(cache_path):\n with open(cache_path, \"rb\") as fp:\n _cached_embs = pickle.load(fp)\n return _cached_embs", "def load_embeddings():\n return embedding_utils.PretrainedWordEmbeddings(\n lowercase=FLAGS.lowercase,\n embeddings_path=FLAGS.fasttext_embeddings,\n max_vocab_size=FLAGS.max_vocab_size,\n skip_header=True)", "def load_embeddings(path, vocab, source_domain, target_domain, emb_name):\n\n pkl = './work/embeddings/%s_%s_%s.pkl' % (source_domain, target_domain, emb_name)\n if os.path.exists(pkl):\n print(\"Load embeddings from existing pkl file %s...\" % pkl)\n # word embeddings weights have been loaded\n embeddings = pickle.load(open(pkl, 'rb'))\n else:\n print(\"Load embedding from %s...\" % path)\n raw_embeddings = {}\n if emb_name == 'yelp_electronics':\n with open(path) as fp:\n for line in fp:\n word_vector = line.split(\",\")[:-1]\n vector_list = []\n for element in word_vector[len(word_vector) - 100:]:\n vector_list.append(float(element))\n word = ','.join(word_vector[:len(word_vector) - 100])\n vector = np.asarray(vector_list)\n if word in vocab:\n raw_embeddings[word] = vector\n else:\n with open(path) as fp:\n for line in fp:\n eles = line.strip().split(' ')\n word = eles[0]\n if word in vocab:\n raw_embeddings[word] = eles[1:]\n\n dim_w = len(raw_embeddings['the'])\n n_words = len(vocab)\n embeddings = np.zeros(shape=(n_words, dim_w))\n for w in vocab:\n wid = vocab[w]\n if w in raw_embeddings:\n embeddings[wid] = np.array([float(ele) for ele in raw_embeddings[w]])\n else:\n # for OOV words, add random initialization\n embeddings[wid] = np.random.uniform(-0.25, 0.25, dim_w)\n print(\"Find %s word embeddings...\" % len(embeddings))\n if not os.path.exists('./work/embeddings'):\n os.mkdir('./work/embeddings')\n emb_path = './work/embeddings/%s_%s_%s.pkl' % (source_domain, target_domain, emb_name)\n # write the embedding weights back to the disk\n pickle.dump(embeddings, open(emb_path, 'wb'))\n embeddings = np.array(embeddings, dtype='float32')\n return embeddings", "def load_pretrained_embeddings(self, embedding_path):\n trained_embeddings = {}\n with open(embedding_path, 'r') as fin:\n for line in fin:\n contents = line.strip().split()\n token = contents[0]\n if token not in self.token2id:\n continue\n trained_embeddings[token] = list(map(float, contents[1:]))\n embed_size = len(contents) - 1\n # load embeddings\n self.embeddings = np.random.randn([self.size, embed_size])\n for token in self.id2token:\n if token in trained_embeddings:\n self.embeddings[self.token2id[token]] = trained_embeddings[token]", "def init(self, preload_embeddings):\n\t\tself.__find_metadata()\n\t\tself.__parse_embedding_metadata()\n\t\tself.__parse_model_metadata()\n\t\t# should we load all of the word embeddings into memory now?\n\t\tif preload_embeddings:\n\t\t\tlog.info(\"Preloading word embeddings ...\")\n\t\t\tfor embed_id in self.embedding_meta:\n\t\t\t\tself.get_embedding(embed_id)\t\n\t\t\tlog.info(\"Preloaded %d word embeddings\" % len(self.embedding_cache))", "def load_pretrained_embeddings(self, embeddings):\r\n self.embedding.weight = nn.Parameter(embeddings)", "def load_pretrained_embeddings(self, embeddings):\n self.embedding.weight = nn.Parameter(embeddings)", "def load_pretrained_embeddings(self, embedding_path):\n trained_embeddings = {}\n with open(embedding_path, 'r', encoding='utf-8') as fin:\n for line in fin:\n contents = line.strip().split(\" \")\n term = contents[0]\n if term not in self.term2id:\n continue\n trained_embeddings[term] = list(map(float, 
contents[1:]))\n if self.embed_dim is None:\n self.embed_dim = len(contents) - 1\n filtered_terms = trained_embeddings.keys()\n # rebuild the term x id map\n self.term2id = {}\n self.id2term = {}\n for term in self.initial_terms:\n self.add(term, count=0)\n for term in filtered_terms:\n self.add(term, count=0)\n # load embeddings\n self.embeddings = np.zeros([self.size(), self.embed_dim])\n for term in self.term2id.keys():\n if term in trained_embeddings:\n self.embeddings[self.get_id(term)] = trained_embeddings[term]", "def load_glove_embeddings():\n\n emmbed_file = Path(\"./embeddings.pkl\")\n if emmbed_file.is_file():\n # embeddings already serialized, just load them\n print(\"Local Embeddings pickle found, loading...\")\n with open(\"./embeddings.pkl\", 'rb') as f:\n return pk.load(f)\n else:\n # create the embeddings\n print(\"Building embeddings dictionary...\")\n data = open(\"glove.6B.50d.txt\", 'r', encoding=\"utf-8\")\n embeddings = [[0] * EMBEDDING_SIZE]\n word_index_dict = {'UNK': 0} # first row is for unknown words\n index = 1\n for line in data:\n splitLine = line.split()\n word = tf.compat.as_str(splitLine[0])\n embedding = [float(val) for val in splitLine[1:]]\n embeddings.append(embedding)\n word_index_dict[word] = index\n index += 1\n data.close()\n\n # pickle them\n with open('./embeddings.pkl', 'wb') as f:\n print(\"Creating local embeddings pickle for faster loading...\")\n # Pickle the 'data' dictionary using the highest protocol available.\n pk.dump((embeddings, word_index_dict), f, pk.HIGHEST_PROTOCOL)\n\n return embeddings, word_index_dict", "def load_embedding_file(self):\n if self.language == 'en':\n embed_file_dir = self.embedding_path\n wv = KeyedVectors.load_word2vec_format(embed_file_dir, binary=True)\n self.pretrained_embedding = {}\n for word in wv.vocab.keys():\n normalized_word = normalization.process(self.language.upper(), word, letters_to_keep='', letters_to_remove='',\n lowercase=True, remove_repetitions_count=-1, remove_punct=True,\n remove_digits=True, remove_vowels=False, remove_diacritics=True,\n remove_spaces=False, remove_apostrophe=True, copy_through=False,\n keep_romanized_text=False)\n self.pretrained_embedding[normalized_word] = wv[word]\n self.embed_dim = 300\n\n else:\n embed_file_dir = self.embedding_path\n fin = open(embed_file_dir, 'r', encoding='utf-8', newline='\\n', errors='ignore')\n data = {}\n for line in fin:\n if len(line.split()) == 2: # header\n continue\n tokens = line.rstrip().split(' ')\n word = tokens[0]\n normalized_word = normalization.process(self.language.upper(), word, letters_to_keep='', letters_to_remove='',\n lowercase=True, remove_repetitions_count=-1, remove_punct=True,\n remove_digits=True, remove_vowels=False, remove_diacritics=True,\n remove_spaces=False, remove_apostrophe=True, copy_through=False,\n keep_romanized_text=False)\n data[normalized_word] = np.array(tokens[1:])\n self.pretrained_embedding = data\n self.embed_dim = 300", "def load_embeddings_models():\n\n\t# ---LOADING WORD2VEC MODEL---\n\tmodel_load_path = os.path.join(ROOT_PATH, 'models', 'word2vec', 'NILC', 'nilc_cbow_s300_300k.txt')\n\t# model_load_path = os.path.join('models', 'word2vec', 'NILC', 'nilc_skip_s300.txt')\n\tstart_time = time.time()\n\tprint(\"Started loading the word2vec model\")\n\tword2vec_model = KeyedVectors.load_word2vec_format(model_load_path)\n\t# word2vec_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\n\n\t# ---LOADING FASTTEXT MODEL---\n\tmodel_path = 
os.path.join(ROOT_PATH, 'models', 'fastText', 'cc.pt.300_300k.vec')\n\tstart_time = time.time()\n\tprint(\"Started loading the fasttext model\")\n\tfasttext_model = KeyedVectors.load_word2vec_format(model_path)\n\t# fasttext_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\t\n\n\t# ---LOADING PT-LKB MODEL---\n\tmodel_load_path = os.path.join(ROOT_PATH, 'models', 'ontoPT', 'PT-LKB_embeddings_64', 'ptlkb_64_30_200_p_str.emb')\n\t# model_load_path = os.path.join('models', 'ontoPT', 'PT-LKB_embeddings_128', 'ptlkb_128_80_10_p_str.emb')\n\tstart_time = time.time()\n\tprint(\"Started loading the PT-LKB-64 model\")\n\tptlkb64_model = KeyedVectors.load_word2vec_format(model_load_path)\n\t# ptlkb64_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\n\n\t# ---LOADING GLOVE-300 MODEL---\n\tmodel_load_path = os.path.join(ROOT_PATH, 'models', 'glove', 'glove_s300_300k.txt')\n\t# model_load_path = os.path.join('models', 'glove', 'glove_s100.txt')\n\tstart_time = time.time()\n\tprint(\"Started loading the GLOVE 300 dimensions model\")\n\tglove300_model = KeyedVectors.load_word2vec_format(model_load_path)\n\t# glove300_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\n\n\t# ---LOADING NUMBERBATCH MODEL---\n\tmodel_load_path = os.path.join(ROOT_PATH, 'models', 'numberbatch', 'numberbatch-17.02_pt_tratado.txt')\n\tstart_time = time.time()\n\tprint(\"Started loading the NUMBERBATCH dimensions model\")\n\tnumberbatch_model = KeyedVectors.load_word2vec_format(model_load_path)\n\t# numberbatch_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\n\n\treturn word2vec_model, fasttext_model, ptlkb64_model, glove300_model, numberbatch_model", "def load_data(args):\n if args.use_mnist:\n transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,),\n (0.3081,))\n ])\n print(\"Loading vocab...\")\n with open(args.vocab_loc, 'rb') as f:\n vocab = pickle.load(f)\n print(\"number of unique tokens: %d\" % len(vocab))\n\n print(\"Get data loader...\")\n train_loader = get_mnist_loader(\n vocab=vocab, train=True, download=True,\n transform=transform,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=2\n )\n test_loader = get_mnist_loader(\n vocab=vocab, train=False, download=True,\n transform=transform,\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=2\n\n )\n\n else:\n transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406),\n (0.229, 0.224, 0.225))\n ])\n print(\"Loading vocab...\")\n with open(args.vocab_loc, 'rb') as f:\n vocab = pickle.load(f)\n print(\"number of unique tokens: %d\" % len(vocab))\n\n print(\"Get data loader...\")\n train_loader = get_loader(\n root=args.images_loc, json=args.captions_loc, vocab=vocab, train=True,\n transform=transform,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=2\n )\n test_loader = get_loader(\n root=args.images_loc, json=args.captions_loc, vocab=vocab, train=False,\n transform=transform,\n batch_size=args.batch_size,\n shuffle=False,\n num_workers=2\n )\n\n # Input: word vector\n if args.embeddings_loc:\n print(\"\\nLoading word embeddings from %s\" % args.embeddings_loc)\n if 'google' in args.embeddings_loc.lower() and args.embeddings_loc.endswith('.bin'):\n w2v = 
KeyedVectors.load_word2vec_format(args.embeddings_loc, binary=True)\n emb_size = w2v.vector_size\n elif 'glove' in args.embeddings_loc.lower() and args.embeddings_loc.endswith('.txt'):\n w2v, emb_size = load_glove_vec(args.embeddings_loc)\n else:\n print(\"ERROR: unknown embedding file %s\" % args.embeddings_loc)\n return\n\n embeddings = np.random.uniform(-0.1, 0.1, size=(len(vocab), emb_size))\n for word, idx in vocab.word2idx.items():\n if word in w2v:\n embeddings[idx] = w2v[word]\n else:\n print(\"\\nCreating random word embeddings of size %dx%d\" % (len(vocab), args.embedding_size))\n embeddings = np.random.uniform(-0.1, 0.1, size=(len(vocab), args.embedding_size))\n\n return vocab, train_loader, test_loader, embeddings", "def __load(self, use_cache):\n\n cache_path = path_lib.get_relative_file_path('runtime', 'input_cache', f'company_embeddings_{VERSION}.pkl')\n if use_cache and os.path.isfile(cache_path):\n return path_lib.read_cache(cache_path)\n\n print(f'\\nloading data from {self.__competitor_path} ...')\n with open(self.__competitor_path, 'rb') as f:\n tmp = json.load(f)\n d_linkedin_name_2_linkedin_val = tmp['d_linkedin_name_2_linkedin_val']\n\n data = []\n\n print('loading sentence bert to generate embeddings ...')\n from sentence_transformers import SentenceTransformer\n self.__sentence_bert = SentenceTransformer('bert-large-nli-stsb-mean-tokens')\n\n # converting the raw data to features that we need\n for linkedin_name, linkedin_val in d_linkedin_name_2_linkedin_val.items():\n # get features\n feature = self.__choose_features(linkedin_val)\n data.append([feature, linkedin_name])\n\n print('writing cache ...')\n path_lib.cache(cache_path, data)\n\n print('finish loading ')\n return data", "def load_embedding(path=PROJECT_DIR / \"outputs/models/embedding.pkl\"):\n try:\n with open(path, \"rb\") as inp:\n embedding = pickle.load(inp)\n return embedding\n\n except FileNotFoundError:\n logger.error(f\"There is no embedding to load at {path}\")", "def load_pretrained_embedding(self, pre_embeddings):\n assert (pre_embeddings.size()[1] == self.embedding_dim)\n self.word_embeds.weight = nn.Parameter(pre_embeddings)", "def _dump_embeddings(self, cache_path, data):\n msg = f\"bert embeddings are are being cached for entity_type: `{self.type}` \" \\\n f\"for quicker entity resolution; consumes some disk space\"\n logger.info(msg)\n\n folder = os.path.split(cache_path)[0]\n if folder and not os.path.exists(folder):\n os.makedirs(folder)\n with open(cache_path, \"wb\") as fp:\n pickle.dump(data, fp)", "def get_embedding(self, embed_id):\n\t\tif not embed_id in self.embedding_meta:\n\t\t\treturn None\n\t\tif embed_id in self.embedding_cache:\n\t\t\tlog.info(\"Using cached embedding for %s\" % embed_id)\n\t\t\treturn self.embedding_cache[embed_id]\n\t\t# load the associated word embedding\n\t\tem = self.embedding_meta[embed_id]\n\t\tin_path = em.dir_base / em[\"file\"]\n\t\tlog.info(\"Loading word embedding from %s\" % in_path)\n\t\ttry:\n\t\t\tself.embedding_cache[embed_id] = Embedding(in_path)\n\t\texcept Exception as e:\n\t\t\tlog.warning(\"Failed to load word embedding: %s\" % in_path)\n\t\t\tlog.warning(e)\n\t\t\treturn None\n\t\treturn self.embedding_cache[embed_id]", "def load_embeddings(self, str_file):\n\n with open(str_file, 'rb') as f_read:\n self.embeddings_entity = pickle.load(f_read)\n self.embeddings_relation = pickle.load(f_read)\n self.dict_paras = pickle.load(f_read)", "def load_embedding(fpath, VOCAB):\n print(\"Loading embeddings...\")\n emb = dict()\n wv_from_bin = 
KeyedVectors.load_word2vec_format(fpath, limit=VOCAB)\n for word, vector in tqdm(zip(wv_from_bin.vocab, wv_from_bin.vectors)):\n coefs = np.asarray(vector, dtype='float32')\n if word not in emb:\n emb[word] = coefs\n return emb", "def load_pretrained_words_data(embeddings_filename, vocab):\n words = dict()\n emb_dim = None\n with gzip.open(cached_path(embeddings_filename), 'rb') as embeddings_file:\n for line in embeddings_file:\n fields = line.decode('utf-8').strip().split(' ')\n if len(fields) == 0:\n continue\n word = fields[0]\n if emb_dim is None:\n emb_dim = len(fields) - 1\n if emb_dim < 10: # my pretrained file is poisonous 😭\n emb_dim = None\n else:\n assert emb_dim == len(fields) - 1, \"{}, {}\".format(emb_dim, len(fields) - 1)\n words.update({word: [float(i) for i in fields[1:]]})\n print(\"Embedding dim: {}\".format(emb_dim))\n tokens = vocab.get_index_to_token_vocabulary(\"tokens\")\n n_tokens = len(tokens)\n data = []\n for i in tokens:\n if tokens[i] in words:\n data.append(words[tokens[i]])\n else:\n data.append([0] * emb_dim)\n return torch.tensor(data), emb_dim", "def get_word_embeddings(t, folder, lang=\"en\"):\n vecs_url = f\"https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.{lang}.300.vec.gz\"\n vecs_gz_filename = vecs_url.rpartition(\"/\")[2]\n os.makedirs(folder, exist_ok=True)\n vecs_gz_filepath = os.path.join(folder, vecs_gz_filename)\n\n tokenizer_vocab_size = len(t.vocab)\n\n if wait_for_file_stable(vecs_gz_filepath):\n print(\"Using existing embeddings file\")\n else:\n print(\"Downloading word vectors...\")\n subprocess.run([\" \".join([\"wget\", \"-NP\", folder, vecs_url])], check=True, shell=True)\n\n print(\"Loading into memory...\")\n embeddings_index = dict()\n with gzip.open(vecs_gz_filepath, \"rt\") as zipf:\n firstline = zipf.readline()\n emb_vocab_size, emb_d = firstline.split(\" \")\n emb_vocab_size = int(emb_vocab_size)\n emb_d = int(emb_d)\n for line in zipf:\n values = line.split()\n word = values[0]\n # Only load subset of the embeddings recognised by the tokenizer:\n if word in t.vocab.stoi:\n coefs = np.asarray(values[1:], dtype=\"float32\")\n embeddings_index[word] = coefs\n print(\"Loaded {} of {} word vectors for tokenizer vocabulary length {}\".format(\n len(embeddings_index),\n emb_vocab_size,\n tokenizer_vocab_size,\n ))\n\n # create a weight matrix for words in training docs\n embedding_matrix = np.zeros((tokenizer_vocab_size, emb_d))\n for word, i in t.vocab.stoi.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n return embedding_matrix", "def pretrained(name=\"glove_100d\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(WordEmbeddingsModel, name, lang, remote_loc)", "def load_embeddings(embedding_path, embedding_size, embedding_format):\n print(\"Loading word embeddings from {}...\".format(embedding_path))\n\n if embedding_format in ['vec', 'txt']:\n default_embedding = np.zeros(embedding_size)\n embedding_dict = collections.defaultdict(lambda: default_embedding)\n skip_first = embedding_format == \"vec\"\n with open(embedding_path) as f:\n for i, line in enumerate(f.readlines()):\n if skip_first and i == 0:\n continue\n splits = line.split(' ')\n assert len(splits) == embedding_size + 1\n word = splits[0]\n embedding = np.array([float(s) for s in splits[1:]])\n embedding_dict[word] = embedding\n elif embedding_format == 'bin':\n embedding_dict = 
fasttext.load_model(embedding_path)\n else:\n raise ValueError('Not supported embeddings format {}'.format(embedding_format))\n print(\"Done loading word embeddings.\")\n return embedding_dict", "def load_all(self):\n if os.path.isfile(self.vocab_path):\n self.vocab_processor = self.load_vocab()\n else:\n self.vocab_processor = self.train_vocab()\n if self.data_path:\n self.x, self.y = self.load_data(self.need_shuffle)\n print(\"Max document length: {}\".format(self.max_doc))", "def load_embeddings(glove_path, vocab):\n vocab_size = vocab.get_vocab_size()\n words_to_keep = set(vocab.get_index_to_token_vocabulary().values())\n glove_embeddings = {}\n embedding_dim = None\n\n logger.info(\"Reading GloVe embeddings from {}\".format(glove_path))\n with open(glove_path) as glove_file:\n for line in tqdm(glove_file,\n total=get_num_lines(glove_path)):\n fields = line.strip().split(\" \")\n word = fields[0]\n if word in words_to_keep:\n vector = np.asarray(fields[1:], dtype=\"float32\")\n if embedding_dim is None:\n embedding_dim = len(vector)\n else:\n assert embedding_dim == len(vector)\n glove_embeddings[word] = vector\n\n all_embeddings = np.asarray(list(glove_embeddings.values()))\n embeddings_mean = float(np.mean(all_embeddings))\n embeddings_std = float(np.std(all_embeddings))\n logger.info(\"Initializing {}-dimensional pretrained \"\n \"embeddings for {} tokens\".format(\n embedding_dim, vocab_size))\n embedding_matrix = torch.FloatTensor(\n vocab_size, embedding_dim).normal_(\n embeddings_mean, embeddings_std)\n # Manually zero out the embedding of the padding token (0).\n embedding_matrix[0].fill_(0)\n # This starts from 1 because 0 is the padding token, which\n # we don't want to modify.\n for i in range(1, vocab_size):\n word = vocab.get_token_from_index(i)\n\n # If we don't have a pre-trained vector for this word,\n # we don't change the row and the word has random initialization.\n if word in glove_embeddings:\n embedding_matrix[i] = torch.FloatTensor(glove_embeddings[word])\n return embedding_matrix", "def _add_pre_trained_embedding(self):\n\n if self.embedding_type['type'] == 'glove':\n self.logging.info('use pre-trained glove word2vec')\n # a. load pre trained glove\n GLOVE_DIR = '../data/glove_pretrained/glove.6B'\n glove_suffix_name = 'glove.6B.' + str(self.embedding_size) + 'd.txt'\n import os\n import numpy as np\n\n embeddings_index = {}\n f = open(os.path.join(GLOVE_DIR, glove_suffix_name)) # 'glove.6B.100d.txt'))\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n\n self.logging.info('')\n self.logging.info('Found %s word vectors.' % len(embeddings_index))\n\n # b. compute embedding matrix\n embedding_matrix = np.zeros((len(self.word_index) + 1, self.embedding_size))\n cnt = 0\n for word, i in self.word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector # words not found in embedding index will be all-zeros.\n else:\n # self.logging.info('token in train missing in word2vec: ' + str(word))\n cnt += 1\n self.logging.info('total tokens missing: ' + str(cnt) + ' / ' + str(len(self.word_index)))\n\n # c. 
build embedding layer\n from keras.layers import Embedding\n embedding_layer = Embedding(len(self.word_index) + 1,\n self.embedding_size,\n weights=[embedding_matrix],\n input_length=self.maxlen,\n trainable=False)\n\n elif self.embedding_type['type'] == 'gensim':\n self.logging.info('use pre-trained gensim word2vec')\n\n import gzip\n import gensim\n from keras.layers import Embedding\n import numpy as np\n\n # fname = '../data/word2vec_pretrained/motors/d_300_k_712904_w_6_e_60_v_motors'\n # fname = '../data/word2vec_pretrained/fashion/d_300_k_1341062_w_6_e_70_v_fashion'\n\n self.logging.info('load word2vec path: ' + str(self.embedding_type['path']))\n model = gensim.models.Word2Vec.load(self.embedding_type['path'])\n pretrained_weights = model.wv.syn0\n vocab_size, vector_dim = pretrained_weights.shape\n\n method = 3\n if method == 1:\n self.logging.info('word2vec attempt to fit into embedding layer - middle complex')\n # convert the wv word vectors into a numpy matrix that is suitable for insertion\n # into our TensorFlow and Keras models\n\n embedding_matrix = np.zeros((len(model.wv.vocab), vector_dim))\n for i in range(len(model.wv.vocab)):\n embedding_vector = model.wv[model.wv.index2word[i]]\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n embedding_layer = Embedding(input_dim=embedding_matrix.shape[0],\n output_dim=embedding_matrix.shape[1],\n # input_length=self.maxlen,\n weights=[embedding_matrix],\n trainable=False)\n elif method == 2:\n self.logging.info('word2vec simple embedding matching - simple complex')\n embedding_layer = Embedding(input_dim=vocab_size,\n output_dim=vector_dim,\n input_length=self.maxlen,\n weights=[pretrained_weights],\n trainable=False)\n elif method == 3:\n\n self.logging.info('word2vec match using word_index from keras tokenizer - as used in glove match above')\n # b. compute embedding matrix\n\n # sd = 1 / np.sqrt(len(self.word_index) + 1)\n # embedding_matrix = np.random.normal(0, scale=sd, size=(len(self.word_index) + 1, self.embedding_size))\n\n embedding_matrix = np.zeros((len(self.word_index) + 1, self.embedding_size))\n cnt = 0\n for word, i in self.word_index.items():\n if word in model.wv:\n embedding_vector = model.wv[word]\n embedding_matrix[i] = embedding_vector\n else:\n # self.logging.info('token in train missing in word2vec: ' + str(word))\n cnt += 1\n self.logging.info('total tokens missing: ' + str(cnt))\n\n\n # c. 
build embedding layer\n from keras.layers import Embedding\n embedding_layer = Embedding(len(self.word_index) + 1,\n self.embedding_size,\n weights=[embedding_matrix],\n input_length=self.maxlen,\n trainable=False)\n else:\n raise ValueError('unknown method value')\n\n else:\n raise ValueError('unknown embedding type')\n self.logging.info('create glove pre-trained embedding: ' + str(self.embedding_size))\n return embedding_layer", "def load_embeddings(emb_file, word_map):\n\n # Find embedding dimension\n with open(emb_file, 'r') as f:\n emb_dim = len(f.readline().split(' ')) - 1\n\n vocab = set(word_map.keys())\n\n # Create tensor to hold embeddings, initialize\n embeddings = torch.FloatTensor(len(vocab), emb_dim)\n init_embedding(embeddings)\n\n # Read embedding file\n print(\"\\nLoading embeddings...\")\n for line in open(emb_file, 'r'):\n line = line.split(' ')\n\n emb_word = line[0]\n embedding = list(map(lambda t: float(t), filter(lambda n: n and not n.isspace(), line[1:])))\n\n # Ignore word if not in train_vocab\n if emb_word not in vocab:\n continue\n\n embeddings[word_map[emb_word]] = torch.FloatTensor(embedding)\n\n return embeddings, emb_dim", "def load_pretrain_embedding(vocab, embed_size, embedding_path):\n model = KeyedVectors.load_word2vec_format(embedding_path)\n\n print('{} {}'.format(vocab.size(), embed_size))\n for token, id in vocab.token2id.items():\n if token in model:\n print('{} {}'.format(token, ' '.join(map(str, model[token]))))\n else:\n emb = np.random.random((embed_size,)) - 0.5\n print('{} {}'.format(token, ' '.join(map(str, emb))))", "def set_embeddings(self):", "def load_pretrained_embeddings(vocabulary: dict, max_size: int):\n # get GloVe 6B pre-trained word embeddings, of dimension 100\n glove_vec = torchtext.vocab.GloVe(name=\"6B\", dim=100, unk_init=torch.Tensor.normal_)\n\n pretrained = []\n for k, _ in vocabulary.stoi.items():\n if k == \"<PAD>\":\n emb = torch.zeros([glove_vec.dim])\n elif k == \"<UNK>\":\n emb = torch.rand([glove_vec.dim])\n else:\n emb = glove_vec.get_vecs_by_tokens(k, lower_case_backup=True)\n pretrained.append(emb) \n\n # return a tensor of size [vocab_size, emb_dim]\n return torch.stack(pretrained, dim=0)", "def build_pre_embedding(self, use_saved_embed=False):\n\n if use_saved_embed and\\\n self.config.parser['embedding_save_dir'] is not '':\n Print(\n f'reading saved embedding file from '\\\n f'{self.config.parser[\"embedding_save_dir\"]}',\n 'information'\n )\n with open(self.config.parser['embedding_save_dir'], 'rb') as f:\n pretrain_embed = pickle.load(f)\n else:\n if self.config.parser['embed_dir'] is None:\n Print('Pre-trained embedding file not available.', 'error')\n return\n\n embed_file = self.config.parser['embed_dir']\n\n # load in pre-trained Glove model, save it as a dict\n pretrain_embed = {}\n with open(embed_file, 'r', encoding='utf-8') as f:\n tqdm_iter = tqdm.tqdm(f.readlines())\n tqdm_iter.set_description('read from pre-trained file', False)\n for line in tqdm_iter:\n embed_content = line.strip().split()\n word, embed_content = embed_content[0], embed_content[1:]\n if self.config.parser['word_embed_dim'] < 0:\n self.config.parser['word_embed_dim'] = len(embed_content)\n elif self.config.parser['word_embed_dim'] != len(embed_content):\n # invalid embedding word\n continue\n embed_content = np.array([float(x) for x in embed_content])\n pretrain_embed[word] = embed_content\n \n if self.config.parser['embedding_save_dir'] is not '':\n with open(self.config.parser['embedding_save_dir'], 'wb') as f:\n 
pickle.dump(pretrain_embed, f)\n Print(\n f'pre-trained embedding dictionary is saved at '\\\n f'{self.config.parser[\"embedding_save_dir\"]}',\n 'success'\n )\n\n embed_dim = self.config.parser['word_embed_dim']\n\n # build embedding if find it in pre-trained model\n # else randomly generate one.\n self.embedding = np.empty([\n self.word_dict.word_size, embed_dim\n ])\n scale = np.sqrt(3 / embed_dim)\n perfect_match, case_match, not_match = 0, 0, 0\n for word, index in self.word_dict.word2idx.items():\n if word in pretrain_embed:\n self.embedding[index, :] = self.norm2one(pretrain_embed[word]) \\\n if self.config.parser['norm_word_embed'] else pretrain_embed[word]\n perfect_match += 1\n if word.lower() in pretrain_embed:\n self.embedding[index, :] = self.norm2one(pretrain_embed[word.lower()]) \\\n if self.config.parser['norm_word_embed'] else pretrain_embed[word.lower()]\n case_match += 1\n else:\n # not found\n self.embedding[index,\n :] = np.random.uniform(-scale, scale, [embed_dim])\n not_match += 1\n Print(\n f'Pre-trained embedding loaded in from {self.config.parser[\"embed_dir\"]},\\n'\\\n f'pre-train words: {len(pretrain_embed)}, perfect match {perfect_match},\\n'\\\n f'case match {case_match}, not match {not_match},\\n'\\\n f'oov {not_match / self.word_dict.word_size}', 'success'\n )\n return self.embedding", "def load_kb_embeddings(path_to_folder):\n\n entity2idx = {}\n allowed_indices = set()\n with open(\"data/entity2id.filtered.txt\", 'r') as f:\n for l in f.readlines():\n k, v, idx = tuple(l.strip().split(\"\\t\"))\n entity2idx[k] = int(idx) + 3\n allowed_indices.add(int(v))\n\n embeddings = []\n with open(path_to_folder + \"/entity2vec.vec\", 'r') as f:\n idx = 0\n for line in f.readlines():\n if idx in allowed_indices:\n split = line.strip().split('\\t')\n embeddings.append([float(num) for num in split])\n idx += 1\n\n entity2idx[all_zeroes] = 0 # 0 is reserved for padding\n entity2idx[unknown_el] = 1 # 1 is reserved for OOV\n entity2idx[no_annotation] = 2 # 2 is reserved for no annotation tokens\n embedding_size = len(embeddings[0])\n vector_oov = 2 * 0.1 * np.random.rand(embedding_size) - 0.1\n vector_na = 2 * 0.1 * np.random.rand(embedding_size) - 0.1\n embeddings = np.asarray([[0.0]*embedding_size, vector_oov, vector_na] + embeddings, dtype='float32')\n\n print(\"KB embeddings loaded: {}\".format(embeddings.shape))\n assert len(entity2idx) == len(embeddings)\n\n return entity2idx, embeddings", "def _load_word_embedding(self, lang):\n dict_fold = 'train' # which fold of the data will be used to produce results\n if self.args.task == 'conneau' or self.args.task == 'xling':\n data_dir = os.path.join(self.args.data_dir, 'MUSE')\n lang_path = os.path.join(data_dir, 'wiki.' + lang + '.vec')\n elif self.args.task == 'dinu':\n data_dir = os.path.join(self.args.data_dir, 'dinu')\n lang_path = os.path.join(data_dir, 'embeddings', lang + '.emb.txt')\n elif self.args.task == 'zhang':\n order = [lang,trg]\n if lang == 'en':\n order = order[::-1]\n data_dir = os.path.join(self.args.home_dir,'pkg/UBiLexAT/data/','-'.join(order))\n lang_path = os.path.join(data_dir, 'word2vec.' 
+ lang)\n\n langfile = open(lang_path, encoding=self.args.encoding, errors='surrogateescape')\n words, xs = embeddings.read(langfile, self.args.maxs)\n langfile.close()\n # Build word to index map\n word2ind = {word: i for i, word in enumerate(words)}\n\n return xs, words, word2ind", "def _read_fasttext_embeddings(self, vocab: vocabs.Vocab, init_fastext):\n with open(init_fastext, encoding='utf-8') as embeddings_file_handle:\n _, dimension = next(embeddings_file_handle).split()\n if int(dimension) != self.emb_dim:\n raise Exception(f\"An embedding size of {self.emb_dim} was specified, but the pretrained embeddings have size {dimension}\")\n\n # Poor man's Glorot initializer for missing embeddings\n bound = np.sqrt(6/(self.vocab_size + self.emb_dim))\n\n total_embs = 0\n in_vocab = 0\n missing = 0\n\n embeddings = np.empty((self.vocab_size, self.emb_dim), dtype='float')\n found = np.zeros(self.vocab_size, dtype='bool_')\n\n for line in embeddings_file_handle:\n total_embs += 1\n word, vals = line.strip().split(' ', 1)\n if word in vocab.w2i:\n in_vocab += 1\n index = vocab.w2i[word]\n embeddings[index] = np.fromstring(vals, sep=\" \")\n found[index] = True\n\n for i in range(self.vocab_size):\n if not found[i]:\n missing += 1\n embeddings[i] = np.random.uniform(-bound, bound, self.emb_dim)\n\n logger.info(f\"{in_vocab} vocabulary matches out of {total_embs} total embeddings; \"\n f\"{missing} vocabulary words without a pretrained embedding out of {self.vocab_size}\")\n\n return embeddings", "def pretrained(name=\"elmo\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(ElmoEmbeddings, name, lang, remote_loc)", "def pretrained(name=\"longformer_base_4096\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(LongformerEmbeddings, name, lang, remote_loc)", "def load_google_embeddings(embeddings_path):\n\n embeddings = KeyedVectors.load_word2vec_format(\n embeddings_path,\n binary=True\n )\n\n dim = embeddings['dog'].size\n\n return embeddings", "def load_from_folder(self, path: str, mode: str = \"r\") -> None:\n embedding_list = []\n metadata_list = []\n\n i = 0\n # Gets the image files in the folder and loads images.\n for root, _, files in tf.io.gfile.walk(path):\n for name in files:\n image_path = os.path.join(root, name)\n if image_path.lower().endswith(\".dat\"):\n continue\n\n try:\n with tf.io.gfile.GFile(image_path, \"rb\") as f:\n buffer = f.read()\n image = tensor_image.TensorImage.create_from_buffer(buffer)\n except RuntimeError as e:\n logging.warning(\n \"Can't read image from the image path %s with the error %s\",\n image_path, e)\n continue\n\n try:\n embedding = self._embedder.embed(\n image).embeddings[0].feature_vector.value\n except (RuntimeError, ValueError) as e:\n logging.warning(\"Can't get the embedding of %s with the error %s\",\n image_path, e)\n continue\n\n embedding_list.append(embedding)\n if self.metadata_type == _MetadataType.FROM_DAT_FILE:\n metadata = self._metadata_loader.load(image_path, mode=mode)\n else:\n metadata = self._metadata_loader.load(image_path)\n metadata_list.append(metadata)\n\n i += 1\n if i % 1000 == 0:\n logging.info(\"Processed %d images.\", i)\n\n cache_dataset = np.stack(embedding_list)\n self._cache_dataset_list.append(cache_dataset)\n self._metadata = self._metadata + metadata_list", "def load_model(embed_dir):\n # need to have gensim model + syn0.npy + syn1neg.npy\n model = 
gensim.models.Word2Vec.load(embed_dir)\n return model", "def augment_with_pretrained(dictionary, ext_emb_path, words):\n print('Loading pretrained embeddings from %s...' % ext_emb_path)\n assert os.path.isfile(ext_emb_path)\n\n # Load pretrained embeddings from file\n pretrained = set([\n line.rstrip().split()[0].strip()\n for line in codecs.open(ext_emb_path, 'r', 'utf-8')\n if len(ext_emb_path) > 0\n ])\n\n # We either add every word in the pretrained file,\n # or only words given in the `words` list to which\n # we can assign a pretrained embedding\n if words is None:\n for word in pretrained:\n if word not in dictionary:\n dictionary[word] = 0\n else:\n for word in words:\n if any(x in pretrained for x in [\n word,\n word.lower(),\n re.sub('\\d', '0', word.lower())\n ]) and word not in dictionary:\n dictionary[word] = 0\n\n word_to_id, id_to_word = create_mapping(dictionary)\n return dictionary, word_to_id, id_to_word", "def _index(self, corpus):\n\n # Transform documents to embeddings vectors\n ids, dimensions, stream = self.embedder.model.index(corpus)\n\n # Load streamed embeddings back to memory\n embeddings = np.empty((len(ids), dimensions), dtype=np.float32)\n with open(stream, \"rb\") as queue:\n for x in range(embeddings.shape[0]):\n embeddings[x] = pickle.load(queue)\n\n # Remove temporary file\n os.remove(stream)\n\n all_text = []\n for para_id, text, _ in corpus:\n all_text.append([text, para_id])\n\n df = pd.DataFrame(all_text, columns=[\"text\", \"paragraph_id\"])\n\n embedding_path = os.path.join(\n self.index_path, self.embed_paths[\"embeddings\"])\n dataframe_path = os.path.join(\n self.index_path, self.embed_paths[\"dataframe\"])\n ids_path = os.path.join(self.index_path, self.embed_paths[\"ids\"])\n\n # Load new data\n if os.path.isfile(embedding_path) and (self.encoder_args[\"overwrite\"] is False):\n logger.info(f\"Loading new data from {embedding_path}\")\n\n # Load existing embeddings\n old_embeddings = np.load(embedding_path) # LOAD EMBEDDINGS\n # Remove embeddings with document id overlaps\n embeddings = np.vstack((old_embeddings, embeddings))\n\n # load IDs\n old_ids = [doc_id[:-1] for doc_id in open_txt(ids_path)]\n logger.debug(f\"New ID Length = {len(ids)}\")\n logger.debug(f\"Old ID Length = {len(old_ids)}\")\n # Remove document ids overlaps\n logger.debug(f\"New ID Length = {len(ids)}\")\n ids = old_ids + ids\n logger.debug(f\"Merged ID Length = {len(ids)}\")\n\n # Append new dataframe\n old_df = pd.read_csv(dataframe_path)\n df = pd.concat([old_df, df])\n\n # Store embeddings and document index\n # for future reference\n np.save(embedding_path, embeddings)\n with open(ids_path, \"w\") as fp:\n fp.writelines([i + \"\\n\" for i in ids])\n\n # Save data csv\n df.to_csv(dataframe_path, index=False)\n\n # Normalize embeddings\n self.embedder.normalize(embeddings)\n\n # Save embeddings metadata\n self.embedder.config[\"ids\"] = ids\n self.embedder.config[\"dimensions\"] = dimensions\n\n # Create embeddings index\n logger.info(f\"Creating embeddings and index\")\n self.embedder.embeddings = ANN.create(self.embedder.config)\n logger.info(f\"Created embeddings\")\n\n # Build the index\n self.embedder.embeddings.index(embeddings)\n logger.info(f\"Built the embeddings index\")", "def load_embedding(fname, vocab):\n model = gensim.models.Word2Vec.load(fname)\n embedding = model.wv # keep only the embedding dictionary\n del model # frees up memory used to store Word2Vec model\n\n k = len(embedding['a']) # dimension of embedding\n unknown_vec = lambda: 
np.random.normal(0,0.17,k) #TODO check these parameters\n \n restricted_embedding = {word: default_get(embedding, word, unknown_vec()) for word in vocab}\n return restricted_embedding", "def init_pretrained_glove(glove_path, word2idx, embedding_dim):\n vocab_size = len(word2idx)\n # read in the glove files\n glove_file = os.path.join(glove_path, 'glove.6B.{:d}d.json'.\n format(embedding_dim))\n with open(glove_file, 'r') as fp:\n word2glove = json.load(fp)\n print('Read embeddings: {:s}'.format(glove_file))\n\n # then make giant matrix with all the matching vocab words\n padding_idx = 0\n # follow Karpahty's advice and initialize really small\n pretrained = torch.randn(vocab_size, embedding_dim) * 0.01\n count = 0\n for word, idx in word2idx.iteritems():\n # reserve the padding idx as 0\n if idx == padding_idx:\n torch.FloatTensor(embedding_dim).zero_()\n # keep as random initialization\n if word not in word2glove:\n continue\n pretrained[idx] = torch.FloatTensor(word2glove[word])\n\n embed = torch.nn.Embedding(vocab_size, embedding_dim)\n embed.weight = torch.nn.Parameter(pretrained)\n return embed", "def load_embeddings(db):\n size = db['size'].values\n emb = db['embedding'].values\n emb = [np.load(i).flatten() for i in emb]\n return emb, size", "def load_embeddings(embeddings_path):\n\n embeddings_index = {}\n f = open(embeddings_path, encoding='utf-8')\n for line in tqdm(f):\n values = line.rstrip().split(' ')\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n print('Found {} word vectors.'.format(len(embeddings_index)))\n return embeddings_index", "def load_embeddings(self, words, embedding_file):\n words = {w for w in words if w in vocab}\n logger.info('Loading pre-trained embeddings for %d words from %s' %\n (len(words), embedding_file))\n embedding = self.network.embedding.weight.data\n\n # When normalized, some words are duplicated. (Average the embeddings).\n vec_counts = {}\n with open(embedding_file) as f:\n for line in f:\n parsed = line.rstrip().split(' ')\n assert(len(parsed) == embedding.size(1) + 1)\n w = vocab.normalize(parsed[0])\n if w in words:\n vec = torch.Tensor([float(i) for i in parsed[1:]])\n if w not in vec_counts:\n vec_counts[w] = 1\n embedding[vocab[w]].copy_(vec)\n else:\n logging.warning('WARN: Duplicate embedding found for %s' % w)\n vec_counts[w] = vec_counts[w] + 1\n embedding[vocab[w]].add_(vec)\n\n for w, c in vec_counts.items():\n embedding[vocab[w]].div_(c)\n\n logger.info('Loaded %d embeddings (%.2f%%)' %\n (len(vec_counts), 100 * len(vec_counts) / len(words)))", "def augment_with_pretrained(dictionary, ext_emb_path, chars):\n print('Loading pretrained embeddings from %s...' 
% ext_emb_path)\n assert os.path.isfile(ext_emb_path)\n\n # Load pretrained embeddings from file\n pretrained = set([\n line.rstrip().split()[0].strip()\n for line in codecs.open(ext_emb_path, 'r', 'utf-8')\n if len(ext_emb_path) > 0\n ])\n\n # We either add every word in the pretrained file,\n # or only words given in the `words` list to which\n # we can assign a pretrained embedding\n if chars is None:\n for char in pretrained:\n if char not in dictionary:\n dictionary[char] = 0\n else:\n for char in chars:\n if any(x in pretrained for x in [\n char,\n char.lower(),\n re.sub('\\d', '0', char.lower())\n ]) and char not in dictionary:\n dictionary[char] = 0\n\n word_to_id, id_to_word = create_mapping(dictionary)\n return dictionary, word_to_id, id_to_word", "def loadSavedModel(folder, spark_session):\n from sparknlp.internal import _XlnetLoader\n jModel = _XlnetLoader(folder, spark_session._jsparkSession)._java_obj\n return XlnetEmbeddings(java_model=jModel)", "def load_embedding(src: str, embedding_type, layers) -> TransformerEmbedding:\n emb = TransformerEmbedding(src, embedding_type=embedding_type, layers=layers)\n return emb", "def loadEmbModel(embFile, logger):\n logger.info(\"Loading Embedding Model\")\n f = open(embFile,'r')\n model = {}\n v = []\n for line in f:\n splitLine = line.split(' ')\n word = splitLine[0]\n try:\n embedding = np.array([float(val) for val in splitLine[1:]])\n except:\n logger.info(len(v), line)\n model[word] = embedding\n v.append(embedding)\n mean = np.array(v).mean(0)\n logger.info(mean.shape)\n model['<unk>'] = torch.tensor(mean)\n model['<pad>'] = torch.zeros(embedding.shape)\n model['<start>'] = torch.zeros(embedding.shape)\n model['<end>'] = torch.zeros(embedding.shape)\n logger.info(\"Done.\",len(model),\" words loaded!\")\n return model", "def load_embeddings(path):\r\n\r\n embeds = dict() # dictionary mapping words to vectors\r\n for line in open(path, encoding='utf-8'):\r\n row = line.strip().split('\\t')\r\n embeds[row[0]] = np.array(row[1:], dtype=np.float32)\r\n\r\n embeddings_dim = embeds[list(embeds)[0]].shape[0]\r\n\r\n return embeds, embeddings_dim", "def load(self):\n\n path = Models.modelPath(\"stackexchange\")\n\n if os.path.isfile(os.path.join(path, \"config\")):\n print(f\"Loading model from {path}\")\n embeddings = Embeddings()\n embeddings.load(path)\n else:\n print(\"ERROR: loading model: ensure model is installed\")\n print(\n \"ERROR: Pre-trained model can be installed by running python -m codequestion.download\"\n )\n raise FileNotFoundError(f\"Unable to load codequestion model from {path}\")\n\n return embeddings", "def load_embedding(self, glove_dir='glove.6B/'):\n\n f = open(os.path.join(glove_dir, 'glove.6B.100d.txt'))\n for line in f:\n values = line.split()\n word = values[0]\n self.embeddings_index[word] = np.asarray(values[1:], dtype='float32')\n f.close()", "def get_embeddings():\n # Load the raw embedding data\n X_train = np.load('./train_embeddings.npy')\n \n y_train = np.load('./train_labels.npy')\n \n X_valid = np.load('./valid_embeddings.npy')\n \n y_valid = np.load('./valid_labels.npy')\n \n X_test = np.load('./test_embeddings.npy')\n \n y_test = np.load('./test_labels.npy')\n\n #return X_train, y_train\n return X_train, y_train, X_valid, y_valid, X_test, y_test", "def load_pretrained_vectors(self, emb_file, fixed):\n if emb_file:\n pretrained = torch.load(emb_file)\n self.word_lut.weight.data.copy_(pretrained)\n if fixed:\n self.word_lut.weight.requires_grad = False", "def load(self, path: str, lossy: bool, mmap: 
bool) -> Embeddings:\n if self == Format.finalfusion:\n return load_finalfusion(path, mmap)\n if self == Format.word2vec:\n return load_word2vec(path, lossy)\n if self == Format.text:\n return load_text(path, lossy)\n if self == Format.textdims:\n return load_text_dims(path, lossy)\n if self == Format.fasttext:\n return load_fasttext(path, lossy)\n raise ValueError(f\"Unknown format {str(self)}\")", "def load_embeddings(embedding_path):\n print('loading word embeddings from %s' % embedding_path)\n weight_vectors = []\n word_idx = {}\n with codecs.open(embedding_path, encoding='utf-8') as f:\n for line in f:\n word, vec = line.split(u' ', 1)\n word_idx[word] = len(weight_vectors)\n weight_vectors.append(np.array(vec.split(), dtype=np.float32))\n # Annoying implementation detail; '(' and ')' are replaced by '-LRB-' and\n # '-RRB-' respectively in the parse-trees.\n word_idx[u'-LRB-'] = word_idx.pop(u'(')\n word_idx[u'-RRB-'] = word_idx.pop(u')')\n # Random embedding vector for unknown words.\n weight_vectors.append(np.random.uniform(\n -0.05, 0.05, weight_vectors[0].shape).astype(np.float32))\n return np.stack(weight_vectors), word_idx", "def loadSavedModel(folder, spark_session):\n from sparknlp.internal import _LongformerLoader\n jModel = _LongformerLoader(folder, spark_session._jsparkSession)._java_obj\n return LongformerEmbeddings(java_model=jModel)", "def init_embeddings_from_file(self, filepath, mode=None, **kwargs):\n words = self.d.vocab\n weight, words = EmbeddingLoader(filepath, mode).load(words, **kwargs)\n self.init_embeddings(weight, words)", "def loadSavedModel(folder, spark_session):\n from sparknlp.internal import _AlbertLoader\n jModel = _AlbertLoader(folder, spark_session._jsparkSession)._java_obj\n return AlbertEmbeddings(java_model=jModel)", "def loadSavedModel(folder, spark_session):\n from sparknlp.internal import _RoBertaLoader\n jModel = _RoBertaLoader(folder, spark_session._jsparkSession)._java_obj\n return RoBertaEmbeddings(java_model=jModel)", "def pretrained(name=\"small_bert_L2_768\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(BertEmbeddings, name, lang, remote_loc)", "def pretrained(name=\"albert_base_uncased\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(AlbertEmbeddings, name, lang, remote_loc)", "def load_word2vec(emb_path, id_to_word, word_dim, old_weights):\n new_weights = old_weights\n print('Loading pretrained embeddings from {}...'.format(emb_path))\n pre_trained = {}\n emb_invalid = 0\n for i, line in enumerate(codecs.open(emb_path, 'r', 'utf-8')):\n line = line.rstrip().split()\n if len(line) == word_dim + 1:\n pre_trained[line[0]] = np.array(\n [float(x) for x in line[1:]]\n ).astype(np.float32)\n else:\n emb_invalid += 1\n if emb_invalid > 0:\n print('WARNING: %i invalid lines' % emb_invalid)\n c_found = 0\n c_lower = 0\n c_zeros = 0\n n_words = len(id_to_word)\n # Lookup table initialization\n for i in range(n_words):\n word = id_to_word[i]\n if word in pre_trained:\n new_weights[i] = pre_trained[word]\n c_found += 1\n elif word.lower() in pre_trained:\n new_weights[i] = pre_trained[word.lower()]\n c_lower += 1\n elif re.sub('\\d', '0', word.lower()) in pre_trained:\n new_weights[i] = pre_trained[\n re.sub('\\d', '0', word.lower())\n ]\n c_zeros += 1\n print('Loaded %i pretrained embeddings.' 
% len(pre_trained))\n print('%i / %i (%.4f%%) words have been initialized with '\n 'pretrained embeddings.' % (\n c_found + c_lower + c_zeros, n_words,\n 100. * (c_found + c_lower + c_zeros) / n_words)\n )\n print('%i found directly, %i after lowercasing, '\n '%i after lowercasing + zero.' % (\n c_found, c_lower, c_zeros\n ))\n return new_weights", "def load_model(self, dict_path, path, nnModel, context_size, embed_size):\n self.word2idx = np.load(dict_path, allow_pickle= True).item()\n self.model = nnModel(len(self.word2idx), embed_size, context_size)\n self.model.load_state_dict(torch.load(path))\n self.model.eval()\n logging.info(f'model loaded, and word dict len: {len(self.word2idx)}')", "def loadSavedModel(folder, spark_session):\n from sparknlp.internal import _XlmRoBertaLoader\n jModel = _XlmRoBertaLoader(folder, spark_session._jsparkSession)._java_obj\n return XlmRoBertaEmbeddings(java_model=jModel)", "def load(cls, filepath) -> 'Word2VecEmbedding':\n with open(filepath, 'rb') as f:\n embedding = pickle.load(f)\n embedding.word2idx = {spell: idx for idx, spell in enumerate(embedding.vocab.idx2word)}\n return embedding", "def _warm_cache(self):\n for word, index in self.word_to_index.items():\n self.embedding_layer.weight.data[index].copy_(torch.from_numpy(self.embedder.get_word_vector(word)))", "def load_vocab():\n # vocab loaded internally at google\n unused = r.sp_model\n del unused\n return r", "def add_embedding(self):\n ### YOUR CODE HERE (~4-6 lines)\n embeddingTensor = tf.Variable(self.pretrained_embeddings)\n embeddings = tf.nn.embedding_lookup(embeddingTensor, self.input_placeholder)\n embeddings = tf.reshape(embeddings, [-1, self.max_length, Config.n_features * Config.embed_size])\n ### END YOUR CODE\n return embeddings", "def loadSavedModel(folder, spark_session):\n from sparknlp.internal import _DistilBertLoader\n jModel = _DistilBertLoader(folder, spark_session._jsparkSession)._java_obj\n return DistilBertEmbeddings(java_model=jModel)", "def load_embed(file_name, vocab_size):\n\n with tf.io.gfile.Open(file_name, 'r') as embed_file:\n vocab = []\n embeds = []\n depth = -1\n for index, line in enumerate(embed_file):\n if vocab_size > 0 and index >= vocab_size:\n break\n line = line.strip()\n tokens = line.strip().split(' ')\n word = tokens[0]\n vocab.append(word)\n if depth == -1:\n embed = [float(token) for token in tokens[1:]]\n else:\n embed = [float(token) for token in tokens[-depth:]]\n d = len(embed)\n if depth == -1:\n depth = d\n if d != depth:\n raise ValueError('Inconsistent embedding sizes')\n embeds.append(embed)\n\n embeds = np.stack(embeds)\n\n return vocab, embeds, depth", "def loadSavedModel(folder, spark_session):\n from sparknlp.internal import _ElmoLoader\n jModel = _ElmoLoader(folder, spark_session._jsparkSession)._java_obj\n return ElmoEmbeddings(java_model=jModel)", "def fetch_molecular_embedding(self, n_molecules: int, cache_directory: str = None):\n return NotImplemented", "def get_embeddings():\n embeddings = dict(get_coefs(*o.strip().split()) for o in open(EMBEDDING_FILE))\n return embeddings", "def load_document_embeddings(path):\n embedding_dimension = 0\n \n # First pass to work out maximum topic ID to create numpy embeddings\n with open(path, 'rb') as avro_file:\n avro_reader = reader(avro_file)\n for document_embedding in avro_reader:\n topic_probs = document_embedding['topic_probs']\n \n for topic_prob in topic_probs:\n topic_id = topic_prob['topic_id']\n if topic_id + 1 > embedding_dimension:\n embedding_dimension = topic_id + 1\n \n # 
Second pass to actually store the embeddings\n x = []\n y = []\n \n with open(path, 'rb') as avro_file:\n avro_reader = reader(avro_file)\n for document_embedding in avro_reader:\n label = document_embedding['label']\n topic_probs = document_embedding['topic_probs']\n \n embedding = np.zeros(shape=embedding_dimension, dtype=np.float32)\n \n for topic_prob in topic_probs:\n topic_id = topic_prob['topic_id']\n prob = topic_prob['prob']\n embedding[topic_id] = prob\n \n x.append(embedding)\n y.append(label)\n \n return x, y", "def load_data(embed_words=None):\n assert embed_words is None or type(embed_words) == list\n\n profs = load_professions(embed_words=embed_words)\n gender_seed = load_gender_seed(embed_words=embed_words)\n eq_pairs = load_equalize_pairs()\n def_pairs = load_definitional_pairs(embed_words=embed_words)\n return gender_seed, def_pairs, eq_pairs, profs", "def loadSavedModel(folder, spark_session):\n from sparknlp.internal import _RoBertaSentenceLoader\n jModel = _RoBertaSentenceLoader(folder, spark_session._jsparkSession)._java_obj\n return RoBertaSentenceEmbeddings(java_model=jModel)", "def load(self, path, model_type='word2vec'):\n\n # Code for loading Word2vec model:\n if model_type == 'word2vec':\n self.__model = KeyedVectors.load_word2vec_format(path)\n self.__embedding = self.__model.wv\n\n # Code for loading fastText model:\n elif model_type == 'fasttext':\n self.__model = FastText.load_fasttext_format(path)\n self.__embedding = self.__model.wv\n\n # In case we're trying to load an unsupported model type:\n else:\n raise Exception(\"Model '{}' not supported (must be 'word2vec' or 'fasttext').\".format(model_type) +\n \" Cannot load word embedding model.\")", "def load_preprocessed(self):\n with open(self.words_vocab_file, 'rb') as f:\n self.word_to_id, self.unk_word_list = pickle.load(f)\n self.word_vocab_size = len(self.word_to_id)\n\n if self.unit != \"word\":\n with open(self.sub_vocab_file, 'rb') as f:\n if self.unit == \"char\":\n self.max_word_len = self.get_max_word_length(self.word_to_id) + 2\n self.char_to_id, self.unk_char_list, self.max_word_len = pickle.load(f)\n self.subword_vocab_size = len(self.char_to_id)\n elif self.unit == \"char-ngram\":\n self.ngram_to_id, self.unk_char_list, self.unk_ngram_list, \\\n self.max_ngram_per_word = pickle.load(f)\n self.subword_vocab_size = len(self.ngram_to_id)\n elif self.unit == \"morpheme\":\n self.morpheme_to_id, self.unk_char_list, self.unk_morph_list, \\\n self.max_morph_per_word = pickle.load(f)\n self.subword_vocab_size = len(self.morpheme_to_id)\n elif self.unit == \"oracle\":\n self.morpheme_to_id, self.max_morph_per_word = pickle.load(f)\n self.subword_vocab_size = len(self.morpheme_to_id)\n else:\n sys.exit(\"Unknown unit\")", "def loadSavedModel(folder, spark_session):\n from sparknlp.internal import _XlmRoBertaSentenceLoader\n jModel = _XlmRoBertaSentenceLoader(folder, spark_session._jsparkSession)._java_obj\n return XlmRoBertaSentenceEmbeddings(java_model=jModel)", "def get_embeddings(model, loader, device=torch.device('cpu')):\n embeddings = []\n labels = []\n for item in loader:\n data, label = item\n data = data.view(-1, 1, data.shape[-1])\n data = data.to(device)\n label = label.to(device)\n output = model(data).squeeze(1)\n\n embedding = output.cpu().data.numpy()\n label = label.cpu().data.numpy()\n embeddings.append(embedding)\n labels.append(label)\n\n embeddings = np.array(embeddings)\n labels = np.array(labels)\n\n return embeddings, labels", "def load_char_embeddings(char_vocab, ds_name):\n 
n_char = len(char_vocab)\n pkl = './embeddings/%s_char.pkl' % ds_name\n if os.path.exists(pkl):\n print(\"Load character embeddings from %s...\" % pkl)\n embeddings = pickle.load(open(pkl, 'rb'))\n else:\n emb_path = './embeddings/char-embeddings.txt'\n print(\"Load character embeddings from %s...\" % emb_path)\n raw_embeddings = {}\n n_found = 0\n with open(emb_path) as fp:\n for line in fp:\n eles = line.strip().split()\n ch = eles[0]\n vec = [float(ele) for ele in eles[1:]]\n if ch not in raw_embeddings:\n raw_embeddings[ch] = vec\n\n dim_ch = len(raw_embeddings['A'])\n embeddings = np.zeros(shape=(n_char, dim_ch))\n for ch in char_vocab:\n cid = char_vocab[ch]\n if ch in raw_embeddings:\n embeddings[cid] = np.array(raw_embeddings[ch])\n n_found += 1\n else:\n embeddings[cid] = np.random.uniform(-0.25, 0.25, dim_ch)\n print(\"Find %s chars in pre-trained character embeddings...\" % n_found)\n embeddings = np.array(embeddings, dtype='float32')\n pickle.dump(embeddings, open(pkl, 'wb'))\n return embeddings", "def load_data():\n t = time()\n print 'loading tweets, please wait...'\n trained_tweets = load_tweets('training_dataset')\n eval_tweets = load_tweets('evaluation_dataset')\n print 'Time taken {}'.format(time() - t)\n t = time()\n print 'loading w2v model, please wait...'\n model = w2v_load_model('GoogleNews-vectors-negative300.bin')\n print 'Time taken {}'.format(time() - t)\n return trained_tweets, eval_tweets, model", "def loadSavedModel(folder, spark_session):\n from sparknlp.internal import _BertLoader\n jModel = _BertLoader(folder, spark_session._jsparkSession)._java_obj\n return BertEmbeddings(java_model=jModel)", "def load_from_datastore(archives=False):\n\n # This shouldn't happen often (should only happen when memcache has\n # been completely evicted), but we still want to be as fast as\n # possible.\n\n bingo_cache = BingoCache()\n\n if archives:\n # Disable cache writes if loading from archives\n bingo_cache.storage_disabled = True\n\n experiment_dict = {}\n alternatives_dict = {}\n\n # Kick both of these off w/ run() so they'll prefetch asynchronously\n experiments = _GAEBingoExperiment.all().filter(\n \"archived =\", archives).run(batch_size=400)\n alternatives = _GAEBingoAlternative.all().filter(\n \"archived =\", archives).run(batch_size=400)\n\n for experiment in experiments:\n experiment_dict[experiment.name] = experiment\n\n alternatives = sorted(list(alternatives), key=lambda alt: alt.number)\n\n for alternative in alternatives:\n if alternative.experiment_name not in alternatives_dict:\n alternatives_dict[alternative.experiment_name] = []\n alternatives_dict[alternative.experiment_name].append(alternative)\n\n for experiment_name in experiment_dict:\n ex, alts = (experiment_dict.get(experiment_name),\n alternatives_dict.get(experiment_name))\n if ex and alts:\n bingo_cache.add_experiment(ex, alts)\n\n # Immediately store in memcache as soon as possible after loading from\n # datastore to minimize # of datastore loads\n bingo_cache.store_if_dirty()\n\n return bingo_cache", "def load_embedding_tf(word_to_index, tf_embeddings_file_path, nb_dims):\n # 1. Define the variable that will hold the embedding:\n tf_embedding = tf.Variable(\n tf.constant(0.0, shape=[len(word_to_index)-1, nb_dims]),\n trainable=False,\n name=\"Embedding\"\n )\n\n # 2. 
Restore the embedding from disks to TensorFlow, GPU (or CPU if GPU unavailable):\n variables_to_restore = [tf_embedding]\n embedding_saver = tf.compat.v1.train.Saver(variables_to_restore)\n embedding_saver.restore(sess, save_path=tf_embeddings_file_path)\n print(\"TF embeddings restored from '{}'.\".format(tf_embeddings_file_path))\n \n return tf_embedding", "def pretrained(name=\"sent_small_bert_L2_768\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(BertSentenceEmbeddings, name, lang, remote_loc)", "def load(path, device=None):\n\n V, W, vb, wb, dictionary = None, None, None, None, None\n\n dictionary_path = os.path.join(path, 'dictionary')\n if os.path.exists(dictionary_path):\n dictionary = h.dictionary.Dictionary.load(dictionary_path)\n V = np.load(os.path.join(path, 'V.npy'))\n if os.path.exists(os.path.join(path, 'W.npy')):\n W = np.load(os.path.join(path, 'W.npy'))\n if os.path.exists(os.path.join(path, 'v_bias.npy')):\n vb = np.load(os.path.join(path, 'v_bias.npy'))\n if os.path.exists(os.path.join(path, 'w_bias.npy')):\n wb = np.load(os.path.join(path, 'w_bias.npy'))\n\n return Embeddings(\n V, W=W, vb=vb, wb=wb, dictionary=dictionary,\n device=device\n )", "def pretrained(name=\"xlnet_base_cased\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(XlnetEmbeddings, name, lang, remote_loc)", "def load_full():\n _fetch_full()\n return _load(cache_full, _parse_full)", "def pretrained(name=\"roberta_base\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(RoBertaEmbeddings, name, lang, remote_loc)", "def load_batched_dataset(is_train, embeddings):\n tensorize_text_fn = build_tensorize_text_fn(embeddings)\n unbatched = load_data(is_train)\n\n def tensorize(x):\n x[\"premise\"] = tensorize_text_fn(x[\"premise\"])\n x[\"hypothesis\"] = tensorize_text_fn(x[\"hypothesis\"])\n return x\n\n unbatched = unbatched.map(tensorize)\n\n hist_bins = list(range(5, 500, 5))\n batched = unbatched.apply(\n ops.bucket_by_quantiles(lambda x: x[\"premise\"][\"len\"], FLAGS.batch_size,\n 10, hist_bins))\n if is_train:\n batched = batched.shuffle(1000, reshuffle_each_iteration=True)\n batched = batched.repeat()\n\n # Get (features, label) format for tf.estimator\n return batched.map(lambda x: (x, x[\"label\"]))", "def load_bin_embeddings(path, params):\n model = load_fasttext_model(path)\n assert model.get_dimension() == params.emb_dim\n words = model.get_labels()\n logger.info(\"Loaded binary model from %s\" % path)\n\n # compute new vocabulary / embeddings\n embeddings = np.concatenate([model.get_word_vector(w)[None] for w in words], 0)\n embeddings = torch.from_numpy(embeddings).float()\n word2id = {w: i for i, w in enumerate(words)}\n logger.info(\"Generated embeddings for %i words.\" % len(words))\n\n assert embeddings.size() == (len(word2id), params.emb_dim)\n return word2id, embeddings", "def load_actors_embeddings(dataset_path):\n embeddings = []\n actors = []\n for celebrity in os.listdir(dataset_path):\n cel_path = os.path.join(dataset_path, celebrity)\n for filename in os.listdir(cel_path):\n if filename[-3:] == \"npy\":\n embedding = np.load(os.path.join(cel_path, filename))\n actors.append(celebrity)\n embeddings.append(embedding)\n embeddings = np.array(embeddings)\n return actors, embeddings", "def getWordEmbeddingsMatrix(script_directory, embedding_file):\n 
translator = str.maketrans('', '', string.punctuation)\n all_words = []\n print(\"Loading vocab from text files in:\")\n for d in os.listdir(script_directory):\n print(d)\n for fname in os.listdir(\"%s/%s\" % (script_directory, d)):\n with open(\"%s/%s/%s\" % (script_directory, d, fname), 'r') as f:\n words = [w.translate(translator) for w in f.read().split() if w.translate(translator) != \"\"]\n all_words.extend(words)\n\n model = KeyedVectors.load_word2vec_format(embedding_file, binary=True)\n vocab = {\"PAD\" : 0, \"EOS\" : 1}\n vocab.update({w : i + 2 for i,w in enumerate([w1 for w1 in set(all_words) if w1 in model]) })\n inv_dict = vocab.keys()\n ## Take a minute to load...\n\n vocab_size = len(inv_dict)\n emb_size = 300 # or whatever the size of your embeddings\n embeddings = np.zeros((vocab_size + 1, emb_size))\n for k,v in vocab.items():\n embeddings[v] = model[k]\n vocab[\"UNK\"] = len(vocab.keys())\n embeddings[vocab[\"UNK\"]] = np.ones(emb_size)\n del model\n ## Now we have a numpy matrix of embeddings...\n # x_model = tf.placeholder(tf.int32, shape=[None, input_size])\n # with tf.device(\"/cpu:0\"):\n # embedded_x = tf.nn.embedding_lookup(embeddings, x_model)\n return embeddings, vocab", "def get_embeddings(emb_path, emb_length, vocab_size, embedding_type):\n print(\"Loading {} embeddings from file: {}...\".format(embedding_type, emb_path))\n\n emb_matrix = []\n str2id = {}\n idx = 0\n with open(emb_path, 'r') as fh:\n for line in tqdm(fh, total=vocab_size):\n line = line.lstrip().rstrip().split(\" \")\n word = line[0]\n vector = list(map(float, line[1:]))\n if emb_length != len(vector):\n raise Exception(\n \"{}: Expected vector of size {}, but got vector of size {}.\".format(idx, emb_length, len(vector)))\n emb_matrix.append(vector)\n str2id[word] = idx\n idx += 1\n\n emb_matrix = np.array(emb_matrix, dtype=np.float32)\n print(\"Loaded {} embedding matrix with shape {}.\".format(embedding_type, emb_matrix.shape))\n\n return emb_matrix, str2id", "async def load_from_store(self):\n start_time = int(time.time()) - self.keep_interval\n for lid, t, d in await self.store.get_recent_partials(start_time):\n self.cache[lid].add(t, d, remove=False)\n self.cache.all.add(t, d, remove=False)\n await self.store.scrub_pplns(start_time)", "def load_memory_map_dir(directory: str) -> Embeddings:\n meta_file = os.path.join(directory, \"meta.json\")\n mem_map_file = os.path.join(directory, \"memory_map\")\n with open(meta_file, \"r\") as f:\n meta = json.load(f)\n shape = tuple(meta['shape'])\n vocab = meta['vocab']\n mem_map = np.memmap(mem_map_file, dtype='float32', mode='r+', shape=shape)\n result = Embeddings(vocab, mem_map, filename=directory, emb_format=\"memory_map_dir\")\n return result", "def get_glove_embedding():\n embedding = {}\n N = 400_000\n print(\"Reading glove embedding...\")\n with open(GLOVE_EMBD_PATH, \"rb\") as f:\n for line in tqdm(f, total=N):\n line = line.decode().split()\n word = line[0].lower()\n vector = np.array(line[1:]).astype(np.float32)\n embedding[word] = vector\n\n return embedding" ]
[ "0.7840676", "0.7315518", "0.72570604", "0.7248901", "0.72128534", "0.71236014", "0.7081537", "0.70751274", "0.7022085", "0.6956653", "0.673481", "0.6694797", "0.6664453", "0.6633258", "0.65949357", "0.64013827", "0.63974303", "0.6372857", "0.63583666", "0.6338655", "0.63299704", "0.63250667", "0.63124293", "0.6292697", "0.62833077", "0.62781066", "0.62087476", "0.6203341", "0.6199756", "0.61994135", "0.61967444", "0.6192511", "0.6167026", "0.6163421", "0.6161531", "0.61418283", "0.6136138", "0.61143684", "0.60888886", "0.60819346", "0.6077703", "0.6072893", "0.60529286", "0.60468876", "0.6032907", "0.60328394", "0.60285884", "0.60201627", "0.6013671", "0.60025907", "0.59815294", "0.59775734", "0.5969469", "0.5958044", "0.59508985", "0.59370184", "0.5924073", "0.5915449", "0.59154356", "0.59061956", "0.5905905", "0.5902912", "0.5888579", "0.58685595", "0.58563334", "0.58540756", "0.58324283", "0.58290094", "0.5828668", "0.58284724", "0.5821263", "0.5795294", "0.5791725", "0.5783387", "0.57671696", "0.57577693", "0.5742557", "0.57411104", "0.57312465", "0.5728745", "0.5722501", "0.5722232", "0.5719436", "0.57156885", "0.57152706", "0.5713435", "0.5707256", "0.56885093", "0.5685465", "0.5683203", "0.5677424", "0.56768155", "0.567322", "0.5670079", "0.56698817", "0.56685203", "0.5661274", "0.5660685", "0.5653622", "0.564635" ]
0.71723187
5
Get BERT embeddings from a dataloader generator.
def _get_bert_embeddings(data_generator, embedding_model: torch.nn.Module, metadata: False): use_cuda = torch.cuda.is_available() device = torch.device("cuda:0" if use_cuda else "cpu") with torch.set_grad_enabled(False): embeddings = {'ids': [], 'embeddings': [], 'labels': [] } # get BERT training embeddings if metadata: for local_ids, local_data, local_meta, local_labels in data_generator: local_data, local_meta, local_labels = local_data.to(device).long().squeeze(1), \ local_meta, \ local_labels.to(device).long() #print(local_data[0].shape) augmented_embeddings = embedding_model(local_data, local_meta) embeddings['ids'].extend(np.array(local_ids)) embeddings['embeddings'].extend(np.array(augmented_embeddings.detach().cpu())) embeddings['labels'].extend(np.array(local_labels.detach().cpu().tolist())) else: for local_ids, local_data, local_labels in data_generator: local_data, local_labels = local_data.to(device).long().squeeze(1), \ local_labels.to(device).long() #print(local_data[0].shape) augmented_embeddings = embedding_model(local_data) embeddings['ids'].extend(np.array(local_ids)) embeddings['embeddings'].extend(np.array(augmented_embeddings.detach().cpu())) embeddings['labels'].extend(np.array(local_labels.detach().cpu().tolist())) return embeddings
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_embeddings(model, loader, device=torch.device('cpu')):\n embeddings = []\n labels = []\n for item in loader:\n data, label = item\n data = data.view(-1, 1, data.shape[-1])\n data = data.to(device)\n label = label.to(device)\n output = model(data).squeeze(1)\n\n embedding = output.cpu().data.numpy()\n label = label.cpu().data.numpy()\n embeddings.append(embedding)\n labels.append(label)\n\n embeddings = np.array(embeddings)\n labels = np.array(labels)\n\n return embeddings, labels", "def get_embeddings(self, data):\n raise NotImplementedError()", "def load_embeddings(config, name, vocab, training_generator, validation_generator):\n\n # Pickle embeddings should be AGNOSTIC to the name. This is because each pickled embedding is specific to the dataset and transformer.\n # Applies down the road when/if we attempt active learning\n data_name = config['train_file'].split('/')[-1][:-4] # retrieve file name without the extension\n train_embed_pkl_f = os.path.join(config['cache'], data_name + '_' + config['embedding_type'] + '_training_embeddings.p')\n valid_embed_pkl_f = os.path.join(config['cache'], data_name + '_' + config['embedding_type'] + '_validation_embeddings.p')\n \n \n if os.path.exists(train_embed_pkl_f):\n with open( train_embed_pkl_f, 'rb') as cache:\n train_embeddings = pickle.load(cache)\n\n with open(valid_embed_pkl_f, 'rb') as cache:\n valid_embeddings = pickle.load(cache)\n else:\n # get embeddings from scratch\n tokenizer = AutoTokenizer.from_pretrained(vocab)\n embedding_model = AbstractBert(vocab) \n\n if torch.cuda.device_count() > 1:\n print(\"GPUs Available: \", torch.cuda.device_count())\n embedding_model = torch.nn.DataParallel(embedding_model, device_ids=[0, 1, 2])\n \n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\n\n embedding_model.eval().to(device)\n\n logger.info(' Getting BERT/ROBERTA embeddings...')\n\n train_embeddings = _get_bert_embeddings(training_generator, embedding_model, config[\"metadata\"])\n valid_embeddings = _get_bert_embeddings(validation_generator, embedding_model, config[\"metadata\"])\n\n # save embeddings\n pickle.dump(train_embeddings, open(train_embed_pkl_f, 'wb'))\n pickle.dump(valid_embeddings, open(valid_embed_pkl_f, 'wb'))\n\n logger.info(' Saved full BERT/ROBERTA embeddings.')\n\n embedding_shape = train_embeddings['embeddings'][1].shape[0]\n\n return embedding_shape, train_embeddings, valid_embeddings", "def bert_embed(data, bert_model, BATCH_SIZE = 16, MAX_LEN = 128):\n \n dataset = TensorDataset(\n data['input_ids'], data['attention_masks'], data['indices']\n )\n sampler = SequentialSampler(dataset)\n dataloader = DataLoader(dataset, sampler=sampler, batch_size=BATCH_SIZE)\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print('Running on ' + device.type)\n if device.type == 'cuda':\n bert_model.cuda() # put bert in training mode\n \n N = data['indices'].shape[0]\n X = np.zeros((N, 768))\n pos = 0\n for batch in dataloader:\n batch = tuple(t.to(device) for t in batch)\n b_input_ids, b_input_masks, b_indices = batch\n \n with torch.no_grad():\n embeddings = bert_model(\n b_input_ids.view(-1, MAX_LEN),\n b_input_masks.view(-1, MAX_LEN)\n )[2]\n # Take the mean of the last 4 hidden states\n embeddings = (embeddings[-4] + embeddings[-3] + embeddings[-2] + embeddings[-1])/4\n for j, label_ind in enumerate(b_indices.cpu().detach().numpy()):\n X[pos,:] = embeddings[j, int(label_ind), :].cpu().detach().numpy()\n pos+=1\n return X", "def 
get_loader(sentences, conversation_length, sentence_length, vocab, batch_size=100, data=None, shuffle=True):\n\n def collate_fn(data):\n \"\"\"\n Collate list of data in to batch\n\n Args:\n data: list of tuple(source, target, conversation_length, source_length, target_length)\n Return:\n Batch of each feature\n - source (LongTensor): [batch_size, max_conversation_length, max_source_length]\n - target (LongTensor): [batch_size, max_conversation_length, max_source_length]\n - conversation_length (np.array): [batch_size]\n - source_length (LongTensor): [batch_size, max_conversation_length]\n \"\"\"\n # Sort by conversation length (descending order) to use 'pack_padded_sequence'\n data.sort(key=lambda x: x[1], reverse=True)\n\n # Separate\n sentences, conversation_length, sentence_length = zip(*data)\n\n # return sentences, conversation_length, sentence_length.tolist()\n return sentences, conversation_length, sentence_length\n\n dataset = DialogDataset(sentences, conversation_length,\n sentence_length, vocab, data=data)\n\n data_loader = DataLoader(\n dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n collate_fn=collate_fn)\n\n return data_loader", "def test_bert_embedder(self, resource_loader):\n config = {\n \"model_type\": \"tagger\",\n \"example_type\": ENTITY_EXAMPLE_TYPE,\n \"label_type\": ENTITIES_LABEL_TYPE,\n \"model_settings\": {\"classifier_type\": \"embedder\"},\n \"params\": {\"embedder_type\": \"bert\"},\n }\n examples = self.labeled_data.queries()\n labels = self.labeled_data.entities()\n\n config = {\n **config,\n \"params\": {\n \"embedder_type\": \"bert\",\n \"pretrained_model_name_or_path\": \"bert-base-cased\",\n \"add_terminals\": True\n }\n }\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n new_config = {**config, \"params\": {**config[\"params\"], \"token_spans_pooling_type\": \"mean\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**new_config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n \"\"\" test for different pretrained transformers\"\"\"\n\n config = {\n **config,\n \"params\": {\n \"embedder_type\": \"bert\",\n \"pretrained_model_name_or_path\": \"distilbert-base-uncased\",\n }\n }\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {\n **config,\n \"params\": {\n \"embedder_type\": \"bert\",\n \"pretrained_model_name_or_path\": \"albert-base-v2\",\n }\n }\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {\n **config,\n \"params\": {\n \"embedder_type\": \"bert\",\n \"pretrained_model_name_or_path\": \"sentence-transformers/all-mpnet-base-v2\",\n }\n }\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {\n **config,\n \"params\": {\n \"embedder_type\": \"bert\",\n \"pretrained_model_name_or_path\": \"roberta-base\",\n }\n }\n with pytest.raises(NotImplementedError):\n model = 
ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)", "def build_bert_input(data, data_path, tokenizer):\n\n cache_fp = f\"{data_path[:data_path.rfind('.')]}_{type(tokenizer).__name__}_{str(BERT_MAX_LEN)}_cache\"\n if os.path.isfile(cache_fp): \n logger.info(\"Loading tokenized data from cache...\")\n all_samples = torch.load(cache_fp)\n return all_samples\n\n bert_sequences = [] \n\n # modification for turn classification task \n if 'turn' in data_path:\n for instance in data:\n seq = \"[CLS] {} [SEP] {} [SEP]\".format(instance['p'], instance['r'])\n bert_sequences.append([instance['label'], seq])\n\n # regular yes-and classifier \n else: \n \n for k in data['non-yesands'].keys():\n for non_yesand in data['non-yesands'][k]: \n seq = \"[CLS] {} [SEP] {} [SEP]\".format(non_yesand['p'], non_yesand['r'])\n bert_sequences.append([0, seq])\n \n for k in data['yesands'].keys(): \n for yesand in data['yesands'][k]: \n seq = \"[CLS] {} [SEP] {} [SEP]\".format(yesand['p'], yesand['r'])\n bert_sequences.append([1, seq])\n\n sentences = [x[1] for x in bert_sequences]\n labels = [x[0] for x in bert_sequences]\n logger.info(\"Tokenizing loaded data...\")\n tokenized_texts = [tokenizer.encode(sentence) for sentence in sentences]\n\n\n # cache_fp = data_path[:data_path.rfind('.')] + \"_\" + type(tokenizer).__name__\n # if os.path.isfile(cache_fp): \n # logger.info(\"Loading tokenized data from cache...\")\n # tokenized_texts = torch.load(cache_fp)\n # else: \n # logger.info(\"Tokenizing loaded data...\")\n # # tokenize with BERT tokenizer \n # tokenized_texts = [tokenizer.encode(sentence) for sentence in sentences]\n # torch.save(tokenized_texts, cache_fp)\n\n\n\n # pad input to MAX_LEN\n input_ids = pad_sequences(tokenized_texts, maxlen=BERT_MAX_LEN, dtype=\"long\", truncating=\"post\", padding=\"post\")\n\n # get attention masks and segment ids \n attention_masks = build_attention_mask(input_ids)\n segment_ids = build_segment_ids(input_ids)\n\n all_samples = [{\"input_ids\": input_ids[i], \"token_type_ids\": segment_ids[i], \"attention_mask\": attention_masks[i], \"label\": labels[i]} for i in range(len(input_ids))]\n torch.save(all_samples, cache_fp)\n\n return all_samples", "def _get_embedding(self, data):\n embedding_list = [super()._get_embedding(data)]\n context = data['context']\n for i in range(context.shape[1]):\n embedding_list.append(getattr(self, f'context{i}')(context[:, i:i+1]))\n return torch.cat(embedding_list, dim=1)", "def load_embeddings():\n return embedding_utils.PretrainedWordEmbeddings(\n lowercase=FLAGS.lowercase,\n embeddings_path=FLAGS.fasttext_embeddings,\n max_vocab_size=FLAGS.max_vocab_size,\n skip_header=True)", "def __create_dataloaders(\n self, encoded_input: dict, batch_size\n ) -> Tuple[DataLoader, DataLoader, DataLoader]:\n input_ids = encoded_input[\"input_ids\"]\n token_type_ids = encoded_input[\"token_type_ids\"]\n attention_mask = encoded_input[\"attention_mask\"]\n\n input_ids_data_loader = torch.utils.data.DataLoader(\n input_ids, batch_size=batch_size, shuffle=False\n )\n token_type_ids_data_loader = torch.utils.data.DataLoader(\n token_type_ids, batch_size=batch_size, shuffle=False\n )\n attention_mask_data_loader = torch.utils.data.DataLoader(\n attention_mask, batch_size=batch_size, shuffle=False\n )\n\n return (\n input_ids_data_loader,\n token_type_ids_data_loader,\n attention_mask_data_loader,\n )", "def 
get_data(self):\n if self.with_encoder:\n for i in count():\n batchdata = pd.read_csv(SEQUENTIAL_TRAIN_PATH,\n nrows=GAN_BATCH_SIZE,\n skiprows=i * GAN_BATCH_SIZE + 1,\n names=SEQUENTIAL_COLUMN_NAMES.keys(),\n dtype=SEQUENTIAL_COLUMN_NAMES)\n if len(batchdata) < GAN_BATCH_SIZE:\n yield None\n batchdata = batchdata['seq_contents'].values\n yield get_data_for_lstm_ae(batchdata)\n else:\n # shuffles data\n self.encoded_data = self.encoded_data[np.random.permutation(self.encoded_data.shape[0])]\n for i in count():\n result = self.encoded_data[i*GAN_BATCH_SIZE:(i+1)*GAN_BATCH_SIZE,:]\n if result.shape[0] < GAN_BATCH_SIZE:\n yield None\n yield result", "def generate_embeddings_gen(dataset_path, classes):\n model = embeddings(INPUT_DIM)\n X_train, X_test, y_train, y_test = get_train_test_lists(dataset_path, classes)\n # create data generators\n batch_size = 16\n train_batch_generator = image_batch_generator(X_train, model, batch_size=batch_size)\n test_batch_generator = image_batch_generator(X_test, model, batch_size=batch_size)\n\n return train_batch_generator, test_batch_generator", "def extract_embeddings(ds, config):\n from lidbox.models.keras_utils import KerasWrapper\n\n extractors = [(KerasWrapper.from_config_as_embedding_extractor_fn(e), _get_device_or_default(e))\n for e in config[\"extractors\"]]\n # ConcreteFunctions will be pretty-formatted starting from TF 2.3\n # https://www.tensorflow.org/guide/concrete_function#changes_for_tensorflow_23\n logger.info(\"Using %d extractors:\\n %s\",\n len(extractors),\n '\\n '.join(\"on device '{:s}':\\n {}\".format(d, _left_pad_lines(str(e), 2)) for e, d in extractors))\n\n def _append_embeddings(x):\n embeddings = []\n for extractor_fn, device in extractors:\n with tf.device(device):\n embeddings.append(extractor_fn(x[\"input\"]))\n return dict(x, embedding=tf.concat(embeddings, axis=1))\n\n batch_size = tf.constant(config.get(\"batch_size\", 1), tf.int64)\n logger.info(\"Batching inputs with batch size %s, extracting embeddings in batches.\", batch_size.numpy())\n ds = (ds.batch(batch_size)\n .prefetch(TF_AUTOTUNE)\n .map(_append_embeddings, num_parallel_calls=TF_AUTOTUNE))\n\n if not config.get(\"no_unbatch\", False):\n logger.info(\"Unbatching after embedding extraction\")\n ds = ds.unbatch()\n\n return ds", "def embeddings(self):\n self._ensure_is_connected()\n return self._embeddings", "def pickle_embeddings(model, pickle_name, dataset_name: str, training_ds_generator=None):\n import pickle\n layer_name = 'embedding'\n encoder_model = Model(inputs=model.input,\n outputs=model.get_layer(layer_name).output)\n if training_ds_generator is None:\n training_ds_generator = data_genetator.MYGenerator('train', batch_size=100)\n\n training_labels = np.copy(training_ds_generator.gt)\n embeddings = encoder_model.predict_generator(training_ds_generator)\n\n os.makedirs(f'./embeddings/{dataset_name}', exist_ok=True)\n\n with open(f'./embeddings/{dataset_name}/embeddings_for_{pickle_name}.pkl', 'wb') as pkl_out:\n pickle.dump((embeddings, training_labels), pkl_out)", "def get_embeddings() -> tuple:\n # Initialize the model loading Universal Sentense Encoder\n # into a KerasLayer from Kaggle dataset file\n model = tf.keras.Sequential(\n [KerasLayer(encoder_path, input_shape=[], dtype=tf.string,\n output_shape=[512], trainable=False),\n # tf.keras.layers.Layer(512, dtype=tf.float16) # To reduce memory footprint\n ]\n )\n\n train_emb = model.predict(data_train['text'])\n print('Train texts converted into embeddings. 
Shape:', train_emb.shape)\n\n test_emb = model.predict(data_test['text'])\n print('Test texts converted into embeddings. Shape:', test_emb.shape)\n\n return train_emb, test_emb", "def generate_embeddings_sentence_test_data(data, path_out):\n flair.device = torch.device('cpu')\n dicts = []\n # init multilingual BERT\n bert_embedding = TransformerDocumentEmbeddings('bert-base-multilingual-cased')\n counter = 0\n for entry in data:\n print(\"Counter: \", counter)\n counter += 1\n text = entry[\"sentence\"]\n id = entry[\"id\"]\n sent = Sentence(text)\n bert_embedding.embed(sent)\n vec = sent.get_embedding().detach().numpy()\n dicts.append((id,vec))\n gc.collect()\n result = dicts\n file = open(path_out, \"wb\")\n pickle.dump(result, file)\n file.close()\n return result", "def get_embeddings():\n # Load the raw embedding data\n X_train = np.load('./train_embeddings.npy')\n \n y_train = np.load('./train_labels.npy')\n \n X_valid = np.load('./valid_embeddings.npy')\n \n y_valid = np.load('./valid_labels.npy')\n \n X_test = np.load('./test_embeddings.npy')\n \n y_test = np.load('./test_labels.npy')\n\n #return X_train, y_train\n return X_train, y_train, X_valid, y_valid, X_test, y_test", "def get_loader(data_list, config, train=True):\n \n with open(config[\"label_map\"], \"r\") as f:\n label_map = json.load(f)\n\n dataset = GoogleSpeechDataset(\n data_list=data_list,\n label_map=label_map,\n audio_settings=config[\"hparams\"][\"audio\"],\n aug_settings=config[\"hparams\"][\"augment\"] if train else None,\n cache=config[\"exp\"][\"cache\"]\n )\n\n dataloader = DataLoader(\n dataset,\n batch_size=config[\"hparams\"][\"batch_size\"],\n num_workers=config[\"exp\"][\"n_workers\"],\n pin_memory=config[\"exp\"][\"pin_memory\"],\n shuffle=True if train else False\n )\n\n return dataloader", "def get_dataloaders(data_dir,train_batch_size,val_batch_size,aug_flag):\n # Create the dataset object.\n transformed_dataset = PersonDataset(data_dir,False)\n # dataloader for train and validation\n validation_split = 0.2\n shuffle_dataset = True\n #random seed to keep the train-val split constant for inference purpose\n random_seed= 42\n # create indices for training and validation splits.\n dataset_size = len(transformed_dataset)\n # we create the indices using python range function and store it into a list\n indices = list(range(dataset_size))\n split = int(np.floor(validation_split*dataset_size))\n if shuffle_dataset:\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n train_indices,val_indices = indices[split:],indices[:split]\n # create dataloaders...\n train_sampler = SubsetRandomSampler(train_indices)\n val_sampler = SubsetRandomSampler(val_indices)\n train_aug,val_aug = aug_flag,False\n train_loader = DataLoader(PersonDataset(data_dir,train_aug), batch_size=train_batch_size, shuffle=False, num_workers=0,sampler = train_sampler)\n val_loader = DataLoader(PersonDataset(data_dir,val_aug), batch_size=val_batch_size, shuffle=False, num_workers=0,sampler = val_sampler)\n\n # dictionary for data loaders..\n dataloaders = {\"train\" :train_loader,\n \"val\":val_loader\n }\n return dataloaders", "def batch_gen():\n i = 0\n while len(all_sentences) - i >= batch_size:\n # TODO this is a mess...\n yield np.stack([\n np.pad(\n np.stack(\n [embeddings[id]\n for id in sentence[:max_sentence_length]]), [[\n 0, max_sentence_length -\n min(len(sentence), max_sentence_length)\n ], [0, 0]],\n 'constant',\n constant_values=0)\n for sentence in all_sentences[i:i + batch_size]\n ])\n\n i += batch_size", "def 
load_batched_dataset(is_train, embeddings):\n tensorize_text_fn = build_tensorize_text_fn(embeddings)\n unbatched = load_data(is_train)\n\n def tensorize(x):\n x[\"premise\"] = tensorize_text_fn(x[\"premise\"])\n x[\"hypothesis\"] = tensorize_text_fn(x[\"hypothesis\"])\n return x\n\n unbatched = unbatched.map(tensorize)\n\n hist_bins = list(range(5, 500, 5))\n batched = unbatched.apply(\n ops.bucket_by_quantiles(lambda x: x[\"premise\"][\"len\"], FLAGS.batch_size,\n 10, hist_bins))\n if is_train:\n batched = batched.shuffle(1000, reshuffle_each_iteration=True)\n batched = batched.repeat()\n\n # Get (features, label) format for tf.estimator\n return batched.map(lambda x: (x, x[\"label\"]))", "def get_data_loaders_2sentences():\n dataset_path = \"\"\n dataset_cache = None\n personachat = get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_2generated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = utterance[\"history\"][-(2*2+1):]\n #history_complete.append(history)\n if len(history) > 4:\n history_chatbot = history[1::2]\n\n persona_selected = persona_selected_list[count_persona]\n instance = build_input_from_segments_faiss_2(persona_selected, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets", "def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader", "def get_dataloaders(self,\n dataset_locations_dict,\n batch_size=32,\n test_only=False):\n # if test_only:\n # test_dataset = TweetSentimentDataset(csv_path=dataset_locations_dict[\"TEST\"],\n # transform=None,\n # freq_threshold=5,\n # vocab_file=dataset_locations_dict[\"VOCAB\"],\n # create_vocab=False)\n # return get_dataloader(test_dataset,\n # test_dataset.vocab,\n # batch_size=1,shuffle=False,num_workers=0,\n # add_collate_fn=True)\n \n train_val_dataset = TweetSentimentDataset(csv_path=dataset_locations_dict[\"TRAIN_TEST\"],\n transform=None,\n freq_threshold=5,\n 
vocab_file=dataset_locations_dict[\"VOCAB\"],\n create_vocab=False)\n \n # test_dataset = TweetSentimentDataset(csv_path=dataset_locations_dict[\"TEST\"],\n # transform=None,\n # freq_threshold=5,\n # vocab_file=dataset_locations_dict[\"VOCAB\"],\n # create_vocab=False)\n \n train_ds_len = int(0.9*len(train_val_dataset))\n \n val_ds_len = int(0.05*len(train_val_dataset))\n \n test_ds_len = len(train_val_dataset)-train_ds_len-val_ds_len\n \n train_dataset,val_dataset,test_dataset = random_split(train_val_dataset,\n lengths=[train_ds_len,val_ds_len,test_ds_len],\n generator=torch.Generator().manual_seed(seed))\n \n train_dataloader = get_dataloader(train_dataset,\n train_val_dataset.vocab,\n batch_size=batch_size,shuffle=True,num_workers=0,\n add_collate_fn=True)\n val_dataloader = get_dataloader(val_dataset,\n train_val_dataset.vocab,\n batch_size=batch_size,shuffle=False,num_workers=0,\n add_collate_fn=True)\n test_dataloader = get_dataloader(test_dataset,\n train_val_dataset.vocab,\n batch_size=batch_size,shuffle=False,num_workers=0,\n add_collate_fn=True)\n \n # test_dataset.df.to_csv('sentiment_analysis_test_dataset_4990.csv')\n print(f\"Training Dataset size : {len(train_dataset)}\\n\")\n print(f\"Validation Dataset size : {len(val_dataset)}\\n\")\n print(f\"Test Dataset size : {len(test_dataset)}\\n\")\n \n if test_only:\n return test_dataloader\n return train_dataloader,val_dataloader,test_dataloader", "def generate_sentence_embeddings():\n generate_embeddings_sentence(\"Data/en-train.json\", \"Data_Sent_Embds/en_sent.pkl\")\n generate_embeddings_sentence(\"Data/es-train.json\", \"Data_Sent_Embds/es_sent.pkl\")\n generate_embeddings_sentence(\"Data/pr-train.json\", \"Data_Sent_Embds/pr_sent.pkl\")", "def sample_for_inception(model, encoder, batch_size, dataloader, device):\n\n captions = []\n gen_imgs = []\n # get sample captions\n done = False\n while not done:\n for (_, labels_batch, captions_batch) in dataloader:\n captions += captions_batch\n conditional_embeddings = encoder(labels_batch.to(device), captions)\n imgs = model.sample(conditional_embeddings).cpu()\n gen_imgs.append(imgs)\n\n if len(captions) > batch_size:\n done = True\n break\n\n gen_imgs = torch.cat(gen_imgs).numpy()\n gen_imgs = np.clip(gen_imgs, 0, 1)\n return(gen_imgs)", "def embed(documents, ctx_encoder, ctx_tokenizer, device):\n input_ids = ctx_tokenizer(\n documents[\"title\"],\n documents[\"text\"],\n truncation=True,\n padding=\"longest\",\n return_tensors=\"pt\",\n )[\"input_ids\"]\n embeddings = ctx_encoder(\n input_ids.to(device=device), return_dict=True\n ).pooler_output\n return {\"embeddings\": embeddings.detach().cpu().numpy()}", "def get_features(self) -> Generator[np.ndarray, None, None]:\n for text in self.texts:\n yield embed(text)", "def load_training_data_generator(self) -> Generator[Tuple[List[np.ndarray], np.ndarray], None, None]:\n return self._load_generator(config.TRAIN_DIR, True)", "def embed(self, loader, model):\n print(\" ** Embedding words\")\n\n words = loader.words\n vectors = [model.get_word_vector(word) for word in words]\n\n return [(w, *v) for w, v in zip(words, vectors)]", "def generate_embeddings(vae, dataset):\n data = ((torch.unsqueeze(img,0), label) for img, label in dataset)\n data = ((vae.encoder(tens), label) for tens, label in data)\n data = ((vae.codebook(emb),label) for emb, label in data)\n data = ((torch.flatten(img),label) for img, label in data)\n data = (torch.cat([inds,Tensor([label]).int()]) for inds, label in data)\n return data", "def get_data_loaders(args, 
tokenizer):\n personachat = get_dataset(tokenizer, args.dataset_path, args.dataset_cache, args.train_lang)\n _ = personachat.pop(\"test\", None)\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": [], \"valid\": []}\n\n if args.train_lang in [\"En\", \"Fr\", \"It\", \"Id\", \"Jp\", \"Ko\", \"Zh\"]: #monolingual data\n for dataset_name, dataset in personachat.items():\n for dial in dataset[args.train_lang]: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lm_labels = True)\n datasets[dataset_name].append(instance) \n else: #multilingual data\n for dataset_name, dataset in personachat.items():\n for lang, dials in dataset.items():\n for dial in dials: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lang_id=\"<{}>\".format(lang.lower()), lm_labels = True)\n datasets[dataset_name].append(instance) #all langs together\n\n\n logger.info(\"Build train and validation dataloaders\")\n train_dataset = DatasetTrain(datasets[\"train\"])\n valid_dataset = DatasetTrain(datasets[\"valid\"])\n\n #logger.info(\"Build train and validation dataloaders\")\n #train_dataset, valid_dataset = TensorDataset(*tensor_datasets[\"train\"]), TensorDataset(*tensor_datasets[\"valid\"])\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if args.distributed else None\n valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if args.distributed else None\n train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, shuffle=(not args.distributed), collate_fn=collate_fn)\n valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=args.valid_batch_size, shuffle=False, collate_fn=collate_fn)\n\n # logger.info(\"Train dataset (Batch, Candidates, Seq length): {}\".format(train_dataset.tensors[0].shape))\n # #logger.info(\"Train dataset (Batch, Candidates, Seq length): {}\".format(train_dataset.tensors[1].shape))\n # logger.info(\"Valid dataset (Batch, Candidates, Seq length): {}\".format(valid_dataset.tensors[0].shape))\n logger.info(\"Train dataset length: {}\".format(len(train_dataset)))\n logger.info(\"Valid dataset length: {}\".format(len(valid_dataset)))\n return train_loader, valid_loader, train_sampler, valid_sampler", "def get_embeddings(self, entities, type='entity'):\n return None", "def make_dataloaders(self):\n # read annotations\n annos = self._read_annos(self.anno_path)\n # make dictionary of word-index correspondence\n wtoi, itow = self._make_word_dictionary(annos)\n batch_size = self.cfg.TRAIN.BATCH_SIZE\n num_workers = self.cfg.TRAIN.NUM_WORKERS\n dataloaders = {}\n for s in self.splits:\n if \"train\" in s:\n dataset = AnetCapBasicDataset(self.cfg, self.vid_path, s, wtoi, itow, annos[s])\n dataloaders[s] = DataLoader(dataset=dataset, batch_size=batch_size, num_workers=num_workers, collate_fn=dataset.collate_fn, drop_last=True, shuffle=True)\n else:\n dataset = AnetCapBasicDataset(self.cfg, self.vid_path, s, wtoi, itow, annos[s])\n dataloaders[s] = DataLoader(dataset=dataset, batch_size=batch_size, num_workers=num_workers, collate_fn=dataset.collate_fn, drop_last=False, shuffle=False)\n return dataloaders", "def get_training_dataloaders(data_path, caption_idx, shuffle):\n\treturn {mode: _get_training_dataloader(data_path / 
mode, caption_idx, shuffle) for mode in ('train', 'val')}", "def test_bert_embedder_unsupported(self, resource_loader):\n config = {\n \"model_type\": \"tagger\",\n \"example_type\": ENTITY_EXAMPLE_TYPE,\n \"label_type\": ENTITIES_LABEL_TYPE,\n \"model_settings\": {\"classifier_type\": \"embedder\"},\n \"params\": {\n \"embedder_type\": \"bert\",\n \"pretrained_model_name_or_path\": \"distilroberta-base\",\n \"add_terminals\": True\n },\n }\n examples = self.labeled_data.queries()\n labels = self.labeled_data.entities()\n\n with pytest.raises(NotImplementedError):\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)", "def extract_embeddings():\n session, _ = session_bundle.load_session_bundle_from_path(FLAGS.model_path)\n all_paths = FLAGS.input_path.split(',')\n with tf.gfile.Open(FLAGS.output_path, 'w') as out:\n for dataset, pattern in enumerate(all_paths, start=FLAGS.offset):\n paths = tf.gfile.Glob(pattern)\n for path in paths:\n make_request(dataset, path, out, session)", "def test_extract_embeddings():\n with pytest.raises(OSError):\n model = BERTopic(bert_model='not_a_model')\n model._extract_embeddings([\"Some document\"])\n\n # model = BERTopic(bert_model='distilbert-base-nli-mean-tokens')\n # embeddings = model._extract_embeddings([\"Some document\"])\n #\n # assert isinstance(embeddings, np.ndarray)\n # assert embeddings.shape == (1, 768)", "def get_embeddings(faces):\n\t# convert into an array of samples\n\tsamples = np.asarray(faces, 'float32')\n\t# prepare the face for the model, e.g. center pixels\n\tsamples = preprocess_input(samples, version=2)\n\t# create a vggface model\n\tmodel = VGGFace(model='resnet50', include_top=False, input_shape=(224, 224, 3), pooling='avg')\n\t# perform prediction\n\tyhat = model.predict(samples)\n\treturn yhat", "def load_data(data_feeder):\n return data_feeder(BATCH_SIZE,\n SEQ_LEN,\n OVERLAP,\n Q_LEVELS,\n Q_ZERO,\n Q_TYPE)", "def __init__(\n self,\n datasets_manager: DatasetsManager = None,\n dropout_value: float = 0.0,\n aggregation_type: str = \"sum\",\n bert_type: str = \"bert-base-uncased\",\n word_tokens_namespace=\"tokens\",\n device: Union[torch.device, str] = torch.device(\"cpu\"),\n ):\n super(BertEmbedder, self).__init__()\n \n self.datasets_manager = datasets_manager\n self.dropout_value = dropout_value\n self.aggregation_type = aggregation_type\n self.bert_type = bert_type\n if isinstance(device, str):\n self.device = torch.device(device)\n else:\n self.device = device\n self.word_tokens_namespace = word_tokens_namespace\n self.msg_printer = wasabi.Printer()\n self.embedder_name = bert_type\n\n self.scibert_foldername_mapping = {\n \"scibert-base-cased\": \"scibert_basevocab_cased\",\n \"scibert-sci-cased\": \"scibert_scivocab_cased\",\n \"scibert-base-uncased\": \"scibert_basevocab_uncased\",\n \"scibert-sci-uncased\": \"scibert_scivocab_uncased\",\n }\n\n if \"scibert\" in self.bert_type:\n foldername = self.scibert_foldername_mapping[self.bert_type]\n self.model_type_or_folder_url = os.path.join(\n EMBEDDING_CACHE_DIR, foldername, \"weights.tar.gz\"\n )\n\n else:\n self.model_type_or_folder_url = self.bert_type\n\n # load the bert model\n with self.msg_printer.loading(\" Loading Bert tokenizer and model. 
\"):\n self.bert_tokenizer = TokenizerForBert(\n bert_type=self.bert_type, do_basic_tokenize=False\n )\n self.bert_numericalizer = NumericalizerForTransformer(\n tokenizer=self.bert_tokenizer\n )\n self.model = BertModel.from_pretrained(self.model_type_or_folder_url)\n self.model.eval()\n self.model.to(self.device)\n\n self.msg_printer.good(f\"Finished Loading {self.bert_type} model and tokenizer\")\n self.embedding_dimension = self.get_embedding_dimension()", "def get_embeddings(self, in_data):\n context, da = in_data\n if self.fixed_divide:\n da_emb = super(ContextDAEmbeddingSeq2SeqExtract, self).get_embeddings(da, pad=True)\n else:\n da_emb = super(ContextDAEmbeddingSeq2SeqExtract, self).get_embeddings(da, pad=False)\n max_context_len = (self.max_context_len + 3 * self.max_da_len) - len(da_emb)\n context_emb = []\n for tok in context[-max_context_len:]:\n context_emb.append(self.dict_token.get(tok, self.UNK_TOKEN))\n\n padding = [self.UNK_TOKEN] * (max_context_len - len(context))\n\n if self.use_div_token:\n return padding + context_emb + [self.DIV_TOKEN] + da_emb\n return padding + context_emb + da_emb", "def _batch_thm_embedding(self, thms: List[Text]) -> List[THM_EMB_TYPE]:\n # The checkpoint should have exactly one value in this collection.\n thms = self._thm_string_for_predictions(thms)\n embeddings = self._sess.run(\n fetches=self._graph.get_collection('thm_net'),\n feed_dict={self._graph.get_collection('thm_string')[0]: thms})[0]\n return embeddings", "def _read_adv_embeddings(identity, target):\n embeddings_file = os.path.join(\n FLAGS.output_directory,\n identity,\n FLAGS.attack_type,\n target\n )\n embeddings_file = os.path.join(FLAGS.image_directory,\n identity,\n 'embeddings.h5')\n with h5py.File(embeddings_file, 'r') as f:\n return f['embeddings'][:].astype(np.float32)", "def get_embeddings(self, in_data):\n context, da = in_data\n if self.fixed_divide:\n da_emb = super(PersonageContextDAEmbeddingSeq2SeqExtract, self).get_embeddings(da, pad=True)\n else:\n da_emb = super(PersonageContextDAEmbeddingSeq2SeqExtract, self).get_embeddings(da, pad=False)\n\n # Shubhangi: what this step essentially does is it replaces the context words by their token, with UNK as default.\n # again , we don't need this since our context data is essentially vectors therefore commenting this out\n # similary we don't need context embedding , that's exactly what context is already .\n\n # context_emb = []\n context_emb = [float(parameter[0]) for parameter in context]\n\n # for tok in context[-max_context_len:]:\n # context_emb.append(self.dict_token.get(tok, self.UNK_TOKEN))\n\n # Shubhangi: padding is needed because each context sentence could be of different length ,\n # we don't need to include context in padding as we're going to have a fixed size\n # (max_context_len - len(context)) = 0\n\n\n # padding = [self.UNK_TOKEN] * (max_context_len - len(context))\n\n # Shubhangi: padding might be harmless for now therefore not removing ,\n # essentially what this is doing is concatenating the arrays and sending\n if self.use_div_token:\n return context_emb + [self.DIV_TOKEN] + da_emb\n # return padding + context_emb + [self.DIV_TOKEN] + da_emb\n # return padding + context_emb + da_emb\n return context_emb + da_emb", "def create_dataloader(data):\r\n input_ids = torch.LongTensor([sent['input_ids'] for sent in data])\r\n input_mask = torch.LongTensor([sent['input_mask'] for sent in data])\r\n segment_ids = torch.LongTensor([sent['segment_ids'] for sent in data])\r\n label_ids = 
torch.LongTensor([sent['label_ids'] for sent in data])\r\n\r\n dataset = TensorDataset(input_ids, input_mask, segment_ids, label_ids)\r\n\r\n train_sampler = RandomSampler(dataset)\r\n dataloader = DataLoader(dataset, sampler=train_sampler, batch_size=BATCH_SIZE)\r\n\r\n return dataloader", "def get_dataloaders(datasets, split, args, is_eval=False):\n dataloaders = []\n for task, dataset in datasets.items():\n if is_eval:\n num_rows = dataset.num_rows if args.eval_rows == -1 else args.eval_rows\n else:\n num_rows = dataset.num_rows if args.train_rows == -1 else args.train_rows\n all_input_ids = np.zeros([num_rows, args.max_length])\n all_attention_mask = np.zeros([num_rows, args.max_length])\n all_token_type_ids = np.zeros([num_rows, args.max_length])\n for i in range(num_rows):\n features = dataset[i]\n curr_len = len(features[\"attention_mask\"])\n all_input_ids[i,:curr_len] = features[\"input_ids\"]\n all_attention_mask[i,:curr_len] = features[\"attention_mask\"]\n all_token_type_ids[i,:curr_len] = features[\"token_type_ids\"]\n all_input_ids = torch.tensor(all_input_ids, dtype=torch.long)\n all_attention_mask = torch.tensor(all_attention_mask, dtype=torch.long)\n all_token_type_ids = torch.tensor(all_token_type_ids, dtype=torch.long)\n all_label = torch.tensor(dataset[:num_rows][\"label\"], dtype=torch.long)\n if task == \"stsb\":\n all_label = all_label.float()\n \n data = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label)\n if split in [\"train\", \"support\"]:\n sampler = RandomSampler(data)\n dataloader = DataLoader(data, sampler=sampler, batch_size=args.train_batch_size)\n else:\n sampler = SequentialSampler(data)\n dataloader = DataLoader(data, sampler=sampler, batch_size=args.eval_batch_size)\n dataloaders.append(dataloader)\n return dataloaders", "def get_data_loaders_4sentence():\n dataset_path = \"\"\n dataset_cache = None\n personachat = get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_1generated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = utterance[\"history\"]\n #history_complete.append(history)\n if len(history_splitted) > (len(persona)-1):\n history_chatbot = history[1::2]\n persona_selected = persona_selected_list[count_persona]\n instance = build_input_from_segments_faiss(persona_selected, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets", "def get_data_loaders_3sentences():\n dataset_path = \"\"\n dataset_cache = None\n personachat = get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n 
history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_3generated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = utterance[\"history\"][-(2*2+3):]\n #history_complete.append(history)\n if len(history) > 6:\n history_chatbot = history[1::2]\n persona_selected = persona_selected_list[count_persona]\n instance = build_input_from_segments_faiss_2(persona_selected, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets", "def test_bpe_embedder(self, resource_loader):\n config = {\n \"model_type\": \"tagger\",\n \"example_type\": ENTITY_EXAMPLE_TYPE,\n \"label_type\": ENTITIES_LABEL_TYPE,\n \"model_settings\": {\"classifier_type\": \"embedder\"},\n \"params\": { # default token_spans_pooling_type is \"first\"\n \"emb_dim\": 30, \"tokenizer_type\": \"bpe-tokenizer\", \"add_terminals\": True\n },\n }\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n examples = self.labeled_data.queries()\n labels = self.labeled_data.entities()\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"token_spans_pooling_type\": \"mean\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"token_spans_pooling_type\": \"max\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"token_spans_pooling_type\": \"mean_sqrt\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"token_spans_pooling_type\": \"last\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {\n **config[\"params\"], \"use_crf_layer\": False, \"token_spans_pooling_type\": \"first\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)", "def test_batch_generator(self, dir_name):\n input = np.zeros((self.batch_size, self.max_seq_len,\n self.embedding_size))\n seq_lengths = np.zeros((self.batch_size), dtype=np.intp)\n unique_counts = np.zeros((self.batch_size), dtype=np.intp)\n labels = np.zeros((self.batch_size), dtype=np.intp)\n i = 0\n\n fi = 
open(dir_name + \"all.txt\")\n sample_gen = self.dev_sample_generator(fi)\n self.load_embedding()\n\n for sequence, seq_length, unique_count, label in sample_gen:\n seq_lengths[i], labels[i], unique_counts[i] = seq_length, label, unique_count\n if seq_lengths[i] > self.max_seq_len:\n seq_lengths[i] = self.max_seq_len\n sequence = sequence[:seq_lengths[i]]\n input[i, 0:seq_lengths[i], :] = self.embedding[sequence, :]\n\n i += 1\n\n if i == self.batch_size:\n yield input, seq_lengths, unique_counts, labels\n input = np.zeros(\n (self.batch_size, self.max_seq_len,\n self.embedding_size)\n )\n i = 0\n\n if i < self.batch_size:\n yield input[:i, :, :], seq_lengths[:i], unique_counts[:i], labels[:i]\n\n fi.close()", "def RandomDataloader(num_batches,\n batch_size,\n seq_width,\n min_len,\n max_len):\n for batch_num in range(num_batches):\n\n # All batches have the same sequence length\n seq_len = random.randint(min_len, max_len)\n seq = np.random.binomial(1, 0.5, (seq_len, batch_size, seq_width))\n seq = torch.from_numpy(seq)\n\n # The input includes an additional channel used for the delimiter\n inp = torch.zeros(seq_len + 1, batch_size, seq_width + 1)\n inp[:seq_len, :, :seq_width] = seq\n inp[seq_len, :, seq_width] = 1.0 # delimiter in our control channel\n outp = seq.clone()\n\n yield inp.float(), outp.float()", "def _read_fasttext_embeddings(self, vocab: vocabs.Vocab, init_fastext):\n with open(init_fastext, encoding='utf-8') as embeddings_file_handle:\n _, dimension = next(embeddings_file_handle).split()\n if int(dimension) != self.emb_dim:\n raise Exception(f\"An embedding size of {self.emb_dim} was specified, but the pretrained embeddings have size {dimension}\")\n\n # Poor man's Glorot initializer for missing embeddings\n bound = np.sqrt(6/(self.vocab_size + self.emb_dim))\n\n total_embs = 0\n in_vocab = 0\n missing = 0\n\n embeddings = np.empty((self.vocab_size, self.emb_dim), dtype='float')\n found = np.zeros(self.vocab_size, dtype='bool_')\n\n for line in embeddings_file_handle:\n total_embs += 1\n word, vals = line.strip().split(' ', 1)\n if word in vocab.w2i:\n in_vocab += 1\n index = vocab.w2i[word]\n embeddings[index] = np.fromstring(vals, sep=\" \")\n found[index] = True\n\n for i in range(self.vocab_size):\n if not found[i]:\n missing += 1\n embeddings[i] = np.random.uniform(-bound, bound, self.emb_dim)\n\n logger.info(f\"{in_vocab} vocabulary matches out of {total_embs} total embeddings; \"\n f\"{missing} vocabulary words without a pretrained embedding out of {self.vocab_size}\")\n\n return embeddings", "def get_embeddings():\n embeddings = dict(get_coefs(*o.strip().split()) for o in open(EMBEDDING_FILE))\n return embeddings", "def _get_embedding_layer(self, input_data, doc_input_data):\n opts = self._options\n word_embedding = tf.Variable(tf.random_uniform((self.vocab_size, opts.embed_dim), -1.0, 1.0))\n embed = []\n\n temp = tf.zeros([opts.batch_size, opts.embed_dim])\n embed_d = []\n for n in range(opts.sentence_sample):\n temp = tf.add(temp, tf.nn.embedding_lookup(word_embedding, doc_input_data[:, n]))\n embed_d.append(temp)\n\n if opts.concat == 'True':\n combined_embed_vector_length = opts.embed_dim * opts.window_size + opts.embed_dim\n for j in range(opts.window_size):\n embed_w = tf.nn.embedding_lookup(word_embedding, input_data[:, j])\n embed.append(embed_w)\n embed.append(embed_d)\n else:\n combined_embed_vector_length = opts.embed_dim\n embed_w = tf.zeros([opts.batch_size, opts.embed_dim])\n for j in range(opts.window_size):\n embed_w += 
tf.nn.embedding_lookup(word_embedding, input_data[:, j])\n embed_w += embed_d\n embed.append(embed_w)\n\n return tf.concat(embed, 1), word_embedding, combined_embed_vector_length", "def get_batch(self, data, bucket_id):\n encoder_size, decoder_size = self.buckets[bucket_id]\n encoder_inputs, decoder_inputs = [], []\n\n # Get a random batch of encoder and decoder inputs from data,\n # pad them if needed, reverse encoder inputs and add GO to decoder.\n for _ in xrange(self.batch_size):\n encoder_input, decoder_input = random.choice(data[bucket_id])\n\n # Encoder inputs are padded and then reversed.\n encoder_pad = [data_utils.PAD_ID] * (encoder_size - len(encoder_input))\n encoder_inputs.append(list(reversed(encoder_input + encoder_pad)))\n\n # Decoder inputs get an extra \"GO\" symbol, and are padded then.\n decoder_pad_size = decoder_size - len(decoder_input) - 1\n decoder_inputs.append([data_utils.GO_ID] + decoder_input +\n [data_utils.PAD_ID] * decoder_pad_size)\n\n # Now we create batch-major vectors from the data selected above.\n batch_encoder_inputs, batch_decoder_inputs, batch_weights = [], [], []\n\n # Batch encoder inputs are just re-indexed encoder_inputs.\n for length_idx in xrange(encoder_size):\n batch_encoder_inputs.append(\n np.array([encoder_inputs[batch_idx][length_idx]\n for batch_idx in xrange(self.batch_size)], dtype=np.int32))\n\n # Batch decoder inputs are re-indexed decoder_inputs, we create weights.\n for length_idx in xrange(decoder_size):\n batch_decoder_inputs.append(\n np.array([decoder_inputs[batch_idx][length_idx]\n for batch_idx in xrange(self.batch_size)], dtype=np.int32))\n\n # Create target_weights to be 0 for targets that are padding.\n batch_weight = np.ones(self.batch_size, dtype=np.float32)\n for batch_idx in xrange(self.batch_size):\n # We set weight to 0 if the corresponding target is a PAD symbol.\n # The corresponding target is decoder_input shifted by 1 forward.\n if length_idx < decoder_size - 1:\n target = decoder_inputs[batch_idx][length_idx + 1]\n if length_idx == decoder_size - 1 or target == data_utils.PAD_ID:\n batch_weight[batch_idx] = 0.0\n batch_weights.append(batch_weight)\n return batch_encoder_inputs, batch_decoder_inputs, batch_weights", "def load_bert(self):\n self.hermes.info(\"Load the bert...\")\n model = load_trained_model_from_checkpoint(self.conf.bert[\"config\"], self.conf.bert[\"checkpoint\"])\n\n self.hermes.info(\"Build the tokenizer...\")\n tokenizer = self.poseidon.build_tokenizer()\n\n return model, tokenizer", "def greedy_generation(self, enc_input, dec_input, sampling_bias=0.0):\n # Setting sampling_bias to 0.0 results in decoder always receiving its output from previous time-step\n # as input during current time-step\n _, batch_idx, _ = self.infer_step(enc_input, dec_input, sampling_bias) # 0.0 == always from previous\n # Convert predicted word indices to word tokens\n batch_idx = [np.squeeze(array).tolist() for array in np.split(batch_idx, batch_idx.shape[0], axis=0)]\n # Assemble output sequences from predictions; truncate output after the sentence-final <EOS> tag\n batch_boundaries = [idx_list.index(self.vocab.eos_id) if self.vocab.eos_id in idx_list else len(idx_list)\n for idx_list in batch_idx]\n batch_sentences = [[self.vocab.index_to_word[idx] for idx in batch_idx[i][:batch_boundaries[i]]]\n for i in range(len(batch_idx))]\n batch_sentences = [' '.join(word_list) + '.' 
for word_list in batch_sentences]\n return batch_sentences", "def _c4_data_unbatched(tokenizer, max_len):\n cls_id = tokenizer.bos_id()\n sep_id = tokenizer.eos_id()\n pad_id = tokenizer.pad_id()\n\n ds = tfds.load(name=\"c4/en\", split=\"train\", shuffle_files=True)\n ds = ds.repeat()\n ds = ds.shuffle(1024)\n ds = ds.batch(16) # Batch documents to potentially speed up input pipeline\n\n input_ids_buf = np.full((1024, max_len), pad_id, dtype=np.int32)\n type_ids_buf = np.full((1024, max_len), pad_id, dtype=np.int32)\n next_sentence_labels_buf = np.full(1024, -1, dtype=np.int32)\n\n for batch in tfds.as_numpy(ds):\n for text in batch[\"text\"]:\n text = str(text, \"utf-8\")\n lines = [tokenizer.EncodeAsIds(line) for line in text.splitlines()]\n j = 0\n while j < len(lines) - 1:\n if len(lines[j]) + len(lines[j+1]) > max_len - 3:\n j += 1\n else:\n idx = np.random.randint(input_ids_buf.shape[0])\n if next_sentence_labels_buf[idx] != -1:\n yield {\n \"input_ids\": input_ids_buf[idx].copy(),\n \"type_ids\": type_ids_buf[idx].copy(),\n \"next_sentence_labels\": next_sentence_labels_buf[idx].copy(),\n }\n input_ids_buf[idx] = pad_id\n type_ids_buf[idx] = 1\n\n cum_len = 0\n for k in range(j, len(lines)):\n cum_len += len(lines[k])\n if cum_len > max_len - 3:\n k -= 1\n break\n selected_lines = lines[j:k+1]\n j = k + 1\n\n pivot = np.random.randint(1, len(selected_lines))\n if np.random.random() < 0.5:\n datum = [cls_id]\n for tokens in selected_lines[:pivot]:\n datum.extend(tokens)\n datum.append(sep_id)\n type_ids_buf[idx, :len(datum)] = 0\n for tokens in selected_lines[pivot:]:\n datum.extend(tokens)\n datum.append(sep_id)\n next_sentence_label = 0\n type_ids_buf[idx, len(datum):] = 0\n else:\n datum = [cls_id]\n for tokens in selected_lines[pivot:]:\n datum.extend(tokens)\n datum.append(sep_id)\n type_ids_buf[idx, :len(datum)] = 0\n for tokens in selected_lines[:pivot]:\n datum.extend(tokens)\n datum.append(sep_id)\n next_sentence_label = 1\n type_ids_buf[idx, len(datum):] = 0\n\n input_ids_buf[idx] = pad_id\n input_ids_buf[idx, :len(datum)] = datum\n next_sentence_labels_buf[idx] = next_sentence_label", "def compute_embeddings(encoder, data_batches):\n\n vectors = []\n for batch in iter(data_batches):\n X, Y = batch\n X_embedded = encoder(X)\n for vec in np.array(X_embedded):\n vectors.append(vec)\n vectors = np.array(vectors)\n\n return vectors", "def _embeddings(self, xs):\n n_feats, batch_size, seq_len = xs.size()\n\n assert n_feats == self.n_feats\n\n res = [emb(x) for emb, x in zip(self.embeddings, xs)]\n x = torch.cat(res, 2)\n\n return x", "def entities(self):\n if 'ner' not in self.annotators:\n return None\n return [t[self.NER] for t in self.data]", "def entities(self):\n if 'ner' not in self.annotators:\n return None\n return [t[self.NER] for t in self.data]", "def get_data_loaders_1sentence():\n dataset_path = \"\"\n dataset_cache = None\n personachat = get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_1_sentence_final_generated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for 
dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = utterance[\"history\"][-(2*2+1):]\n #history_complete.append(history)\n if len(history) > 3:\n history_chatbot = history[1]\n persona_selected = persona_selected_list[count_persona]\n instance = build_input_from_segments_faiss(persona_selected, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets", "def pretrained(name=\"albert_base_uncased\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(AlbertEmbeddings, name, lang, remote_loc)", "def gen_batch(self):\n batch_size = self.batch_size\n shuffle = self.shuffle\n data = np.array(self.sentences)\n\n data_size = len(data)\n num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1\n while True:\n # shuffle the data at starting of each epoch\n shuffled_data = data\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_data = data[shuffle_indices]\n \n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n yield self._format_samples(shuffled_data[start_index:end_index], self.max_length)\n\n if self.mode in ['train', \"pred\"]:\n break", "def test_char_embedder(self, resource_loader):\n config = {\n \"model_type\": \"tagger\",\n \"example_type\": ENTITY_EXAMPLE_TYPE,\n \"label_type\": ENTITIES_LABEL_TYPE,\n \"model_settings\": {\"classifier_type\": \"embedder\"},\n \"params\": { # default token_spans_pooling_type is \"first\"\n \"emb_dim\": 30, \"tokenizer_type\": \"char-tokenizer\"},\n }\n examples = self.labeled_data.queries()\n labels = self.labeled_data.entities()\n\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"add_terminals\": \"True\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"token_spans_pooling_type\": \"mean\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"use_crf_layer\": False}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)", "def real_imgs(batch_size, dataloader, device):\n\n captions = []\n gen_imgs = []\n # get sample captions\n done = False\n while not done:\n for (imgs, labels_batch, captions_batch) in dataloader:\n captions += captions_batch\n# conditional_embeddings = encoder(labels_batch.to(device), captions)\n# imgs = model.sample(conditional_embeddings).cpu()\n gen_imgs.append(imgs)\n\n if len(captions) > batch_size:\n done = True\n break\n\n gen_imgs = torch.cat(gen_imgs).numpy()\n 
gen_imgs = np.clip(gen_imgs, 0, 1)\n return(gen_imgs)", "def generator(self):\n\n # Each thread gets its own randomized set of keys\n keys = self.loader.keys()\n\n while True:\n random.shuffle(keys)\n data_batch = []\n label_batch = []\n\n for key in keys:\n data = self.loader.get(key)\n s = StringIO(data)\n img = PIL.Image.open(s)\n img = img.resize((224, 224))\n img = img.convert('RGB')\n data_batch.append(np.array(img))\n\n label_str = self._classname_from_key(key)\n label_int = self._classname_to_label[label_str]\n label_arr = np.zeros(self.num_classes())\n label_arr[label_int] = 1 # one-hot encoding\n label_batch.append(label_arr)\n\n if len(data_batch) == 32: # batch size\n yield np.array(data_batch), np.array(label_batch)\n data_batch = []\n label_batch = []", "def build_data_loader(txt_path, in_vocab_path, out_vocab_path,\n batch_size=1, drop_last=False, num_workers=0):\n dataset = PuncDataset(txt_path, in_vocab_path, out_vocab_path)\n batch_sampler = RandomBucketBatchSampler(dataset,\n batch_size=batch_size,\n drop_last=drop_last)\n collate_fn = TextAudioCollate()\n data_loader = DataLoader(dataset, batch_sampler=batch_sampler,\n collate_fn=collate_fn, num_workers=num_workers)\n return data_loader", "def fit_generator(self, generator, nb_epochs=20, **kwargs):\n logger.info(\"Performing adversarial training using %i attacks.\", len(self.attacks))\n size = generator.size\n batch_size = generator.batch_size\n nb_batches = int(np.ceil(size / batch_size))\n ind = np.arange(generator.size)\n attack_id = 0\n\n # Precompute adversarial samples for transferred attacks\n logged = False\n self._precomputed_adv_samples = []\n for attack in self.attacks:\n if \"targeted\" in attack.attack_params:\n if attack.targeted:\n raise NotImplementedError(\"Adversarial training with targeted attacks is currently not implemented\")\n\n if attack.classifier != self.classifier:\n if not logged:\n logger.info(\"Precomputing transferred adversarial samples.\")\n logged = True\n\n next_precomputed_adv_samples = None\n for batch_id in range(nb_batches):\n # Create batch data\n x_batch, y_batch = generator.get_batch()\n x_adv_batch = attack.generate(x_batch, y=y_batch)\n if next_precomputed_adv_samples is None:\n next_precomputed_adv_samples = x_adv_batch\n else:\n next_precomputed_adv_samples = np.append(next_precomputed_adv_samples, x_adv_batch, axis=0)\n self._precomputed_adv_samples.append(next_precomputed_adv_samples)\n else:\n self._precomputed_adv_samples.append(None)\n\n for i_epoch in range(nb_epochs):\n logger.info(\"Adversarial training epoch %i/%i\", i_epoch, nb_epochs)\n\n # Shuffle the indices of precomputed examples\n np.random.shuffle(ind)\n\n all_accuracies = []\n all_accuracies_normal = []\n all_accuracies_adv = []\n for batch_id in range(nb_batches):\n # Create batch data\n x_batch, y_batch = generator.get_batch()\n x_batch = x_batch.copy()\n #x_batch_DEBUG = copy.deepcopy(x_batch)\n\n # Choose indices to replace with adversarial samples\n nb_adv = int(np.ceil(self.ratio * x_batch.shape[0]))\n attack = self.attacks[attack_id]\n if self.ratio < 1:\n adv_ids = np.random.choice(x_batch.shape[0], size=nb_adv, replace=False)\n else:\n adv_ids = list(range(x_batch.shape[0]))\n np.random.shuffle(adv_ids)\n\n # If source and target models are the same, craft fresh adversarial samples\n if attack.classifier == self.classifier:\n x_batch[adv_ids] = attack.generate(x_batch[adv_ids], y=y_batch[adv_ids])\n\n #print(\"Max pertrubations on adversarial samples\")\n #delta_ = np.max(x_batch_DEBUG[adv_ids, 
...] - x_batch[adv_ids, ...], 2)\n #print(delta_)\n \n\n # Otherwise, use precomputed adversarial samples\n else:\n x_adv = self._precomputed_adv_samples[attack_id]\n x_adv = x_adv[ind[batch_id * batch_size : min((batch_id + 1) * batch_size, size)]][adv_ids]\n x_batch[adv_ids] = x_adv\n\n #Gaussian augmentation to normal samples\n all_ids = range(x_batch.shape[0])\n normal_ids = [i_ for i_ in all_ids if i_ not in adv_ids]\n if self.augment==1:\n x_batch_normal = x_batch[normal_ids, ...]\n y_batch_normal = y_batch[normal_ids, ...]\n\n a = np.random.rand()\n noise = a * 0.008 * np.random.rand(*x_batch_normal.shape) \n #add noise\n x_batch_normal_noisy = x_batch_normal + noise.astype('float32')\n x_batch = np.vstack((x_batch, x_batch_normal_noisy))\n y_batch = np.concatenate((y_batch, y_batch_normal))\n\n\n\n # Fit batch\n #JATI--start\n self.classifier.set_learning_phase(True)\n #JATI--end\n self.classifier.fit(x_batch, y_batch, nb_epochs=1, batch_size=x_batch.shape[0], **kwargs)\n attack_id = (attack_id + 1) % len(self.attacks) \n\n #calculate training accuracy\n #JATI--start\n self.classifier.set_learning_phase(False)\n #JATI--end\n predictions = self.classifier.predict(x_batch)\n acc = np.mean(predictions.argmax(1)==y_batch)\n predictions_adv = predictions[adv_ids]\n acc_adv = np.mean(predictions_adv.argmax(1) == y_batch[adv_ids])\n #all_ids = range(x_batch.shape[0])\n #normal_ids = [i_ for i_ in all_ids if i_ not in adv_ids]\n predictions_normal = predictions[normal_ids]\n acc_normal = np.mean(predictions_normal.argmax(1)==y_batch[normal_ids])\n\n print(\"Batch\", batch_id, \"/\", nb_batches, \": Acc = \", round(acc,4), \"\\tAcc adv =\", round(acc_adv,4), \"\\tAcc normal =\", round(acc_normal,4))\n logger.info(\"Batch {}/{}: Acc = {:.6f}\\tAcc adv = {:.6f}\\tAcc normal = {:.6f}\".format(batch_id, nb_batches, acc, acc_adv, acc_normal))\n\n all_accuracies.append(acc)\n all_accuracies_normal.append(acc_normal)\n all_accuracies_adv.append(acc_adv)\n\n print()\n print('--------------------------------------')\n print(\"EPOCH\", i_epoch, \"/\", nb_epochs, \": Acc = \", round(np.mean(all_accuracies),4), \"\\tAcc adv =\", round(np.mean(all_accuracies_adv),4), \"\\tAcc normal =\", round(np.mean(all_accuracies_normal),4))\n print('--------------------------------------')\n print()\n logger.info(\"EPOCH {}/{}: Acc = {:.6f}\\tAcc adv = {:.6f}\\tAcc normal = {:.6f}\".format(i_epoch, nb_epochs, np.mean(all_accuracies), np.mean(all_accuracies_adv), np.mean(all_accuracies_normal)))", "def _get_dataloader(samples, batch_size):\n print(\"Cogiendo dataloader\")\n return DataLoader(samples, shuffle=True, batch_size=batch_size)", "def load_unpacker_dataset(sentences):\n return TFRecordDataset([path.join(TFRUDIR, sentence+'.tfr')\n for sentence in sentences])\\\n .map(\n lambda record: \\\n tf.parse_single_example(\n record,\n features={\n 's': tf.FixedLenFeature([], tf.string),\n 'l': tf.FixedLenFeature([NL], tf.float32),\n 't': tf.FixedLenFeature([NT], tf.float32)\n }\n )\n )\\\n .map(\n lambda feature: (feature['l'], feature['s'], feature['t'])\n )", "def test_bert_embedder_frozen_params(self, resource_loader):\n config = {\n \"model_type\": \"tagger\",\n \"example_type\": ENTITY_EXAMPLE_TYPE,\n \"label_type\": ENTITIES_LABEL_TYPE,\n \"model_settings\": {\"classifier_type\": \"embedder\"},\n \"params\": { # default embedder_output_pooling_type for bert is \"first\"\n \"embedder_type\": \"bert\",\n \"pretrained_model_name_or_path\": \"distilbert-base-uncased\",\n \"update_embeddings\": False\n },\n }\n 
examples = self.labeled_data.queries()\n labels = self.labeled_data.entities()\n\n # fit the model\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n\n # assert only some weights are trainable\n clf = model._clf\n n_requires_grad, n_total = get_num_weights_of_model(clf)\n assert n_requires_grad < n_total, print(n_requires_grad, n_total)\n\n # check if dumping and loading partial state dict logs required messages & throws no errors\n os.makedirs(GENERATED_TMP_FOLDER, exist_ok=True)\n clf.dump(GENERATED_TMP_FOLDER)\n new_clf = clf.load(GENERATED_TMP_FOLDER)\n shutil.rmtree(GENERATED_TMP_FOLDER)\n\n # do predictions with loaded model\n model._clf = new_clf\n model_predictions_assertions(model)", "def create_dataloaders(data_dir):\n\n trng_dataset = datasets.ImageFolder(data_dir / TRNG_FOLDER,\n transform=flowernet.trng_transform)\n trng_dataloader = torch.utils.data.DataLoader(trng_dataset,\n batch_size=64,\n shuffle=True)\n\n valn_dataset = datasets.ImageFolder(data_dir / VALN_FOLDER,\n transform=flowernet.pred_transform)\n valn_dataloader = torch.utils.data.DataLoader(valn_dataset,\n batch_size=64,\n shuffle=True)\n\n return trng_dataloader, valn_dataloader", "def load_datset_train_tokenization(dataset,tokenizer,tokenizer_trainer,batch_size : int):\n def make_batch_iter(dataset):\n for i in range(0, len(dataset), batch_size):\n yield dataset[i : i + batch_size][\"code\"]\n tokenizer.train_from_iterator(make_batch_iter(), trainer=tokenizer_trainer, length=len(dataset))\n return tokenizer", "def decode(self, embeddings):\n def denormalize(img):\n _img = img + 1.0\n _img = _img * (255.0 / 2.0)\n return _img.astype(np.uint8)\n\n i = 0\n N = len(embeddings)\n imgs = []\n while True:\n end = min(N, i + self.batch_size)\n batch = embeddings[i: end]\n\n size = end - i\n if size < self.batch_size:\n batch += self._embed_padding[: self.batch_size - size]\n\n _imgs = self.sess.run(self.output_layer, feed_dict={self.embed_layer: batch})\n imgs += [denormalize(_imgs[i]) for i in range(size)]\n\n i += self.batch_size\n if i >= N - 1:\n break\n\n return imgs", "def __call__(\n self, data_batch: Dict[str, List[str]]\n ) -> Tuple[\n BatchEncoding,\n List[Dict[str, Union[int, str]]],\n List[SquadExample],\n List[SquadFeatures],\n ]:\n self._check_values_len(data_batch)\n concatenated_batch, evidences = self._concatenate_batch(data_batch)\n dataset, examples, features = load_examples(\n concatenated_batch, self.tokenizer, evaluate=True, output_examples=True\n )\n\n input_ids = [torch.unsqueeze(instance[0], 0) for instance in dataset]\n attention_mask = [torch.unsqueeze(instance[1], 0) for instance in dataset]\n token_type_ids = [torch.unsqueeze(instance[2], 0) for instance in dataset]\n\n output = {\n \"input_ids\": torch.cat(input_ids, axis=0),\n \"attention_mask\": torch.cat(attention_mask, axis=0),\n \"token_type_ids\": torch.cat(token_type_ids, axis=0),\n }\n output = BatchEncoding(output)\n\n return output, evidences, examples, features", "def load_data(embed_words=None):\n assert embed_words is None or type(embed_words) == list\n\n profs = load_professions(embed_words=embed_words)\n gender_seed = load_gender_seed(embed_words=embed_words)\n eq_pairs = load_equalize_pairs()\n def_pairs = load_definitional_pairs(embed_words=embed_words)\n return gender_seed, def_pairs, eq_pairs, profs", "def get_data_loaders():\n dataset_path = \"\"\n dataset_cache = None\n personachat = 
get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_2sentences_finalgenerated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = utterance[\"history\"][-(2*2+5):]\n \n #history_complete.append(history)\n if len(persona) == 4:\n if len(history) > (len(persona)+3):\n history_chatbot = history[1::2]\n persona_selected = persona_selected_list[count_persona]\n instance = build_input_from_segments_faiss_2(persona, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets", "def get_data(self):\n if self.random_seeds: \n self._validate_random_seeds()\n seed_iter = list(map(iter,self.random_seeds))\n nsamples = len(self.random_seeds[0])\n else:\n seed_iter = None\n nsamples = self.numsamples\n self._set_meta_features()\n for _ in tqdm(range(nsamples)):\n self._update_meta_features(seed_iter)\n self._sample()\n yield self._extract_features()", "def load_glove_embeddings():\n\n emmbed_file = Path(\"./embeddings.pkl\")\n if emmbed_file.is_file():\n # embeddings already serialized, just load them\n print(\"Local Embeddings pickle found, loading...\")\n with open(\"./embeddings.pkl\", 'rb') as f:\n return pk.load(f)\n else:\n # create the embeddings\n print(\"Building embeddings dictionary...\")\n data = open(\"glove.6B.50d.txt\", 'r', encoding=\"utf-8\")\n embeddings = [[0] * EMBEDDING_SIZE]\n word_index_dict = {'UNK': 0} # first row is for unknown words\n index = 1\n for line in data:\n splitLine = line.split()\n word = tf.compat.as_str(splitLine[0])\n embedding = [float(val) for val in splitLine[1:]]\n embeddings.append(embedding)\n word_index_dict[word] = index\n index += 1\n data.close()\n\n # pickle them\n with open('./embeddings.pkl', 'wb') as f:\n print(\"Creating local embeddings pickle for faster loading...\")\n # Pickle the 'data' dictionary using the highest protocol available.\n pk.dump((embeddings, word_index_dict), f, pk.HIGHEST_PROTOCOL)\n\n return embeddings, word_index_dict", "def test_extract_embeddings():\n docs = [\"some document\"]\n model = BERTopic(embedding_model=\"distilbert-base-nli-stsb-mean-tokens\")\n bertopic_embeddings = model._extract_embeddings(docs)\n\n assert isinstance(bertopic_embeddings, np.ndarray)\n assert bertopic_embeddings.shape == (1, 768)\n\n sentence_embeddings = embedding_model.encode(docs, show_progress_bar=False)\n assert np.array_equal(bertopic_embeddings, sentence_embeddings)", "def load_dataloaders(args):\n logger.info(\"Loading dataloaders...\")\n p_path = os.path.join(\"./data/\", \"df_unencoded.pkl\")\n train_path = os.path.join(\"./data/\", \"df_encoded.pkl\")\n if (not os.path.isfile(p_path)) and (not os.path.isfile(train_path)):\n df = get_data(args, load_extracted=False)\n elif os.path.isfile(p_path) and (not 
os.path.isfile(train_path)):\n df = get_data(args, load_extracted=True)\n elif os.path.isfile(train_path):\n df = load_pickle(\"df_encoded.pkl\")\n \n # Train-Test split\n msk = np.random.rand(len(df)) < args.train_test_ratio\n trainset = df[msk]\n testset = df[~msk]\n \n trainset = text_dataset(trainset, args)\n max_features_length = trainset.max_x_len\n max_seq_len = trainset.max_y_len\n train_length = len(trainset)\n train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True,\\\n num_workers=0, collate_fn=Pad_Sequence(), pin_memory=False)\n \n testset = text_dataset(testset, args)\n test_length = len(testset)\n test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=True,\\\n num_workers=0, collate_fn=Pad_Sequence(), pin_memory=False)\n return train_loader, train_length, max_features_length, max_seq_len, test_loader, test_length", "def pretrained(name=\"small_bert_L2_768\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(BertEmbeddings, name, lang, remote_loc)", "def predict_batch_generator(self):\n input = np.zeros((self.batch_size, self.max_seq_len,\n self.embedding_size))\n seq_lengths = np.zeros((self.batch_size), dtype=np.intp)\n unique_counts = np.zeros((self.batch_size), dtype=np.intp)\n i = 0\n\n fi = open(self.config.parsed_predict_file)\n sample_gen = self.predict_sample_generator(fi)\n self.load_embedding()\n\n for sequence, seq_length, unique_count in sample_gen:\n seq_lengths[i], unique_counts[i] = seq_length, unique_count\n if seq_lengths[i] > self.max_seq_len:\n seq_lengths[i] = self.max_seq_len\n sequence = sequence[:seq_lengths[i]]\n input[i, 0:seq_lengths[i], :] = self.embedding[sequence, :]\n\n i += 1\n\n if i == self.batch_size:\n yield input, seq_lengths, unique_counts\n input = np.zeros(\n (self.batch_size, self.max_seq_len,\n self.embedding_size)\n )\n i = 0\n\n if i < self.batch_size:\n yield input[:i, :, :], seq_lengths[:i], unique_counts[:i]\n\n fi.close()", "def add_embedding(self):\n ### YOUR CODE HERE (~4-6 lines)\n embeddingTensor = tf.Variable(self.pretrained_embeddings)\n embeddings = tf.nn.embedding_lookup(embeddingTensor, self.input_placeholder)\n embeddings = tf.reshape(embeddings, [-1, self.max_length, Config.n_features * Config.embed_size])\n ### END YOUR CODE\n return embeddings", "def __load(self, use_cache):\n\n cache_path = path_lib.get_relative_file_path('runtime', 'input_cache', f'company_embeddings_{VERSION}.pkl')\n if use_cache and os.path.isfile(cache_path):\n return path_lib.read_cache(cache_path)\n\n print(f'\\nloading data from {self.__competitor_path} ...')\n with open(self.__competitor_path, 'rb') as f:\n tmp = json.load(f)\n d_linkedin_name_2_linkedin_val = tmp['d_linkedin_name_2_linkedin_val']\n\n data = []\n\n print('loading sentence bert to generate embeddings ...')\n from sentence_transformers import SentenceTransformer\n self.__sentence_bert = SentenceTransformer('bert-large-nli-stsb-mean-tokens')\n\n # converting the raw data to features that we need\n for linkedin_name, linkedin_val in d_linkedin_name_2_linkedin_val.items():\n # get features\n feature = self.__choose_features(linkedin_val)\n data.append([feature, linkedin_name])\n\n print('writing cache ...')\n path_lib.cache(cache_path, data)\n\n print('finish loading ')\n return data", "def get_dataloaders(args):\n if args.dataset == 'heat':\n dataset_class = heat.HeatDiffusionDataset\n else:\n raise ValueError(f'Unknown dataset {args.dataset}')\n train_dataset = 
dataset_class(\n dataset_class.get_train_path(args.data_path), args, train=True)\n if args.dist:\n train_sampler = torch.utils.data.distributed.DistributedSampler(\n train_dataset)\n else:\n train_sampler = torch.utils.data.RandomSampler(train_dataset)\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, num_workers=args.workers,\n sampler=train_sampler, pin_memory=True, drop_last=args.drop_last)\n if not args.no_eval:\n validation_dataset = dataset_class(\n dataset_class.get_validation_path(args.data_path), args, train=False)\n if args.dist:\n validation_sampler = torch.utils.data.distributed.DistributedSampler(\n validation_dataset, shuffle=False)\n else:\n validation_sampler = torch.utils.data.SequentialSampler(\n validation_dataset)\n validation_loader = torch.utils.data.DataLoader(\n validation_dataset, batch_size=args.batch_size,\n num_workers=args.workers, sampler=validation_sampler,\n pin_memory=True, drop_last=args.drop_last)\n\n test_dataset = dataset_class(\n dataset_class.get_test_path(args.data_path), args, train=False)\n if args.dist:\n test_sampler = torch.utils.data.distributed.DistributedSampler(\n test_dataset, shuffle=False)\n else:\n test_sampler = torch.utils.data.SequentialSampler(\n test_dataset)\n test_loader = torch.utils.data.DataLoader(\n test_dataset, batch_size=args.batch_size,\n num_workers=args.workers, sampler=test_sampler,\n pin_memory=True, drop_last=args.drop_last)\n else:\n validation_loader = None\n test_loader = None\n\n # Update the data shape if needed.\n if args.data_shape is None:\n args.data_shape = train_dataset.get_shape()\n if args.data_target_shape is None:\n args.data_target_shape = train_dataset.get_target_shape()\n\n return train_loader, validation_loader, test_loader", "def get_dataset(reader: DataReader):\n\n xs = []\n ys = []\n\n for annotation_sentences in reader.annotations:\n for annotation in annotation_sentences:\n xs.append([annotation.fee_raw] + annotation.sentence)\n ys.append(annotation.frame)\n\n return xs, ys", "def get_dataloaders(logging, batch_size):\n # Load Data\n logging.info(\"Reading Train and Test data...\")\n train_df = pd.read_csv(\"C:/tmp/avila_classification/data/avila-tr.txt\", header=None)\n test_df = pd.read_csv(\"C:/tmp/avila_classification/data/avila-ts.txt\", header=None)\n\n # Fix column names\n col_names = ['col_' + str(j + 1) for j in range(train_df.shape[1] - 1)]\n indep_cols = col_names.copy()\n col_names.append('y')\n\n logging.debug(\"Assigning columns\")\n train_df.columns = col_names\n test_df.columns = col_names\n\n # Encode dependent variable column\n le = LabelEncoder()\n le.fit(train_df['y'])\n logging.debug(f\"Classes: {le.classes_}\")\n logging.debug(f\"Transformed Classes: {le.transform(le.classes_)}\")\n\n train_df['y_enc'] = le.transform(train_df['y'])\n test_df['y_enc'] = le.transform(test_df['y'])\n\n # train_df.head()\n logging.debug(f\"Shape of train data: {train_df.shape}\")\n logging.debug(f\"Shape of test data: {test_df.shape}\")\n\n # Create train and validation dataloaders\n train_ds = AvilaDataset(data_frame=train_df, indep_cols=indep_cols, dep_col='y_enc')\n valid_ds = AvilaDataset(data_frame=test_df, indep_cols=indep_cols, dep_col='y_enc')\n\n # Should be some exponent of 2 (128, 256)\n # batch_size = 256\n train_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=True)\n valid_dl = DataLoader(valid_ds, batch_size=batch_size, shuffle=False)\n\n return train_dl, valid_dl, le", "def process_batch(self, data):\n [embedding_batch] = 
self._sess.run([self._embedding_tensor],\n feed_dict={self._features_tensor: data})\n return embedding_batch", "def gen_embedding(text, model, tokenizer):\n ### Tokenize the texts\n encoded_input = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors='pt')\n \n ### Encode the tokenized data with model\n with torch.no_grad():\n model_output = model(**encoded_input)\n \n ### Pool the outputs into a single vector\n sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])\n return sentence_embeddings", "def load_data_time_machine(batch_size, num_steps, use_random_iter=False, max_tokens=10000):\n data_iter = SeqDataLoader( batch_size, num_steps, use_random_iter, max_tokens)\n return data_iter, data_iter.vocab", "def load_embeddings(db):\n size = db['size'].values\n emb = db['embedding'].values\n emb = [np.load(i).flatten() for i in emb]\n return emb, size", "def get_dataset_transformers(tokenizer, dataset_name, **kwargs):\n loader = get_loader(dataset_name, **kwargs)\n return get_transformer_splits(loader, tokenizer)", "def get_dataloaders(data_dir, hyp):\n \n # Grab data, targets\n data_file_paths, targets = get_files_paths_and_labels(data_dir)\n \n # Split into train/validation\n train_data, val_data, train_labels, val_labels = train_test_split(data_file_paths,\n targets,\n train_size=hyp['perc_train'],\n shuffle=hyp['shuffle'],\n stratify=targets)\n\n # Create train/validation augmentation handler\n train_aug = get_training_augmentations(hyp)\n val_aug = get_validation_augmentations(hyp)\n \n # Create datasets\n train_dset = SETIDataset(train_data, train_labels, transform=train_aug)\n val_dset = SETIDataset(val_data, val_labels, transform=val_aug)\n \n # Create dataloaders\n train_loader = DataLoader(train_dset, shuffle=True, batch_size=hyp['batch_size'], \n pin_memory=True, num_workers=8)\n \n val_loader = DataLoader(val_dset, batch_size=hyp['batch_size'], \n pin_memory=True, num_workers=8)\n \n return train_loader, val_loader", "def getEmbeddings(embed_loc, wrd_list, embed_dims):\n embed_list = []\n\n wrd2embed = {}\n for line in open(embed_loc, encoding='utf-8', errors='ignore'):\n data = line.strip().split(' ')\n\n # wrd, embed = data[0], data[1:]\n\n # Some words may be separated by space (telephone numbers, for example).\n # It's more robust to load data as follows.\n embed = data[-1 * embed_dims:]\n wrd = ' '.join(data[: -1 * embed_dims])\n\n embed = list(map(float, embed))\n wrd2embed[wrd] = embed\n\n for wrd in wrd_list:\n if wrd in wrd2embed:\n embed_list.append(wrd2embed[wrd])\n else:\n print('Word not in embeddings dump {}'.format(wrd))\n embed_list.append(np.random.randn(embed_dims))\n\n return np.array(embed_list, dtype=np.float32)", "def pretrained(name=\"sent_small_bert_L2_768\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(BertSentenceEmbeddings, name, lang, remote_loc)" ]
[ "0.6092691", "0.60436624", "0.60151154", "0.59234816", "0.59059787", "0.5782936", "0.5690828", "0.5666181", "0.5597089", "0.5583591", "0.5570702", "0.55484855", "0.55165946", "0.5495378", "0.5463972", "0.5451432", "0.54469055", "0.54453945", "0.5405851", "0.54010916", "0.53670686", "0.5364566", "0.5364171", "0.53459024", "0.5345126", "0.5336129", "0.5332505", "0.5328814", "0.5319814", "0.53138673", "0.53053963", "0.5303668", "0.5301716", "0.5287217", "0.52858055", "0.528551", "0.5284958", "0.5266408", "0.52561456", "0.52542347", "0.52519447", "0.52342576", "0.52193046", "0.52103114", "0.51980627", "0.5185945", "0.51854694", "0.51847804", "0.5170898", "0.51675653", "0.51674086", "0.51598114", "0.5142331", "0.5139044", "0.51251525", "0.51163125", "0.5109659", "0.5105079", "0.5104499", "0.5100808", "0.51007944", "0.5093603", "0.50882167", "0.50882167", "0.5087321", "0.5083018", "0.50765324", "0.5068567", "0.50614417", "0.5052495", "0.50513524", "0.5051306", "0.50452805", "0.50388885", "0.5038635", "0.5036818", "0.50358033", "0.5035297", "0.5033181", "0.5020955", "0.50137764", "0.50078946", "0.5006559", "0.50055575", "0.49990606", "0.49941736", "0.49921733", "0.498751", "0.4987367", "0.49867183", "0.49856207", "0.49781865", "0.49771416", "0.4974621", "0.49707416", "0.49702948", "0.4967891", "0.49647248", "0.49589318", "0.49492672" ]
0.73473763
0
Reduced embeddings using PCA.
def get_pca_embeddings(config, name, training_embedding: dict, validation_embedding: dict):
    data_name = config['train_file'].split('/')[-1][:-4]  # retrieve file name without the extension
    train_pca_pkl_f = os.path.join(config['pca_cache'], data_name + '_' + config['embedding_type'] + '_training_embeddings.p')
    valid_pca_pkl_f = os.path.join(config['pca_cache'], data_name + '_' + config['embedding_type'] + '_validation_embeddings.p')

    if os.path.exists(train_pca_pkl_f):
        logger.info(" Loading PCA-embeddings from cache ")
        with open(train_pca_pkl_f, 'rb') as cache:
            train_embeddings = pickle.load(cache)
        with open(valid_pca_pkl_f, 'rb') as cache:
            valid_embeddings = pickle.load(cache)
    else:
        logger.info(' Standardizing ')
        ss = StandardScaler()
        train_embed_ss = ss.fit_transform(training_embedding['embeddings'])
        valid_embed_ss = ss.transform(validation_embedding['embeddings'])

        # Dimension reduction: PCA or UMAP (?)
        logger.info(' Doing PCA...')
        pca_model = decomposition.PCA(n_components=0.90)  # this can be a parameter down the road, but for debugging it's fine
        train_reduc = pca_model.fit_transform(train_embed_ss)
        val_reduc = pca_model.transform(valid_embed_ss)

        training_embedding['embeddings'] = train_reduc
        validation_embedding['embeddings'] = val_reduc
        train_embeddings = training_embedding.copy()
        valid_embeddings = validation_embedding.copy()

        # save embeddings
        pickle.dump(train_embeddings, open(train_pca_pkl_f, 'wb'))
        pickle.dump(valid_embeddings, open(valid_pca_pkl_f, 'wb'))

    embedding_shape = len(train_embeddings['embeddings'][0])
    return embedding_shape, train_embeddings, valid_embeddings
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pca(embedding, num_components=3, principal_components=None):\n# shape = embedding.get_shape().as_list()\n shape = tf.shape(embedding)\n embedding = tf.reshape(embedding, [-1, shape[3]])\n\n if principal_components is None:\n principal_components = calculate_principal_components(embedding,\n num_components)\n embedding = tf.matmul(embedding, principal_components)\n\n embedding = tf.reshape(embedding,\n [shape[0], shape[1], shape[2], num_components])\n return embedding", "def _apply_pca(self, X):\n newX = np.reshape(X, (-1, X.shape[2]))\n pca = sklearnPCA(n_components=self.num_components, whiten=True)\n newX = pca.fit_transform(newX)\n newX = np.reshape(newX, (X.shape[0], X.shape[1], self.num_components))\n return newX", "def postprocess(self, embeddings_batch):\n assert len(embeddings_batch.shape) == 2, \"Expected 2-d batch, got %r\" % (\n embeddings_batch.shape,\n )\n assert (\n embeddings_batch.shape[1] == vggish_params.EMBEDDING_SIZE\n ), \"Bad batch shape: %r\" % (embeddings_batch.shape,)\n\n # Apply PCA.\n # - Embeddings come in as [batch_size, embedding_size].\n # - Transpose to [embedding_size, batch_size].\n # - Subtract pca_means column vector from each column.\n # - Premultiply by PCA matrix of shape [output_dims, input_dims]\n # where both are are equal to embedding_size in our case.\n # - Transpose result back to [batch_size, embedding_size].\n pca_applied = torch.mm(\n self.pca_eigen_vectors, (embeddings_batch.t() - self.pca_means)\n ).t()\n\n # Quantize by:\n # - clipping to [min, max] range\n clipped_embeddings = torch.clamp(\n pca_applied, vggish_params.QUANTIZE_MIN_VAL, vggish_params.QUANTIZE_MAX_VAL\n )\n # - convert to 8-bit in range [0.0, 255.0]\n quantized_embeddings = torch.round(\n (clipped_embeddings - vggish_params.QUANTIZE_MIN_VAL)\n * (\n 255.0\n / (vggish_params.QUANTIZE_MAX_VAL - vggish_params.QUANTIZE_MIN_VAL)\n )\n )\n return torch.squeeze(quantized_embeddings)", "def reduce_dimension(positives, negatives, to_return=True, fv_len=10,\n new_pca=True):\n\n features = dict() \n \n # namapovani na numpy matice pro PCA\n X = np.vstack((np.vstack(positives), np.vstack(negatives)))\n Y = np.vstack((np.vstack([1]*len(positives)), np.vstack([-1]*len(negatives)))) \n \n print \"Data shape: \", X.shape, Y.shape, len(positives[0])\n \n # ulozeni puvodnich dat do souboru\n #dr.save_obj(parentname + \"/\" + childname + \"/raw_data.pklz\")\n \n # PCA\n if new_pca or pca is None:\n pca = PCA(n_components=fv_len) # vytvori PCA\n #pca = DEC(n_components=fv_len) # vytvori PCA\n pca.fit(X, Y)\n \n reduced = pca.transform(X) # redukuje dimenzi vektoru priznaku\n \n # znovu namapuje na zavedenou strukturu\n features = list(reduced)\n \n # ulozeni PCA\n #dataset.save_obj(pca, self.PCA_path+\"/PCA_\"+self.descriptor_type+\".pkl\")\n\n if to_return: return pca, features", "def _reduce(self, X):\n if not self.random_state:\n random_state = 0\n else:\n random_state = self.random_state\n\n self._pca = PCA(n_components=self.n_components, copy=self.copy,\n whiten=self.whiten, svd_solver=self.svd_solver,\n tol=self.tol, iterated_power=self.iterated_power,\n random_state=random_state)\n\n self._pca.fit(X)\n self._X_reduced = self._pca.transform(X)\n self._set_attributes(self._pca)", "def pca(X, ndim):\n X_m = X - np.mean(X, axis=0)\n u, s, vh = np.linalg.svd(X_m)\n # traditional notation decomp(A) = U (sigma) VT = (u * s) @ vh\n W = vh[0:ndim].T\n # X_m = X - np.mean(X, axis=0)\n return np.matmul(X_m, W)", "def emulator(pca, gp_model, params):\n # Weights prediction\n pred_weights = 
gp_predict(gp_model, params)\n\n # Inverse PCA (pred_weights * basis + mean)\n reconstructed = pca.inverse_transform(pred_weights)\n return reconstructed", "def pca(X = Math.array([]), no_dims = 50):\n\n print \"Preprocessing the data using PCA...\"\n (n, d) = X.shape;\n X = X - Math.tile(Math.mean(X, 0), (n, 1));\n (l, M) = Math.linalg.eig(Math.dot(X.T, X));\n Y = Math.dot(X, M[:,0:no_dims]);\n return Y;", "def pca(adata, n_components=50, train_ratio=0.35, n_batches=50, gpu=False):\n\n train_size = math.ceil(adata.X.shape[0] * train_ratio)\n\n if gpu:\n from cuml.decomposition import PCA\n import cupy as cp\n else:\n from sklearn.decomposition import PCA\n import numpy as cp\n\n pca = PCA(n_components=n_components).fit(adata.X[:train_size])\n \n embeddings = cp.zeros((adata.X.shape[0], n_components))\n batch_size = int(embeddings.shape[0] / n_batches)\n for batch in range(n_batches):\n start_idx = batch * batch_size\n end_idx = start_idx + batch_size\n\n if(adata.X.shape[0] - end_idx < batch_size):\n end_idx = adata.X.shape[0]\n\n embeddings[start_idx:end_idx,:] = cp.asarray(pca.transform(adata.X[start_idx:end_idx]))\n \n if gpu:\n embeddings = embeddings.get()\n\n adata.obsm[\"X_pca\"] = embeddings\n return adata", "def pca_2(emb) :\n pcaer = skd.PCA(n_components=2)\n pca = pcaer.fit_transform(emb)\n \n return pca", "def reduce_dimensions(feature_vectors_full, model):\n\n if 'fvectors' in model:\n v = np.array(model['fvectors'])\n else:\n #Principal Components Analysis implemented from lab code\n covx = np.cov(feature_vectors_full, rowvar=0)\n N = covx.shape[0]\n w, v = scipy.linalg.eigh(covx, eigvals=(N - 40, N - 1))\n v = np.fliplr(v)\n model['fvectors'] = v.tolist()\n pca_train = np.dot((feature_vectors_full - np.mean(feature_vectors_full)), v)\n return pca_train[:,0:10]", "def runPCA(data, reducedDimensions, showScree):\n print(\"-->Running PCA.\")\n latent = gp.pca(data['features'], reducedDimensions, showScree, savePlots)\n plot(latent, data['colours'], reducedDimensions, \"Iris Dataset\", \"PCA\")", "def apply_pca(X: numpy.ndarray, pca: sklearn.decomposition.PCA):\n output = pca.transform(X)\n return output", "def pca(self):\n self.pca_mean = self.X.mean(axis=1)\n X_meanC = self.X - self.pca_mean[:, None]\n (self.pca_U, self.pca_S, self.pca_V) = np.linalg.svd(X_meanC, full_matrices=False)\n self.pc_weights = np.dot(np.diag(self.pca_S), self.pca_V)\n self.pc_stdevs = np.std(self.pc_weights, axis=1)", "def pca_reduction(X, ncomp=20):\n print('Performing dimensionality reduction ...')\n\n # PCA fitting\n pca = PCA(n_components=ncomp)\n weights = pca.fit_transform(X)\n basis = pca.components_\n\n # # Plot cumsum(explained_variance) versus component\n # plt.semilogy(pca.explained_variance_ratio_*100, 's')\n # plt.ylabel('Explained Variance Ratio (%)', size=20)\n # plt.xticks(size=20)\n # plt.xlabel('Component', size=20)\n # plt.yticks(size=20)\n # plt.show()\n\n print('Explained variance ratio : '+str(round(np.cumsum(pca.explained_variance_ratio_)[-1]*100, 2))+' %.')\n\n # pickle.dump(pca, '/../Data/GPmodel/pca_'+str(ncomp))\n\n # Some plots on PCA\n # plot_pca(basis, weights)\n\n return pca, weights", "def doPCA(pairs, embedding, num_components=10):\n matrix = []\n for a, b in pairs:\n center = (embedding.v(a) + embedding.v(b)) / 2\n matrix.append(embedding.v(a) - center)\n matrix.append(embedding.v(b) - center)\n matrix = np.array(matrix)\n pca = PCA(n_components=num_components)\n pca.fit(matrix)\n # bar(range(num_components), pca.explained_variance_ratio_)\n return pca", "def 
pca():\n pca = PCA()\n\n data = pca.fit_transform([[22,23,24],[23,84,12],[22,74,54],[22,23,24],[22,84,12],[22,74,54],[22,23,24],[22,84,12],[22,74,54]])\n\n print(data)", "def reduce_single_vector_dimension(vect):\n\n # aplikace ulozeneho PCA\n reduced = pca.transform(vect) # redukuje dimenzi vektoru priznaku\n\n return reduced", "def preprocess(train_dataset, test_dataset):\n pca = PCA(n_components=20)\n pca.fit(train_dataset)\n train_dataset = pca.transform(train_dataset)\n test_dataset = pca.transform(test_dataset)\n return train_dataset, test_dataset", "def PCA(X, dims_rescaled_data=21):\n # pca = decomposition.PCA(n_components=3)\n # x_std = StandardScaler().fit_transform(X)\n # a = pca.fit_transform(x_std)\n\n R = np.cov(X, rowvar=False)\n evals, evecs = scipy.linalg.eigh(R)\n idx = np.argsort(evals)[::-1]\n evecs = evecs[:,idx]\n\n evals = evals[idx]\n evecs = evecs[:, :dims_rescaled_data]\n\n newX = np.dot(evecs.T, X.T).T\n\n return newX #, evals, evecs", "def pca(X, ndim):\n\n Xmean = X - np.mean(X, axis=0)\n _, _, vh = np.linalg.svd(Xmean)\n W = vh[:ndim].T\n T = np.matmul(Xmean, W)\n\n return T", "def PCA_reduction(\n df: pd.DataFrame,\n cols: List[str],\n n_components: int,\n prefix: str = 'PCA_',\n random_seed: int = 42,\n keep: bool = False\n) -> pd.DataFrame:\n print(\"Executing PCA reduction on dataset...\")\n df = df.copy()\n pca = decomposition.PCA(n_components=n_components, random_state=random_seed)\n\n principal_components = pca.fit_transform(df[cols])\n\n principal_df = pd.DataFrame(principal_components)\n if not keep:\n df.drop(cols, axis=1, inplace=True)\n\n principal_df.rename(columns=lambda x: str(prefix) + str(x), inplace=True)\n\n # Align index of principal components and the original dataset.\n principal_df = principal_df.set_index(df.index)\n\n df = pd.concat([df, principal_df], axis=1)\n\n return df", "def doPCA(self):\n data = [l.points for l in self.preprocessedLandmarks]\n data.append(data[0])\n\n S = np.cov(np.transpose(data))\n\n eigenvalues, eigenvectors = np.linalg.eig(S)\n sorted_values = np.flip(eigenvalues.argsort(), 0)[:self.pcaComponents]\n\n self.eigenvalues = eigenvalues[sorted_values]\n self.eigenvectors = eigenvectors[:, sorted_values]\n # print(self.eigenvalues)\n return self", "def reduce_dimension(self, n_components=2):\n\n reducer = PCA(n_components=n_components)\n\n X = self.data.values.astype(np.float32)\n\n norm = Normalizer()\n Xnorm = norm.fit_transform(X)\n\n return reducer.fit_transform(Xnorm)", "def dimensionality_reduction(train_frame,valid_frame=None,test_frame=None,columns=[],n_comp=320,random_seed=420,decompositions_to_run=['PCA','TSVD','ICA','GRP','SRP'],frame_type='spark',test_does_have_y=False,only_return_decompositions=False,id_col='ID', column_name=None):\n if frame_type == 'spark':\n from pyspark.ml.feature import PCA\n from pyspark.ml.linalg import Vectors\n from pyspark.ml.feature import VectorAssembler\n # from pyspark.ml.feature import VectorDisassembler\n from pyspark.ml.feature import StandardScaler\n from pyspark.ml import Pipeline\n\n train_df, valid_df, test_df = None,None,None\n train_df = train_frame\n if valid_frame:\n valid_df = valid_frame\n if test_frame:\n test_df = test_frame\n\n assembler = VectorAssembler(\n inputCols=columns,\n outputCol=\"features\")\n scaler = StandardScaler(inputCol=assembler.getOutputCol(),\n outputCol=\"scaledFeatures\",\n withStd=False,\n withMean=True)\n pca = PCA(k=n_comp, inputCol=scaler.getOutputCol(), outputCol=\"pcaFeatures\")\n pipeline = Pipeline(stages=[assembler,scaler, 
pca])\n\n #define a function for extracting pca vector column into their own columns\n def extract_vectors(row):\n \"\"\"\n Takes a vector and extracts it into many columns from the vector.\n pcaFeatures is the vector being extracted in this function.\n Vector values will be named _2, _3, ...\n \"\"\"\n # tuple(x for x in row if x not in ['pcaFeatures'])+\n return tuple(float(x) for x in row.pcaFeatures.values)\n\n #define a function for extracting pca vector column into their own columns\n def extract_vectors_with_id_col(row):\n \"\"\"\n Takes a vector and extracts it into many columns from the vector.\n pcaFeatures is the vector being extracted in this function.\n Vector values will be named _2, _3, ...\n \"\"\"\n # tuple(x for x in row if x not in ['pcaFeatures'])+\n return (row[id_col],)+tuple(float(x) for x in row.pcaFeatures.values)\n\n def rename_columns(dataframe,new_prefix='pca_',old_colomn_starting_index=2,new_column_starting_index=1):\n \"\"\"\n Takes a spark df and renames all columns to something like pca_1\n from the previously named columns.\n \"\"\"\n old_column_index = old_colomn_starting_index\n new_column_index = new_column_starting_index\n for i in range(0,n_comp):\n if column_name:\n dataframe = dataframe.withColumnRenamed('_'+str(old_colomn_starting_index),column_name+'_'+new_prefix+str(new_column_starting_index))\n else:\n dataframe = dataframe.withColumnRenamed('_'+str(old_colomn_starting_index),new_prefix+str(new_column_starting_index))\n old_colomn_starting_index+=1\n new_column_starting_index+=1\n return dataframe\n\n #Do PCA tranformation for training data\n model_train = pipeline.fit(train_frame)\n result_train = model_train.transform(train_frame)\n extracted_pca_train = result_train.rdd.map(extract_vectors_with_id_col).toDF([id_col])\n extracted_pca_train = rename_columns(extracted_pca_train)\n\n #Do PCA tranformation for validation data if it was given\n extracted_pca_valid = None\n model_valid = None #Will need this to fit test if it doesn't have y values\n if valid_frame:\n model_valid = pipeline.fit(valid_frame)\n result_valid = model_train.transform(valid_frame)\n extracted_pca_valid = result_valid.rdd.map(extract_vectors_with_id_col).toDF([id_col])\n extracted_pca_valid = rename_columns(extracted_pca_valid)\n\n #Do PCA tranformation for test data if it was given\n extracted_pca_test = None\n if test_frame:\n model_test = pipeline.fit(test_frame)\n result_test = model_test.transform(test_frame)\n extracted_pca_test = result_test.rdd.map(extract_vectors_with_id_col).toDF([id_col])\n extracted_pca_test = rename_columns(extracted_pca_test)\n ###\n ### SVD ###\n ###\n # https://stackoverflow.com/questions/33428589/pyspark-and-pca-how-can-i-extract-the-eigenvectors-of-this-pca-how-can-i-calcu/33500704#33500704\n # https://github.com/apache/spark/blob/master/examples/src/main/python/mllib/svd_example.py\n # https://blog.dominodatalab.com/pca-on-very-large-neuroimaging-datasets-using-pyspark/\n from pyspark.mllib.linalg.distributed import RowMatrix\n from pyspark.mllib.linalg.distributed import IndexedRow, IndexedRowMatrix\n from pyspark.mllib.linalg import DenseVector\n\n def extract_svd_vectors_with_id_col(row):\n \"\"\"\n Takes a vector and extracts it into many columns from the vector.\n pcaFeatures is the vector being extracted in this function.\n Vector values will be named _2, _3, ...\n \"\"\"\n # tuple(x for x in row if x not in ['pcaFeatures'])+\n return (row[id_col],)+tuple(float(x) for x in row.svdFeatures.values)\n\n if 'SVD' in 
decompositions_to_run:\n #Train first\n mat = IndexedRowMatrix(result_train.rdd.map(lambda row: IndexedRow(row[id_col],DenseVector(row['pcaFeatures']))))\n svd = mat.computeSVD(n_comp, computeU=True)\n U = svd.U # The U factor is a RowMatrix.\n s = svd.s # The singular values are stored in a local dense vector.\n V = svd.V\n # Print vectors for testing\n# collected = U.rows.collect()\n# print(\"U factor is:\")\n# for vector in collected:\n# print(vector)\n# print(\"Singular values are: %s\" % s)\n# print(\"V factor is:\\n%s\" % V)\n extracted_svd_train = U.rows.map(lambda x: (x, )).toDF().rdd.map(lambda x: (x['_1'][0],x['_1'][1] )).toDF([id_col,'svdFeatures']).rdd.map(extract_svd_vectors_with_id_col).toDF([id_col])\n extracted_svd_train = rename_columns(extracted_svd_train,new_prefix='svd_')\n if valid_frame:\n mat = IndexedRowMatrix(result_valid.rdd.map(lambda row: IndexedRow(row[id_col],DenseVector(row['pcaFeatures']))))\n svd = mat.computeSVD(n_comp, computeU=True)\n U = svd.U # The U factor is a RowMatrix.\n s = svd.s # The singular values are stored in a local dense vector.\n V = svd.V # The V factor is a local dense matrix.\n extracted_svd_valid = U.rows.map(lambda x: (x, )).toDF().rdd.map(lambda x: (x['_1'][0],x['_1'][1] )).toDF([id_col,'svdFeatures']).rdd.map(extract_svd_vectors_with_id_col).toDF([id_col])\n extracted_svd_valid = rename_columns(extracted_svd_valid,new_prefix='svd_')\n if test_frame:\n mat = IndexedRowMatrix(result_valid.rdd.map(lambda row: IndexedRow(row[id_col],DenseVector(row['pcaFeatures']))))\n svd = mat.computeSVD(n_comp, computeU=True)\n U = svd.U # The U factor is a RowMatrix.\n s = svd.s # The singular values are stored in a local dense vector.\n V = svd.V # The V factor is a local dense matrix.\n extracted_svd_test = U.rows.map(lambda x: (x, )).toDF().rdd.map(lambda x: (x['_1'][0],x['_1'][1] )).toDF([id_col,'svdFeatures']).rdd.map(extract_svd_vectors_with_id_col).toDF([id_col])\n extracted_svd_test = rename_columns(extracted_svd_test,new_prefix='svd_')\n\n if only_return_decompositions:\n train_df = train_df.select(id_col)\n if valid_df:\n train_df = valid_df.select(id_col)\n if test_df:\n test_df = test_df.select(id_col)\n if 'PCA' in decompositions_to_run:\n train_df = extracted_pca_train.join(train_df,id_col,'inner')\n if valid_df:\n valid_df = extracted_pca_valid.join(valid_df,id_col,'inner')\n if test_df:\n test_df = extracted_pca_test.join(test_df,id_col,'inner')\n if 'SVD' in decompositions_to_run:\n train_df = extracted_svd_train.join(train_df,id_col,'inner')\n if valid_df:\n valid_df = extracted_svd_valid.join(valid_df,id_col,'inner')\n if test_df:\n test_df = extracted_svd_test.join(test_df,id_col,'inner')\n # return the right number of frames\n if valid_frame:\n if test_frame:\n return train_df.drop('features','scaledFeatures','pcaFeatures','svdFeatures'),valid_df.drop('features','scaledFeatures','pcaFeatures','svdFeatures'),test_df.drop('features','scaledFeatures','pcaFeatures','svdFeatures')\n else:\n return train_df.drop('features','scaledFeatures','pcaFeatures','svdFeatures'),valid_df.drop('features','scaledFeatures','pcaFeatures','svdFeatures')\n else:\n if test_frame:\n return train_df.drop('features','scaledFeatures','pcaFeatures','svdFeatures'),test_df.drop('features','scaledFeatures','pcaFeatures','svdFeatures')\n else:\n return train_df.drop('features','scaledFeatures','pcaFeatures','svdFeatures')\n\n elif frame_type in ['h2o','pandas']:\n from sklearn.random_projection import GaussianRandomProjection\n from sklearn.random_projection 
import SparseRandomProjection\n from sklearn.decomposition import PCA, FastICA\n from sklearn.decomposition import TruncatedSVD\n import pandas as pd\n\n train_df, test_df, valid_df = None, None, None\n if frame_type == 'h2o':\n # convert to pandas\n train_df = train_frame.as_data_frame()\n if valid_frame:\n valid_df = valid_frame.as_data_frame()\n test_df = test_frame.as_data_frame()\n elif frame_type == 'pandas':\n train_df = training_frame\n if valid_frame:\n valid_df = valid_frame\n test_df = test_frame\n\n train_df = train_df[columns]\n if valid_frame:\n valid_df = valid_df[columns]\n test_df = test_df[columns]\n\n\n tsvd_results_train, tsvd_results_valid, tsvd_results_test = None, None, None\n if 'TSVD' in decompositions_to_run:\n tsvd = TruncatedSVD(n_components=n_comp, random_state=random_seed)\n tsvd_results_train = tsvd.fit_transform(train_df)\n tsvd_results_valid, tsvd_results_test = None, None\n if valid_frame:\n tsvd2 = TruncatedSVD(n_components=n_comp, random_state=random_seed)\n tsvd_results_valid = tsvd2.fit_transform(valid_df)\n if test_frame:\n if test_does_have_y:\n tsvd3 = TruncatedSVD(n_components=n_comp, random_state=random_seed)\n tsvd_results_test = tsvd3.fit_transform(test_df)\n else:\n tsvd_results_test = tsvd2.transform(test_df)\n else:\n if test_frame:\n if test_does_have_y:\n tsvd3 = TruncatedSVD(n_components=n_comp, random_state=random_seed)\n tsvd_results_test = tsvd3.fit_transform(test_df)\n else:\n tsvd_results_test = tsvd.transform(test_df)\n\n #PCA\n pca_results_train, pca_results_valid, pca_results_test = None, None, None\n if 'PCA' in decompositions_to_run:\n pca = PCA(n_components=n_comp, random_state=random_seed)\n pca_results_train = pca.fit_transform(train_df)\n if valid_frame:\n pca2 = PCA(n_components=n_comp, random_state=random_seed)\n pca_results_valid = pca2.fit_transform(valid_df)\n if test_frame:\n if test_does_have_y:\n pca3 = PCA(n_components=n_comp, random_state=random_seed)\n pca_results_test = pca3.fit_transform(test_df)\n else:\n pca_results_test = pca2.transform(test_df)\n else:\n if test_frame:\n if test_does_have_y:\n pca3 = PCA(n_components=n_comp, random_state=random_seed)\n pca_results_test = pca3.fit_transform(test_df)\n else:\n pca_results_test = pca.transform(test_df)\n\n # ICA\n ica_results_train, ica_results_valid, ica_results_test = None, None, None\n if 'ICA' in decompositions_to_run:\n ica = FastICA(n_components=n_comp, random_state=random_seed)\n ica_results_train = ica.fit_transform(train_df)\n if valid_frame:\n ica2 = FastICA(n_components=n_comp, random_state=random_seed)\n ica_results_valid = ica2.fit_transform(valid_df)\n if test_frame:\n if test_does_have_y:\n ica3 = FastICA(n_components=n_comp, random_state=random_seed)\n ica_results_test = ica3.fit_transform(test_df)\n else:\n ica_results_test = ica2.transform(test_df)\n else:\n if test_frame:\n if test_does_have_y:\n ica3 = FastICA(n_components=n_comp, random_state=random_seed)\n ica_results_test = ica3.fit_transform(test_df)\n else:\n ica_results_test = ica.transform(test_df)\n\n\n # GRP\n grp_results_train, grp_results_valid, grp_results_test = None, None, None\n if 'GRP' in decompositions_to_run:\n grp = GaussianRandomProjection(n_components=n_comp,eps=0.1, random_state=random_seed)\n grp_results_train = grp.fit_transform(train_df)\n if valid_frame:\n grp2 = GaussianRandomProjection(n_components=n_comp,eps=0.1, random_state=random_seed)\n grp_results_valid = grp2.fit_transform(valid_df)\n if test_frame:\n if test_does_have_y:\n grp3 = 
GaussianRandomProjection(n_components=n_comp,eps=0.1, random_state=random_seed)\n grp_results_test = grp3.fit_transform(test_df)\n else:\n grp_results_test = grp2.transform(test_df)\n else:\n if test_frame:\n if test_does_have_y:\n grp3 = GaussianRandomProjection(n_components=n_comp,eps=0.1, random_state=random_seed)\n grp_results_test = grp3.fit_transform(test_df)\n else:\n grp_results_test = grp.transform(test_df)\n\n # SRP\n srp_results_train, srp_results_valid, srp_results_test = None, None, None\n if 'SRP' in decompositions_to_run:\n srp = SparseRandomProjection(n_components=n_comp, dense_output=True, random_state=random_seed)\n srp_results_train = srp.fit_transform(train_df)\n if valid_frame:\n srp2 = SparseRandomProjection(n_components=n_comp, dense_output=True, random_state=random_seed)\n srp_results_valid = srp2.fit_transform(valid_df)\n if test_frame:\n if test_does_have_y:\n srp3 = SparseRandomProjection(n_components=n_comp, dense_output=True, random_state=random_seed)\n srp_results_test = srp3.fit_transform(test_df)\n else:\n srp_results_test = srp2.transform(test_df)\n else:\n if test_frame:\n if test_does_have_y:\n srp3 = SparseRandomProjection(n_components=n_comp, dense_output=True, random_state=random_seed)\n srp_results_test = srp3.fit_transform(test_df)\n else:\n srp_results_test = srp.transform(test_df)\n\n if only_return_decompositions:\n train_df = pd.DataFrame()\n if valid_frame:\n valid_df = pd.DataFrame()\n if test_frame:\n test_df = pd.DataFrame()\n for i in range(1, n_comp + 1):\n if 'PCA' in decompositions_to_run:\n train_df['pca_' + str(i)] = pca_results_train[:, i - 1]\n if valid_frame:\n valid_df['pca_' + str(i)] = pca_results_valid[:, i - 1]\n if test_frame:\n test_df['pca_' + str(i)] = pca_results_test[:, i - 1]\n\n if 'ICA' in decompositions_to_run:\n train_df['ica_' + str(i)] = ica_results_train[:, i - 1]\n if valid_frame:\n valid_df['pca_' + str(i)] = ica_results_valid[:, i - 1]\n if test_frame:\n test_df['ica_' + str(i)] = ica_results_test[:, i - 1]\n\n if 'TSVD' in decompositions_to_run:\n train_df['tsvd_' + str(i)] = tsvd_results_train[:, i - 1]\n if valid_frame:\n valid_df['pca_' + str(i)] = tsvd_results_valid[:, i - 1]\n if test_frame:\n test_df['tsvd_' + str(i)] = tsvd_results_test[:, i - 1]\n\n if 'GRP' in decompositions_to_run:\n train_df['grp_' + str(i)] = grp_results_train[:, i - 1]\n if valid_frame:\n valid_df['pca_' + str(i)] = grp_results_valid[:, i - 1]\n if test_frame:\n test_df['grp_' + str(i)] = grp_results_test[:, i - 1]\n\n if 'SRP' in decompositions_to_run:\n train_df['srp_' + str(i)] = srp_results_train[:, i - 1]\n if valid_frame:\n valid_df['pca_' + str(i)] = srp_results_valid[:, i - 1]\n if test_frame:\n test_df['srp_' + str(i)] = srp_results_test[:, i - 1]\n\n if frame_type == 'pandas':\n if valid_frame:\n if test_frame:\n return (train_df, valid_df, test_df)\n else:\n return (train_df, valid_df)\n else:\n if test_frame:\n return (train_df, test_df)\n else:\n return (train_df)\n elif frame_type == 'h2o':\n # convert back to h2o\n import h2o\n print('Converting to H2OFrame ...')\n # convert train back to h2o\n training_frame = h2o.H2OFrame(train_df)\n training_frame.columns = list(train_df)\n # conserve memory\n del train_df\n testing_frame = None\n if test_frame:\n # convert test back to h2o\n testing_frame = h2o.H2OFrame(test_df)\n testing_frame.columns = list(test_df)\n # conserve memory\n del test_df\n validation_frame = None\n if valid_frame:\n # convert test back to h2o\n validation_frame = h2o.H2OFrame(valid_df)\n 
validation_frame.columns = list(valid_df)\n # conserve memory\n del valid_df\n\n print('Done.')\n\n if valid_frame:\n if test_frame:\n return training_frame, validation_frame, testing_frame\n else:\n return training_frame, validation_frame\n else:\n if test_frame:\n return training_frame, testing_frame\n else:\n return training_frame", "def get_pca():\n from sklearn.decomposition import PCA\n return PCA()", "def _reduce_matrix_dimensions(self, principal_components=5):\n # TODO: consider the TruncatedSVD or other sklearn PCA algorithm varients here\n\n pca = decomposition.PCA(n_components=principal_components, random_state=RANDOM_STATE)\n pca.fit(\n np.rot90(self.mtx.toarray())\n ) # rotate by 90 degrees to accomadate for axis which reduction is performed on\n self.pca_matrix = pca.transform(np.rot90(self.mtx.toarray()))", "def pca(self, X):\n return ImgCompression.svd(self, X)", "def pca(x):\n\t\n\tx = (x - x.mean(axis = 0)) # Subtract the mean of column i from column i, in order to center the matrix.\n\t\n\tnum_observations, num_dimensions = x.shape\n\t\n\t# Often, we have a large number of dimensions (say, 10,000) but a relatively small number of observations (say, 75). In this case, instead of directly computing the eigenvectors of x^T x (a 10,000 x 10,000 matrix), it's more efficient to compute the eigenvectors of x x^T and translate these into the eigenvectors of x^T x by using the transpose trick. \n\t# The transpose trick says that if v is an eigenvector of M^T M, then Mv is an eigenvector of MM^T.\n\t# We arbitrarily select \"100\" as the switching threshold. Another approach is to switch by comparing num_observations and num_dimensions.\n\tif num_dimensions > 100:\n\t\teigenvalues, eigenvectors = linalg.eigh(dot(x, x.T))\n\t\tv = (dot(x.T, eigenvectors).T)[::-1] # Unscaled, but the relative order is still correct.\n\t\ts = sqrt(eigenvalues)[::-1] # Unscaled, but the relative order is still correct.\n\telse:\n\t\tu, s, v = linalg.svd(x, full_matrices = False)\n\t\t\n\treturn v, s", "def run_pca(df, cols=None): \n import numpy as np\n import pandas as pd\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n from sklearn.preprocessing import StandardScaler\n from sklearn.decomposition import PCA\n import mpld3\n\n # Define and markers to use for different categories\n groups_dict = {(u'D', 0):('Germany, unregulated', 'g', 'o'),\n (u'N', 0):('Norway, unregulated', 'b', 'o'),\n (u'D', 1):('Germany, regulated', 'g', '^'),\n (u'N', 1):('Norway, regulated', 'b', '^')}\n \n # Extract cols of interest\n cats = df[['country', 'regulated']]\n\n if cols:\n df = df[cols].astype(float)\n\n # Standardise the feature data\n feat_std = StandardScaler().fit_transform(df)\n\n # Setup PCA. 
Initially, choose to keep ALL components\n pca = PCA()\n\n # Fit model\n pca.fit(feat_std)\n\n # Get explained variances (in %)\n var_exp = 100*pca.explained_variance_ratio_\n cum_exp = np.cumsum(var_exp)\n\n # Get eigenvalues\n cov_mat = np.cov(feat_std.T)\n eig_vals, eig_vecs = np.linalg.eig(cov_mat)\n\n # Get number of EVs > 1 (Kaiser-Guttman criterion)\n # and print summary\n n_kgc = (eig_vals > 1).sum()\n print 'Variance explained by first %s PCs (%%):\\n' % n_kgc\n print var_exp[:n_kgc]\n print '\\nTotal: %.2f%%' % var_exp[:n_kgc].sum()\n \n # Plot\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(16, 6))\n \n # Explained variance\n axes[0].bar(range(1, len(var_exp)+1), var_exp, \n align='center', label='Individual components')\n axes[0].plot(range(1, len(cum_exp)+1), cum_exp, \n 'r-o', label='Cumulative')\n axes[0].set_xlabel('Principal component')\n axes[0].set_ylabel('Variance explained (%)')\n axes[0].legend(loc='center right')\n \n # Eigenvalues\n axes[1].plot(range(1, len(eig_vals)+1), np.sort(eig_vals)[::-1], \n 'r-o', label='Eigenvalues')\n axes[1].axhline(1, c='k', ls='-', label='Kaiser-Guttman threshold')\n axes[1].set_xlabel('Principal component')\n axes[1].set_ylabel('Eigenvalue')\n axes[1].legend(loc='upper right') \n \n # PC loadings\n loads = pd.DataFrame(data=pca.components_, \n columns=df.columns,\n index=range(1, pca.components_.shape[0]+1)).T\n\n # Project into 2 and 3 components\n fig = plt.figure(figsize=(16, 6))\n \n # Plot 2 components\n ax = fig.add_subplot(1, 2, 1)\n \n # Refit the PCA, this time specifying 2 components\n # and transforming the result\n feat_reduced = PCA(n_components=2).fit_transform(feat_std)\n \n # Build df \n data = pd.DataFrame({'PC1':feat_reduced[:, 0],\n 'PC2':feat_reduced[:, 1],\n 'country':cats['country'],\n 'regulated':cats['regulated']}) \n\n groups = data.groupby(['country', 'regulated'])\n \n # Plot\n for name, group in groups:\n ax.scatter(group['PC1'], group['PC2'], s=60,\n label=groups_dict[name][0],\n c=groups_dict[name][1],\n marker=groups_dict[name][2])\n \n ax.set_xlabel('First principal component')\n ax.set_ylabel('Second principal component')\n ax.set_title('First two PCA directions')\n \n # Plot 3 components\n ax = fig.add_subplot(1, 2, 2, projection='3d', \n elev=-150, azim=135)\n\n # Refit the PCA, this time specifying 3 components\n # and transforming the result\n feat_reduced = PCA(n_components=3).fit_transform(feat_std)\n\n # Build df with colours\n data = pd.DataFrame({'PC1':feat_reduced[:, 0],\n 'PC2':feat_reduced[:, 1],\n 'PC3':feat_reduced[:, 2],\n 'country':cats['country'],\n 'regulated':cats['regulated']}) \n \n groups = data.groupby(['country', 'regulated'])\n \n # Plot\n for name, group in groups:\n ax.scatter(group['PC1'], group['PC2'], group['PC3'],\n label=groups_dict[name][0],\n c=groups_dict[name][1],\n marker=groups_dict[name][2],\n s=60)\n \n ax.set_title('First three PCA directions')\n ax.set_xlabel('First principal component')\n ax.set_ylabel('Second principal component')\n ax.set_zlabel('Third principal component')\n ax.legend(bbox_to_anchor=(0.15, -0.1), frameon=True)\n plt.show()\n\n return loads", "def PCA(data, n=2):\n U, S, Vt = np.linalg.svd(data, full_matrices=False)\n s = np.diag(S)\n newdata = np.dot(U[:, :n], np.dot(s[:n, :n], Vt[:n,:]))\n return newdata", "def pca(image):\n # Reshape image.\n reshaped_image = np.reshape(image, (224 * 224, 3))\n # Find the covariance.\n cov = np.cov(reshaped_image, rowvar=0)\n # Eigenvalues and vectors.\n eigvals, eigvecs = np.linalg.eigh(cov)\n\n # 
Pick random gaussian values.\n a = np.random.normal(0, 0.1, size=(3,))\n\n scaled = eigvals * a\n delta = np.dot(eigvecs, scaled.T)\n return np.add(delta, scaled)", "def principle_component_analysis(data_frame, dim=2):\n pca = PCA(n_components=dim)\n sc = StandardScaler()\n y = data_frame.loc[:, [\"Label\"]].values\n x = pd.DataFrame(data_frame[\"Vector\"].tolist())\n x = sc.fit_transform(x)\n principlecomponents = pca.fit_transform(x)\n principalDf = pd.DataFrame(data=principlecomponents)\n data_frame[\"Vector\"] = principalDf.values.tolist()", "def pca(X, k = 30):\n \n # Center/scale the data.\n s = np.std(X, axis=0)\n s = np.where(s==0, 1, s)\n X = (X - np.mean(X, axis=0))/s\n \n # Run PCA with sklearn.\n pca_ = PCA(n_components=k)\n return pca_.fit_transform(X)", "def pca(frame,columns=[],k=320,frame_type='spark'):\n if frame_type == 'spark':\n # https://stackoverflow.com/questions/33428589/pyspark-and-pca-how-can-i-extract-the-eigenvectors-of-this-pca-how-can-i-calcu/33481471\n from numpy.linalg import eigh\n from pyspark.ml.linalg import Vectors\n from pyspark.ml.feature import VectorAssembler\n from pyspark.ml.feature import StandardScaler\n from pyspark.ml import Pipeline\n\n assembler = VectorAssembler(\n inputCols=columns,\n outputCol=\"features\")\n scaler = StandardScaler(inputCol=assembler.getOutputCol(),\n outputCol=\"scaledFeatures\",\n withStd=False,\n withMean=True)\n pipeline = Pipeline(stages=[assembler,scaler])\n model = pipeline.fit(frame)\n df = model.transform(frame)\n\n def estimateCovariance(df):\n \"\"\"Compute the covariance matrix for a given dataframe.\n\n Note:\n The multi-dimensional covariance array should be calculated using outer products. Don't\n forget to normalize the data by first subtracting the mean.\n\n Args:\n df: A Spark dataframe with a column named 'features', which (column) consists of DenseVectors.\n\n Returns:\n np.ndarray: A multi-dimensional array where the number of rows and columns both equal the\n length of the arrays in the input dataframe.\n \"\"\"\n import numpy as np\n m = df.select(df['scaledFeatures']).map(lambda x: x[0]).mean()\n dfZeroMean = df.select(df['scaledFeatures']).map(lambda x: x[0]).map(lambda x: x-m) # subtract the mean\n\n return dfZeroMean.map(lambda x: np.outer(x,x)).sum()/df.count()\n\n cov = estimateCovariance(df)\n col = cov.shape[1]\n eigVals, eigVecs = eigh(cov)\n inds = np.argsort(eigVals)\n eigVecs = eigVecs.T[inds[-1:-(col+1):-1]]\n components = eigVecs[0:k]\n eigVals = eigVals[inds[-1:-(col+1):-1]] # sort eigenvals\n score = df.select(df['scaledFeatures']).map(lambda x: x[0]).map(lambda x: np.dot(x, components.T) )\n\n #Show the Variance explained\n print('Vairance Explained:', sum(eigVals[0:k])/sum(eigVals) )\n\n # Return the `k` principal components, `k` scores, and all eigenvalues\n return components.T, score, eigVals\n elif frame_type in ['h2o','pandas']:\n raise Exception('Not Implemented yet.')", "def reduce_dims(X=None, y=None, data_path=None, algorithm='pca', perplexity=50, labels=['M', 'R', 'HC', 'V', 'PO']):\n\n if data_path is not None:\n X, y = load_tma_data(data_path)\n\n X = X.reshape(X.shape[0], X.shape[1] * X.shape[2])\n\n print(\"Number of Training Samples : \", X.shape[0])\n\n # standardize the data\n sc = StandardScaler()\n X = sc.fit_transform(X)\n\n # reduce the dimensionality of the data\n if algorithm == 'pca':\n pca = PCA(n_components=2)\n X_reduced = pca.fit_transform(X)\n\n if algorithm == 'tsne':\n tsne = TSNE(n_components=2, perplexity=perplexity, random_state=0, verbose=True)\n 
X_reduced = tsne.fit_transform(X)\n\n # plot the latent space\n plot_latent_space(X_reduced, y, labels)", "def pca(filename, class_col, sample):\n\n\tX = ml.read_file( filename )\n\n\t# Remove the class label from the dataset so that it doesn't prevent us from training a classifier in the future\n\tif class_col != None:\n\t\ttry:\n\t\t\tclassifier = ml.pd.DataFrame(X.iloc[:, class_col])\n\t\texcept:\n\t\t\tml.sys.exit('Class column out of range.')\n\t\tm = X.shape[1]\n\t\tkeepers = list(range(m))\n\t\tkeepers.pop( class_col )\n\n\t# Determine whether sample is present\n\tX_input = X.iloc[:, keepers]\n\n\t# # Visualize raw data\n\tml.plt.figure()\n\tml.sns.scatterplot(data = X, x = X_input['Petal Length (cm)'], y = X_input['Petal Width (cm)'], color = 'k', alpha = 0.5).set(title = filename + ' raw')\n\n\t# Normalize features by Z-score (so that features' units don't dominate PCs), and apply PCA\n\tX_norm, X_mean, X_std = ml.z_norm(X_input)\n\tY, P, e_scaled = ml.pca_cov( X_norm )\n\n\t# Visualize 2D PC data\n\tml.plt.figure()\n\tml.sns.scatterplot(data = Y, x = Y.iloc[:, 0], y = Y.iloc[:, 1], alpha=0.5, color = 'k').set(title = 'PC 2D Projection')\n\n\t# Visualize PCs with heatmap and cree plot\n\tinfo_retention = ml.scree_plot( e_scaled )\n\tml.pc_heatmap( P, info_retention )\n\n\t# Reconstruct data\n\treconstruct(X_input, X_mean, X_std, Y, P, e_scaled, 2, 3)\n\n\tml.plt.show()", "def pca_clean(inputspectra, n_components):\n spectra = inputspectra.spectra\n pca = decomposition.PCA(n_components)\n pca_fit = pca.fit(spectra)\n inputspectra.spectra_reduced = pca_fit.transform(spectra)\n inputspectra.spectra = pca_fit.inverse_transform(inputspectra.spectra_reduced)\n return inputspectra", "def PCA_vis(select_PCA_features, player_attributes):\n x = player_attributes.loc[:, select_PCA_features].values\n\n # Standardizing the features\n x = StandardScaler().fit_transform(x)\n\n # perform 3 component PCA\n pca = PCA(n_components=3)\n principalComponents = pca.fit_transform(x)\n principalDf = pd.DataFrame(\n data=principalComponents,\n columns=[\n \"principal component 1\",\n \"principal component 2\",\n \"principal component 3\",\n ],\n )\n\n # plot players dataset projection on three principal components\n # %matplotlib notebook\n\n fig = plt.figure(figsize=(8, 8))\n ax = fig.add_subplot(1, 1, 1, projection=\"3d\")\n ax.set_title(\"3 component PCA\", fontsize=30)\n\n # plot first k players' info along principal components\n k = 4000\n ax.scatter(\n principalDf.loc[:k, \"principal component 1\"],\n principalDf.loc[:k, \"principal component 2\"],\n principalDf.loc[:k, \"principal component 3\"],\n s=1,\n )\n\n ax.set_xlabel(\"Principal Component 1\", fontsize=15)\n ax.set_ylabel(\"Principal Component 2\", fontsize=15)\n ax.set_zlabel(\"Principal Component 3\", fontsize=15)\n plt.show()\n\n return principalDf", "def doPCA(df, grouping_variable, features_to_analyse, plot_save_dir=None, PCs_to_keep=10):\n \n data = df[features_to_analyse]\n \n # Normalise the data before PCA\n zscores = data.apply(zscore, axis=0)\n \n # Drop features with NaN values after normalising\n colnames_before = list(zscores.columns)\n zscores.dropna(axis=1, inplace=True)\n colnames_after = list(zscores.columns)\n nan_cols = [col for col in colnames_before if col not in colnames_after]\n if len(nan_cols) > 0:\n print(\"Dropped %d features with NaN values after normalization:\\n%s\" %\\\n (len(nan_cols), nan_cols))\n\n print(\"\\nPerforming Principal Components Analysis (PCA)...\")\n \n # Fit the PCA model with the normalised 
data\n pca = PCA()\n pca.fit(zscores)\n \n # Project data (zscores) onto PCs\n projected = pca.transform(zscores) # A matrix is produced\n # NB: Could also have used pca.fit_transform()\n\n # Plot summary data from PCA: explained variance (most important features)\n important_feats, fig = pcainfo(pca, zscores, PC=1, n_feats2print=10) \n \n if plot_save_dir:\n # Save plot of PCA explained variance\n PCAplotroot = Path(plot_save_dir) / 'PCA'\n PCAplotroot.mkdir(exist_ok=True, parents=True)\n PCAplotpath = PCAplotroot / ('control_variation_in_' + \n grouping_variable + \n '_PCA_explained.eps')\n savefig(PCAplotpath, tight_layout=True, tellme=True, saveFormat='eps')\n plt.pause(2); plt.close()\n else:\n PCAplotpath=None\n plt.show(); plt.pause(2); plt.close()\n \n # Store the results for first few PCs in dataframe\n projected_df = pd.DataFrame(projected[:,:PCs_to_keep],\n columns=['PC' + str(n+1) for n in range(PCs_to_keep)]) \n \n # Add concatenate projected PC results to metadata\n projected_df.set_index(df.index, inplace=True) # Do not lose video snippet index position\n \n df = pd.concat([df, projected_df], axis=1)\n\n # Plot PCA - Variation in control data with respect to a given variable (eg. date_recording_yyyymmdd)\n \n # 2-PC\n if plot_save_dir:\n PCAplotpath = Path(str(PCAplotpath).replace('_PCA_explained', \n '_PCA_2_components'))\n title = \"2-Component PCA: Control variation in\\n\\\n '{0}'\".format(grouping_variable) + \" (Top256 features)\"\n plotPCA(df, grouping_variable, var_subset=None, savepath=PCAplotpath, \n title=title, n_component_axes=2)\n plt.pause(2); plt.close()\n \n # 3-PC\n if plot_save_dir:\n PCAplotpath = Path(str(PCAplotpath).replace('_PCA_2_components', \n '_PCA_3_components'))\n title = \"3-Component PCA: Control variation in\\n\\\n '{0}'\".format(grouping_variable) + \" (Top256 features)\"\n plotPCA(df, grouping_variable, var_subset=None, savepath=PCAplotpath, \n title=title, n_component_axes=3, rotate=False)\n plt.pause(2)\n \n return df", "def pca(features, components=6):\n pca = PCA(n_components=components)\n transformed = pca.fit(features).transform(features)\n scaler = MinMaxScaler(feature_range=(0, 1))\n scaler.fit(transformed)\n return scaler.transform(transformed), pca, scaler", "def analyse_pca(cluster, three_dim=True):\n # create data array and name array:\n A = cluster.data_matrix\n names = cluster.row_header\n\n # assign colours to samples:\n colorconvert = {'F':'go', 'S':'co', 1:'ro', 2:'go', 3:'ko', 4:'bo', 5:'co', 6:'mo', 7:'yo', 8:'r<', 9:'g<', 10:'k<', 11:'b<', 12:'c<', 13:'m<', 14:'y<', 15:'rs', 16:'gs', 17:'ks', 18:'bs', 19:'cs', 20:'ms', 21:'ys' }\n colourlist = []\n for name in names:\n phase = re.search(\"(F|S)\", name)\n if phase is not None:\n #print phase.groups()[0]\n colourlist.append(colorconvert[phase.groups()[0]])\n else:\n colourlist.append('ko')\n #print names, \"\\n\", colourlist\n\n ############# PCA using numpy SVD decomposition ##################################\n print \"#\" * 30\n print \"SVA analysis\"\n U, s, Vt = numpy.linalg.svd(A, full_matrices=True)\n V = Vt.T\n\n # sort the PCs by descending order of the singular values (i.e. 
by the\n # proportion of total variance they explain)\n ind = numpy.argsort(s)[::-1]\n U = U[:, ind]\n s = s[ind]\n V = V[:, ind]\n S = numpy.diag(s)\n\n sumval = sum([ i ** 2 for i in s ])\n\n # if we use all of the PCs we can reconstruct the noisy signal perfectly\n\n # Mhat = numpy.dot(U, numpy.dot(S, V.T))\n # if we use only the first 2 PCs the reconstruction is less accurate\n # Mhat2 = numpy.dot(U[:, :2], numpy.dot(S[:2, :2], V[:,:2].T))\n\n # To remove the variance of the 1st PC, which is primarily associated with experimenter:\n matrix_reduced = numpy.dot(U[:,1:], numpy.dot(S[1:,1:], V[:,1:].T))\n #for checking decomposition is occurring properly:\n #print numpy.shape(U)\n #print numpy.shape(S)\n #print numpy.shape(Vt)\n #print numpy.shape(matrix_reduced)\n\n #print \"#\" * 30\n #print \"SVD eigenvectors/loadings:\"\n #print header[:var_num] , \"\\n\"\n #print U # need to work out appropriate way to calculate loadings!\n #print \"#\" * 30\n #print \"checking distance of loadings (eigen vectors)\"\n #for col in loadings[:,:]:\n # print col\n # print numpy.sqrt(sum([ a ** 2 for a in col ]))\n\n print \"PCA explained variance:\"\n print [ (z ** 2 / sumval) for z in s ]\n\n # * if M is considered to be an (observations, features) matrix, the PCs\n # themselves would correspond to the rows of S^(1/2)*V.T. if M is\n # (features, observations) then the PCs would be the columns of\n # U*S^(1/2).\n\n #q_scores = numpy.dot(numpy.sqrt(S), V.T)\n q_scores = numpy.dot(U, numpy.sqrt(S))\n\n pp = PdfPages(cluster.exportPath[0:-4] + '.PCA.pdf')\n if three_dim: # plot a three dimensional graph:\n fig = plt.figure(1)\n ax = fig.add_subplot(111, projection='3d')\n for idx in range(len(colourlist)):\n xs = q_scores[idx,0]\n ys = q_scores[idx,1]\n zs = q_scores[idx,2]\n name = re.search('[FS][LP][0-9]+',names[idx]).group(0)\n ax.scatter(xs, ys, zs, c=colourlist[idx][0], marker='o')\n ax.text(xs, ys, zs, name)\n\n ax.set_xlabel(\"PC1 (%.2f%%)\" % (100.0 * (s[0]**2)/sumval))\n ax.set_ylabel(\"PC2 (%.2f%%)\" % (100.0 * (s[1]**2)/sumval))\n ax.set_zlabel(\"PC3 (%.2f%%)\" % (100.0 * (s[2]**2)/sumval))\n\n plt.savefig(pp, format='pdf')\n plt.show()\n else: # plot two 2D graphs instead:\n for idx in range(len(colourlist)):\n fig = plt.figure(1)\n\n sub1 = fig.add_subplot(2,1,1)\n sub1.plot(q_scores[idx,0], q_scores[idx,1], colourlist[idx])\n plt.xlabel( \"PC1 (%.2f%%)\" % (100.0 * (s[0]**2)/sumval) )\n plt.ylabel( \"PC2 (%.2f%%)\" % (100.0 * (s[1]**2)/sumval) )\n sub1.annotate( names[idx], xy=(q_scores[idx,0], q_scores[idx,1]),xytext=(-15,10), xycoords='data', textcoords='offset points' )\n\n sub2 = fig.add_subplot(2,1,2)\n sub2.plot(q_scores[idx,0], q_scores[idx,2], colourlist[idx])\n plt.xlabel( \"PC1 (%.2f%%)\" % (100.0 * (s[0]**2)/sumval) )\n plt.ylabel( \"PC3 (%.2f%%)\" % (100.0 * (s[2]**2)/sumval) )\n sub2.annotate( names[idx], xy=(q_scores[idx,0],q_scores[idx,2]),xytext=(-15,10), xycoords='data', textcoords='offset points' )\n\n plt.savefig(pp, format='pdf')\n plt.show()\n\n plt.close()\n return matrix_reduced", "def reduce(x, reduce='IncrementalPCA', ndims=3, normalize=None, align=None,\n model=None, model_params=None, internal=False):\n\n # sub functions\n def reduce_list(x, model):\n split = np.cumsum([len(xi) for xi in x])[:-1]\n x_r = np.vsplit(model.fit_transform(np.vstack(x)), split)\n if len(x)>1:\n return [xi for xi in x_r]\n else:\n return [x_r[0]]\n\n # dictionary of models\n models = {\n 'PCA' : PCA,\n 'IncrementalPCA' : IncrementalPCA,\n 'SparsePCA' : SparsePCA,\n 'MiniBatchSparsePCA' : 
MiniBatchSparsePCA,\n 'KernelPCA' : KernelPCA,\n 'FastICA' : FastICA,\n 'FactorAnalysis' : FactorAnalysis,\n 'TruncatedSVD' : TruncatedSVD,\n 'DictionaryLearning' : DictionaryLearning,\n 'MiniBatchDictionaryLearning' : MiniBatchDictionaryLearning,\n 'TSNE' : TSNE,\n 'Isomap' : Isomap,\n 'SpectralEmbedding' : SpectralEmbedding,\n 'LocallyLinearEmbedding' : LocallyLinearEmbedding,\n 'MDS' : MDS\n }\n\n # deprecated warning\n if (model is not None) or (model_params is not None):\n warnings.warn('Model and model params will be deprecated. Please use the \\\n reduce keyword. See API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.tools.reduce.html#hypertools.tools.reduce')\n reduce = {}\n reduce['model'] = model\n reduce['params'] = model_params\n\n # if model is None, just return data\n if reduce is None:\n return x\n else:\n\n # common format\n x = format_data(x, ppca=True)\n\n # deprecation warnings\n if normalize is not None:\n warnings.warn('The normalize argument will be deprecated for this function. Please use the \\\n analyze function to perform combinations of these transformations. See API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.analyze.html#hypertools.analyze')\n x = normalizer(x, normalize=normalize)\n\n if align is not None:\n warnings.warn('The align argument will be deprecated for this function. Please use the \\\n analyze function to perform combinations of these transformations. See API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.analyze.html#hypertools.analyze')\n x = aligner(x, align=align)\n\n # if the shape of the data is already less than ndims, just return it\n if all([i.shape[1]<=ndims for i in x]):\n return x\n\n # if reduce is a string, find the corresponding model\n if type(reduce) in [str, np.string_]:\n model = models[reduce]\n model_params = {\n 'n_components' : ndims\n }\n # if its a dict, use custom params\n elif type(reduce) is dict:\n if type(reduce['model']) is str:\n model = models[reduce['model']]\n if reduce['params'] is None:\n model_params = {\n 'n_components' : ndims\n }\n else:\n model_params = reduce['params']\n if 'n_components' not in model_params:\n model_params['n_components'] = ndims\n\n # initialize model\n model = model(**model_params)\n\n # reduce data\n x_reduced = reduce_list(x, model)\n\n # return data\n if internal or len(x_reduced)>1:\n return x_reduced\n else:\n return x_reduced[0]", "def pca_detector(data):\n #- 'vol_shape' is the shape of volumes\n vol_shape = data.shape[:-1]\n #- 'n_vols' is the number of volumes\n n_vols = data.shape[-1]\n #- N is the number of voxels in a volume\n N = np.prod(vol_shape)\n\n #- Reshape to 2D array that is voxels by volumes (N x n_vols)\n # transpose to n_vols x N\n X = data.reshape((N, n_vols)).T\n\n \"\"\"\n The first part of the code will use PCA to get component matrix U\n and scalar projections matrix C\n \"\"\"\n\n #- Calculate unscaled covariance matrix for X\n unscaled_cov = X.dot(X.T)\n\n #- Use SVD to return U, S, VT matrices from unscaled covariance\n U, S, VT = npl.svd(unscaled_cov)\n\n #- Calculate the scalar projections for projecting X onto the vectors in U.\n #- Put the result into a new array C.\n C = U.T.dot(X)\n # set nans to 0\n C[np.isnan(C)] = 0\n #- Transpose C\n #- Reshape C to have the 4D shape of the original data volumes.\n C_vols = C.T.reshape((vol_shape + (n_vols,)))\n\n \"\"\"\n The second part of the code determines which voxels are inside the brain\n and which are outside the brain 
and creates a mask (boolean matrix)\n \"\"\"\n\n #get the mean voxel intensity of entire 4D object\n mean_voxel = np.mean(data)\n #get the mean volume (3D) across time series (axis 3)\n mean_volume = np.mean(data, axis=3)\n #boolean mask set to all voxels above .5 in the first volume\n #(.125 is the SPM criterion but .5 seems like a better threshold)\n mask = mean_volume > (.5 * mean_voxel) #threshold can be adjusted!\n out_mask = ~mask\n\n \"\"\"\n The third part of code finds the root mean square of U from step 1, then uses the\n mask from step 2 to determine which components explain data outside the brain\n Selects these \"bad components\" with high \"outsideness\"\n \"\"\"\n\n #Apply mask to C matrix to get all voxels outside of brain\n outside = C_vols[out_mask]\n #Get RMS of the voxels outside, reflecting \"outsideness\" of this scan\n RMS_out = np.sqrt(np.mean((outside ** 2), axis=0))\n\n #Apply mask to C matrix to get all voxels inside brain\n inside = C_vols[mask]\n #Get RMS of the voxels inside, reflecting \"insideness\" of this scan\n RMS_in = np.sqrt(np.mean((inside ** 2), axis=0))\n\n #The closer this ratio is to 1, the worse the volume\n RMS_ratio = RMS_out / RMS_in\n\n \"\"\"\n The fourth part of the code uses the \"bad components\" to generate a new\n \"bad data set\" and then puts this dataset through the outlier detector\n \"\"\"\n\n #Create a boolean mask for the 10% worst PCs (meaning highest RMS ratio)\n PC_bad = np.percentile(RMS_ratio, 90)\n PC_bad_mask = RMS_ratio > PC_bad\n\n U_bad = U[:, PC_bad_mask]\n C_bad = C[PC_bad_mask]\n\n #generates data set based on the bad PCs and (U and C matrices)\n X_bad = U_bad.dot(C_bad).T.reshape((vol_shape + (n_vols,)))\n\n # calculate outliers using iqr_detector\n _, outliers = mah_detector(X_bad)\n\n return X_bad, outliers", "def project_embeddings(embedding, num_dimensions=3, t=0.0, fig=None, ax=None, bins=None, gridsize=50):\n \n if (embedding.shape[-1] > 2):\n transform = PCA(n_components=num_dimensions)\n output_transform = transform.fit_transform(embedding)\n else:\n output_transform = embedding\n \n if num_dimensions == 2:\n xmin = output_transform[:, 0].min() - t\n xmax = output_transform[:, 0].max() + t\n ymin = output_transform[:, 1].min() - t\n ymax = output_transform[:, 1].max() + t\n\n plt.hexbin(output_transform[:, 0], output_transform[:, 1], bins=bins, gridsize=gridsize)\n if ax is None:\n ax = fig.add_subplot(111)\n plt.axis([xmin, xmax, ymin, ymax])\n plt.xlabel('PCA dim 1')\n plt.ylabel('PCA dim 2')\n plt.title('Embedding visualization')\n elif num_dimensions == 3:\n result=pd.DataFrame(output_transform, columns=['PCA%i' % i for i in range(3)])\n if ax is None:\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(result['PCA0'], result['PCA1'], result['PCA2'], cmap=\"Set2_r\", s=10)\n\n # make simple, bare axis lines through space:\n xAxisLine = ((min(result['PCA0']), max(result['PCA0'])), (0, 0), (0,0))\n ax.plot(xAxisLine[0], xAxisLine[1], xAxisLine[2], 'r')\n yAxisLine = ((0, 0), (min(result['PCA1']), max(result['PCA1'])), (0,0))\n ax.plot(yAxisLine[0], yAxisLine[1], yAxisLine[2], 'r')\n zAxisLine = ((0, 0), (0,0), (min(result['PCA2']), max(result['PCA2'])))\n ax.plot(zAxisLine[0], zAxisLine[1], zAxisLine[2], 'r')\n\n # label the axes\n ax.set_xlabel(\"PC1\")\n ax.set_ylabel(\"PC2\")\n ax.set_zlabel(\"PC3\")\n ax.set_title(\"Embedding visualization\")\n\n \n return output_transform, ax", "def pca_genre_actor(self, genre):\n\n genre_actor_frame = self.get_genre_actor_data_frame()\n rank_weight_dict = 
self.assign_rank_weight(genre_actor_frame[['movieid', 'actor_movie_rank']])\n genre_actor_frame = self.combine_computed_weights(genre_actor_frame, rank_weight_dict, \"TFIDF\", genre)\n temp_df = genre_actor_frame[[\"movieid\", \"actorid_string\", \"total\"]].drop_duplicates()\n genre_actor_tfidf_df = temp_df.pivot(index='movieid', columns='actorid_string', values='total')\n genre_actor_tfidf_df = genre_actor_tfidf_df.fillna(0)\n genre_actor_tfidf_df.to_csv('genre_actor_matrix.csv', index = True , encoding='utf-8')\n\n df = pd.DataFrame(pd.read_csv('genre_actor_matrix.csv'))\n df1 = genre_actor_tfidf_df.values[:, :]\n column_headers = list(df)\n del column_headers[0]\n\n column_headers_names = []\n\n for col_head in column_headers:\n col_head_name = util.get_actor_name_for_id(int(col_head))\n column_headers_names = column_headers_names + [col_head_name]\n\n (U, s, Vh) = util.PCA(df1)\n\n # To print latent semantics\n latents = util.get_latent_semantics(4, Vh)\n util.print_latent_semantics(latents, column_headers_names)\n\n u_frame = pd.DataFrame(U[:, :4], index=column_headers)\n v_frame = pd.DataFrame(Vh[:4, :], columns=column_headers)\n u_frame.to_csv('u_1b_pca.csv', index=True, encoding='utf-8')\n v_frame.to_csv('vh_1b_pca.csv', index=True, encoding='utf-8')\n return (u_frame, v_frame, s)", "def pca(X_train, X_test, n):\n\n print \"Extracting %d principle components from %d features\" % \\\n (n, X_train.shape[1])\n t0 = time()\n pca = RandomizedPCA(n_components=n, whiten=True, random_state=47).fit(X_train)\n print \"done in %0.3fs\" % (time() - t0)\n \n print \"Transforming the input data\"\n t0 = time()\n X_train_pca = pca.transform(X_train)\n X_test_pca = pca.transform(X_test)\n print \"done in %0.3fs\" % (time() - t0)\n\n return X_train_pca, X_test_pca", "def generate_incremental_pca(loom,\n layername,\n batch_size=512,\n n_components=50,\n min_size_for_incrementalization=5000):\n\n from tqdm import tqdm\n from sklearn.decomposition import IncrementalPCA, PCA\n from panopticon.analysis import generate_pca_loadings\n import numpy as np\n batch_size_altered = False\n while loom.shape[1] % batch_size < n_components:\n batch_size += 1\n batch_size_altered = True\n if batch_size_altered:\n print(\n \"Batch size increased to {} so that smallest batch will be greater than n_components\"\n .format(batch_size))\n if loom.shape[1] < min_size_for_incrementalization:\n print(\n \"Loom size below threshold for incremental PCA; running conventional PCA\"\n )\n pca = PCA(n_components=n_components)\n pca.fit(loom[layername][:, :].T)\n else:\n pca = IncrementalPCA(n_components=n_components)\n n_splits = loom.shape[1] // batch_size\n selections = np.array_split(np.arange(loom.shape[1]), n_splits)\n for selection in tqdm(selections):\n pca.partial_fit(loom[layername][:, selection].T)\n\n for i in range(50):\n loom.ra['{} PC {}'.format(layername, i + 1)] = pca.components_[i]\n loom.attrs['NumberPrincipalComponents_{}'.format(layername)] = n_components\n loom.attrs['PCAExplainedVarianceRatio_{}'.format(\n layername)] = pca.explained_variance_ratio_\n generate_pca_loadings(loom, layername, batch_size=batch_size)", "def pca(\n data: AnnData,\n n_components: int = 50,\n features: str = \"highly_variable_features\",\n standardize: bool = True,\n max_value: float = 10,\n random_state: int = 0,\n) -> None:\n\n keyword = select_features(data, features)\n\n start = time.time()\n\n X = data.uns[keyword]\n\n if standardize:\n # scaler = StandardScaler(copy=False)\n # scaler.fit_transform(X)\n m1 = X.mean(axis=0)\n 
psum = np.multiply(X, X).sum(axis=0)\n std = ((psum - X.shape[0] * (m1 ** 2)) / (X.shape[0] - 1.0)) ** 0.5\n std[std == 0] = 1\n X -= m1\n X /= std\n\n if max_value is not None:\n X[X > max_value] = max_value\n X[X < -max_value] = -max_value\n\n pca = PCA(n_components=n_components, random_state=random_state)\n X_pca = pca.fit_transform(X)\n\n data.obsm[\"X_pca\"] = X_pca\n data.uns[\n \"PCs\"\n ] = pca.components_.T # cannot be varm because numbers of features are not the same\n data.uns[\"pca\"] = {}\n data.uns[\"pca\"][\"variance\"] = pca.explained_variance_\n data.uns[\"pca\"][\"variance_ratio\"] = pca.explained_variance_ratio_\n\n end = time.time()\n logger.info(\"PCA is done. Time spent = {:.2f}s.\".format(end - start))", "def get_features_from_pca(feat_num, feature):\n\n if feature == 'HoG':\n vocab = np.load('vocab_hog.npy')\n elif feature == 'SIFT':\n vocab = np.load('vocab_sift.npy')\n\n # Your code here. You should also change the return value.\n\n def _get_PCA_vectors(feat_num, vocab):\n\n mean = vocab.mean(axis=0, keepdims=True)\n vocab_normalized = vocab - np.multiply(np.ones([vocab.shape[0], mean.shape[0]]),\n mean)\n #TEST: mean unit test\n #mean = vocab_normalized.mean(axis=0, keepdims=True)\n\n cov_matrix = np.cov(np.transpose(vocab_normalized))\n sigma, V = np.linalg.eig(cov_matrix)\n order_sigma = np.argsort(sigma)\n\n PCA_vectors = []\n i = 1\n for f in range(len(order_sigma)):\n eigen_vector = V[:, order_sigma[i]]\n if all(True for _ in np.isreal(eigen_vector)):\n PCA_vectors.append(np.real(eigen_vector))\n i += 1\n if len(PCA_vectors) == feat_num:\n break\n\n return np.array(PCA_vectors)\n\n #MAIN\n PCA_vectors = _get_PCA_vectors(feat_num, vocab)\n\n d = np.dot(vocab, np.transpose(PCA_vectors))\n\n return np.dot(vocab, np.transpose(PCA_vectors))\n #return np.zeros((vocab.shape[0],2))", "def get3dPCA(data):\n\n return PCA(n_components = 3).fit_transform(data)", "def plot_PCA():\n X, languages = prepare_data_matrix()\n #print(X)\n eigenvectors, eigenvalues=power_iteration_two_components(X)\n explain = explained_variance_ratio(X, eigenvectors, eigenvalues)\n X=project_to_eigenvectors(X,eigenvectors)\n\n #print(X)\n plt.title('Explained variance: %.3f' % explain)\n plt.scatter(X[:,0], X[:,1])\n for i in range(len(X)):\n plt.text(X[i,0], X[i,1], languages[i][:3])\n plt.show()", "def pca(self):\n return DataFramePCA(self.subset_)", "def do_pca(x_data, n_class):\n\n run_pca = decomposition.PCA(n_components = n_class)\n pca_fit = run_pca.fit(x_data)\n #pca_fit\n x_pca = run_pca.transform(x_data);\n #pca_cov = run_pca.get_covariance(x_pca)\n #pca_score = run_pca.score(x_data)\n pca_noise = pca_fit.noise_variance_\n pca_var_explained = pca_fit.explained_variance_ratio_\n\n return x_pca, pca_noise, pca_var_explained", "def _pca(self):\n mean_beam = np.mean(self.beam_images, axis=1, keepdims=False)\n mask = self.mask\n beam_images = self.beam_images[:, :self.n_beam_images]\n\n # Subtract mean_beam from images and apply the mask. 
Element-wise\n # multiplication and subtraction using numpy broadcasting (as commented\n # out below) requires 3 large matrices in memory at an intermediate\n # point in the computation, namely right after (beam_images -\n # mean_beam_2d) is evaluated and memory for centered_masked_images is\n # allocated.\n # mask_2d = mask[:,np.newaxis]\n # mean_beam_2d = mean_beam[:,np.newaxis]\n # centered_masked_images = mask_2d * (beam_images - mean_beam_2d)\n\n # Instead of that direct approach, use self._center_and_mask_numba() or\n # self._center_and_mask_in_place(). As of this writing the _in_place\n # version is faster, but this may change in the future since the numba\n # version supports parallelization.\n centered_masked_images = self._center_and_mask_in_place(\n beam_images,\n mask,\n mean_beam,\n )\n # centered_masked_images should be C-contiguous already but it's good to\n # make sure.\n centered_masked_images = np.ascontiguousarray(centered_masked_images)\n\n # Compute the masked principal components\n # -1 since last eigenvector isn't necessarily orthogonal to the others.\n n_eigs = min(self.n_beam_images - 1, self.max_principal_components)\n n_eigs = max(n_eigs, 1) # Need at least one.\n # .T means transpose, @ means matrix multiplication.\n cov_mat = centered_masked_images.T @ centered_masked_images\n del centered_masked_images # Free up memory.\n if self.use_sparse_routines:\n variances, principal_components = eigsh(\n cov_mat, k=n_eigs, which='LM')\n else:\n eigvals_param = (\n self.n_beam_images - n_eigs,\n self.n_beam_images - 1)\n # overwrite_a might reduce memory usage\n variances, principal_components = eigh(\n cov_mat, eigvals=eigvals_param, overwrite_a=True)\n del cov_mat # Free up memory.\n\n # Reverse ordering to put largest eigenvectors/eigenvalues first\n principal_components = np.fliplr(principal_components)\n variances = np.flip(variances)\n\n # principal_components isn't always C-contiguous, and when it's not the\n # matrix multiplication below becomes extremely slow. It's much faster\n # to make it C-contiguous first so that numpy can use faster matrix\n # multiplication routines behind the scenes.\n principal_components = np.ascontiguousarray(principal_components)\n\n # Construct the un-masked basis vectors.\n centered_images = beam_images - mean_beam[:, np.newaxis]\n # centered_images should be C-contiguous already but it's good to make\n # sure.\n centered_images = np.ascontiguousarray(centered_images)\n principal_components = centered_images @ principal_components\n del centered_images # Free up memory.\n\n # As of this writing, self._normalize_vectorized() is faster than using\n # self._normalize_numba() despite the fact that the latter is uses numba\n # and allows for parallelization. 
That may change in the future though.\n principal_components = self._normalize_vectorized(\n principal_components,\n mask,\n )\n\n return mean_beam, principal_components, variances", "def apply_PCA(data, ncomp):\n import sklearn.decomposition as dc\n \n pca = dc.PCA(n_components=ncomp, whiten=False, svd_solver='full')\n cps = pca.fit_transform(data)\n svl = pca.singular_values_\n return cps,pca,svl", "def run(self, data):\n\t\treduced_data = PCA(n_components=2).fit_transform(data)\n\n\t\t# Run the algorithm\n\t\tself.estimator.fit_transform(reduced_data)\n\n\t\t# Save all relevent properties\n\t\tself.input_data = data\n\t\tself.centroids = self.estimator.cluster_centers_\n\t\tself.node_positions = reduced_data\n\t\tself.labels = self.estimator.labels_\n\n\t\t# Enable visualising when debugging\n\t\t# self.visualize(reduced_data)", "def _reduceFeatures(self):\n # Adds up all profiles corresponding to each author,\n # then compiles into a matrix of these \"group\" profiles.\n group_profiles = {auth : zeros(len(self.alph)**self.N) for auth in set(self.train_data[1])}\n for i in range(len(self.train_data[1])):\n group_profiles[self.train_data[1][i]] += self.train_data[0][i]\n profile_matrix = array([group_profiles[auth] for auth in group_profiles])\n\n # Takes the variances for all features across the \"group\" profiles,\n # then extracts the indices of the features with the highest variances.\n vars = profile_matrix.var(axis=0)\n self.feature_indices = argsort(vars)[-self.features:]\n # Recompiles the training data.\n self.train_data[0] = array([prof[self.feature_indices] for prof in self.train_data[0]])", "def feature_cPCA24(wv, n_components=12, incremental=False, batch_size=None):\n if incremental:\n raise NotImplementedError(\"Can't run incremental PCA yet.\")\n\n ers = np.reshape(np.transpose(wv[:24, :, :], axes=(1, 0, 2)), (24 * N_CHANNELS, -1))\n pca = PCA(n_components)\n scores = pca.fit_transform(ers.T)\n return scores", "def feature_cPCA(wv, n_components=12, incremental=False, batch_size=None):\n if incremental:\n raise NotImplementedError(\"Can't run incremental PCA yet.\")\n\n ers = np.reshape(np.transpose(wv, axes=(1, 0, 2)), (N_SAMPLES * N_CHANNELS, -1))\n pca = PCA(n_components)\n scores = pca.fit_transform(ers.T)\n return scores", "def preprocess_features(npdata, pca=128):\n _, ndim = npdata.shape\n npdata = npdata.astype('float32')\n\n # Using PCA didn't help in our case.\n \n # Apply PCA-whitening with Faiss\n #mat = faiss.PCAMatrix (ndim, pca, eigen_power=-0.9)\n #mat.train(npdata)\n #assert mat.is_trained\n #npdata = mat.apply_py(npdata)\n\n\n # L2 normalization\n row_sums = np.linalg.norm(npdata, axis=1)\n npdata = npdata / row_sums[:, np.newaxis]\n\n return npdata", "def transform(self, X):\n check_is_fitted(self, 'pca_')\n # check on state of X and cols\n X, _ = validate_is_pd(X, self.cols)\n cols = _cols_if_none(X, self.cols)\n\n other_nms = [nm for nm in X.columns if nm not in cols]\n transform = self.pca_.transform(X[cols].as_matrix())\n\n # do weighting if necessary\n if self.weight:\n # get the weight vals\n weights = self.pca_.explained_variance_ratio_\n weights -= np.median(weights)\n weights += 1\n\n # now add to the transformed features\n transform *= weights\n\n left = pd.DataFrame.from_records(data=transform,\n columns=[('PC%i' % (i + 1)) for i in range(transform.shape[1])])\n\n # concat if needed\n x = pd.concat([left, X[other_nms]], axis=1) if other_nms else left\n return x if self.as_df else x.as_matrix()", "def PCA_subtraction(im, ref_lib, 
num_PCA_modes):\n print('Performing PCA background subtraction using {} modes'.format(num_PCA_modes))\n #concatenate input image into 1-D array\n im_x = im.shape[1]\n im_y = im.shape[0]\n \n im = im.ravel()\n\n num_PCA_modes = np.array(num_PCA_modes)\n \n # reads list of reference frames into data matrix by first concatenating the 2-D .fits images\n # into 1-D arrays and then row stacking these images into a 2-D np.array\n try:\n ref_frames = np.stack([fits.getdata(ref_lib[i]).ravel() for i in range(len(ref_lib))], axis=0)\n except:\n ref_frames = np.stack([ref_lib[i].ravel() for i in range(len(ref_lib))], axis=0)\n\n # subtracts the mean of each reference frame from each reference frame \n ref_frames_mean_sub = ref_frames - np.nanmean(ref_frames, axis=1)[:, None]\n ref_frames_mean_sub[np.where(np.isnan(ref_frames_mean_sub))] = 0\n \n # import pdb; pdb.set_trace()\n # creates covariance matrix from mean subtracted reference frames \n covar_psfs = np.cov(ref_frames_mean_sub)\n tot_basis = covar_psfs.shape[0]\n \n num_PCA_modes = np.clip(num_PCA_modes - 1, 0, tot_basis-1) # clip values, for output consistency we'll keep duplicates\n max_basis = np.max(num_PCA_modes) + 1 # maximum number of eigenvectors/KL basis we actually need to use/calculate\n \n # calculates eigenvalues and eigenvectors of the covariance matrix, but only the ones we need (up to max basis)\n evals, evecs = la.eigh(covar_psfs, eigvals=(tot_basis-max_basis, tot_basis-1))\n \n evals = np.copy(evals[::-1])\n evecs = np.copy(evecs[:,::-1], order='F') \n \n # calculates the PCA basis vectors\n basis_vecs = np.dot(ref_frames_mean_sub.T, evecs)\n basis_vecs = basis_vecs * (1. / np.sqrt(evals * (np.size(im) - 1)))[None, :] #multiply a value for each row\n \n #subtract off the mean of the input frame\n im_mean_sub = im - np.nanmean(im)\n \n # duplicate science image by the max_basis to do simultaneous calculation for different number of PCA modes\n im_mean_sub_rows = np.tile(im_mean_sub, (max_basis, 1))\n im_rows_selected = np.tile(im_mean_sub, (np.size(num_PCA_modes), 1)) # this is the output image which has less rows\n \n # bad pixel mask\n # do it first for the image we're just doing computations on but don't care about the output\n im_nanpix = np.where(np.isnan(im_mean_sub_rows))\n im_mean_sub_rows[im_nanpix] = 0\n # now do it for the output image\n im_nanpix = np.where(np.isnan(im_rows_selected))\n im_rows_selected[im_nanpix] = 0\n \n inner_products = np.dot(im_mean_sub_rows, np.require(basis_vecs, requirements=['F']))\n # select the KLIP modes we want for each level of KLIP by multiplying by lower diagonal matrix\n lower_tri = np.tril(np.ones([max_basis, max_basis]))\n inner_products = inner_products * lower_tri\n \n # make a model background for each number of basis vectors we actually output\n model = np.dot(inner_products[num_PCA_modes,:], basis_vecs.T)\n \n # subtract model from input frame for each number of PCA modes chosen\n PCA_sub_images = (im_rows_selected - model).reshape(np.size(num_PCA_modes), im_y, im_x)\n\n #Adding back in the mean to the model so that the model can be subtracted from the original image later. \n if type(num_PCA_modes) is np.int64:\n return PCA_sub_images[0], model.reshape(im_y, im_x)+np.nanmean(im)\n elif type(num_PCA_modes) is np.ndarray:\n return PCA_sub_images, model.reshape(np.size(num_PCA_modes), im_y, im_x)+np.nanmean(im)\n \n else:\n print('Unsupported datatype for variable: num_PCA_modes. 
Variable must be either int or 1-D np.ndarray')", "def run_pca(data_file, rs, n_components, outfile1, outfile2):\n print('running PCA with n_components={}'.format(n_components))\n day_batcher = DayBatcher(data_file, skiprow=1, delimiter=' ')\n mat = day_batcher.next_batch()\n rst = []\n while mat is not None:\n if mat.shape[1] == 13:\n # use compact10d\n datadict = {'features': mat[:, 3:],\n 'red': mat[:, 2],\n 'user': mat[:, 1],\n 'day': mat[:, 0]}\n else:\n # use all_fixed\n datadict = {'features': mat[:, 14:],\n 'red': mat[:, 13],\n 'user': mat[:, 1],\n 'day': mat[:, 0]}\n batch = scale(datadict['features'])\n pca = PCA(n_components=n_components, random_state=rs)\n pca.fit(batch)\n data_reduced = np.dot(batch, pca.components_.T) # pca transform\n data_original = np.dot(data_reduced, pca.components_) # inverse_transform\n pointloss = np.mean(np.square(batch - data_original), axis=1)\n loss = np.mean(pointloss)\n for d, u, t, l, in zip(datadict['day'].tolist(),\n datadict['user'].tolist(),\n datadict['red'].tolist(),\n pointloss.flatten().tolist()):\n rst.append((u, d, l, t))\n mat = day_batcher.next_batch()\n train_rst, test_rst = split_train_test(rst)\n save_rst(train_rst, outfile1)\n save_rst(test_rst, outfile2)\n eval_cr(test_rst, 'pca')", "def test_reduce_features_size(self):\n # Get some data\n data = array([[0.564, 20.661, 1], [-18.512, 41.168, -1],\n [-0.009, 20.440, 7]])\n cdata = CData(data)\n\n # ===================================\n # Perform PCA to reduce to 2 features\n # ===================================\n\n # Reduce by nearest int closest to 60%, rounding up\n frac = 0.6\n cdata.reduce_features(frac)\n self.assertTrue(cdata.data.shape == (3, 2))", "def main(desc_key, fxyz, peratom, scale, pca_d, keep_raw=False, output=None, prefix='ASAP'):\n\n if output is None:\n output = prefix + \"-pca-d\" + str(pca_d) + '.xyz'\n peratom = bool(peratom)\n\n # read the xyz file\n frames = ase.io.read(fxyz, ':')\n n_frames = len(frames)\n print('load xyz file: ', fxyz, ', a total of ', str(n_frames), 'frames')\n\n # extract the descriptors from the file\n desc = []\n if n_frames == 1 and not peratom:\n raise RuntimeError('Per-config PCA not possible on a single frame')\n\n # retrieve the SOAP vectors --- both of these throw a ValueError if any are missing or are of wrong shape\n if peratom:\n desc = np.concatenate([a.get_array(desc_key) for a in frames])\n else:\n desc = np.row_stack([a.info[desc_key] for a in frames])\n\n # scale & center\n if scale:\n from sklearn.preprocessing import StandardScaler\n scaler = StandardScaler()\n print('DEBUG: {}'.format(desc.shape))\n print(scaler.fit(desc))\n desc = scaler.transform(desc) # normalizing the features\n\n # fit PCA\n proj, pvec = pca(desc, pca_d)\n # could do with sklearn as well\n # from sklearn.decomposition import PCA\n # pca_sklearn = PCA(n_components=4) # can set svd_solver\n # proj = pca_sklearn.fit_transform(desc)\n # pvec = pca_sklearn.components_\n\n # add coords to info/arrays\n if peratom:\n running_index = 0\n for at in frames:\n n_atoms = len(at)\n at.arrays['pca_coord'] = proj[running_index:running_index + n_atoms, :].copy()\n running_index += n_atoms\n\n if not keep_raw:\n for at in frames:\n del at.arrays[desc_key]\n else:\n for i, at in enumerate(frames):\n at.info['pca_coord'] = proj[i]\n\n if not keep_raw:\n for at in frames:\n del at.info[desc_key]\n\n # save\n ase.io.write(output, frames, write_results=False)", "def optimize_pca(X,Y):\n # {0, 10, 20, ..., 590} \n for n in range(0,599,10):\n \n #Fit PCA\n pca = 
PCA(n_components=n).fit(X)\n # Plot variance\n pylab.scatter(n, sum(pca.explained_variance_ratio_))\n \n #Place 95% line.\n pylab.axhline(y=0.95, color='r')", "def pca(data):\n mean = data.sum(axis=0) / data.shape[0]\n # show_image(mean)\n cv_matrix = np.cov(data.T)\n e_values, e_vectors = la.eig(cv_matrix)\n return e_values, e_vectors.T, mean", "def project(self, new_expn):\n \"\"\"\n data = numpy.array(self.parent.serialisedArrayDataList)\n import sklearn\n skpca = sklearn.decomposition.PCA()\n X_r = skpca.fit(data).transform(data)\n \n self.__v = X_r\n \"\"\"\n # old martrisx\n matrix = numpy.array(self.parent.serialisedArrayDataList)\n U, S, V = numpy.linalg.svd(matrix.T, full_matrices=False)\n \n print(\"matrix\", matrix.shape)\n \n # set-ups\n self.parent = new_expn\n if self.rowwise:\n self.labels = new_expn[self.label_key]\n else:\n self.labels = new_expn.getConditionNames()\n \n matrix = numpy.array(self.parent.serialisedArrayDataList)\n S = numpy.diag(S)\n print(\"U\", U.shape)\n print(\"V\", V.shape)\n print(\"S\", S.shape)\n print(\"matrix\", matrix.shape)\n \n #data = np.dot(U, np.dot(S, V))\n #X_transformed = np.dot(X_transformed, self.V.T)\n print(numpy.dot(S, V).shape)\n\n pr = numpy.dot(matrix, S)\n print(\"pr\", pr.shape)\n #y = x*W;\n #y0 = Y(1,:);\n #sum(abs(y0 - y)) %\n \n # I want a new v. U and D are the same.\n \n self.__v = pr\n \n print(U)\n print()\n print(pr)\n \n print(numpy.allclose(U, pr)) \n print(numpy.allclose(matrix.T, numpy.dot(U, numpy.dot(S, V))))\n return(True)", "def plot_embedding_pca(features, labels):\n\n import bob.learn.linear\n import matplotlib.pyplot as mpl\n\n colors = ['#FF0000', '#FFFF00', '#FF00FF', '#00FFFF', '#000000',\n '#AA0000', '#AAAA00', '#AA00AA', '#00AAAA', '#330000']\n\n # Training PCA\n trainer = bob.learn.linear.PCATrainer()\n machine, lamb = trainer.train(features.astype(\"float64\"))\n\n # Getting the first two most relevant features\n projected_features = machine(features.astype(\"float64\"))[:, 0:2]\n\n # Plotting the classes\n n_classes = max(labels)+1\n fig = mpl.figure()\n\n for i in range(n_classes):\n indexes = numpy.where(labels == i)[0]\n\n selected_features = projected_features[indexes,:]\n mpl.scatter(selected_features[:, 0], selected_features[:, 1],\n marker='.', c=colors[i], linewidths=0, label=str(i))\n mpl.legend()\n return fig", "def feature_chwPCA(wv, dims=3, energy_normalize=True):\n pcas = []\n pca_scores = []\n for d in range(4):\n pca = PCA(n_components=dims)\n data = wv[:, d, :].T.astype('float64').copy()\n # data_s = data - np.mean(data, axis=0) # this messes things up?!\n if energy_normalize:\n l2 = feature_energy(data.T)[:, np.newaxis]\n\n # With dead channels we end up with zero-energy waveforms sometimes, resulting in division by zero.\n zero_energy_waveforms = (l2 == 0).nonzero()\n if zero_energy_waveforms[0].shape[0]:\n logger.warning(\n 'Found {} instances of zero-energy waveforms in channel {}. 
Settings those to energy=1.0'.format(\n zero_energy_waveforms[0].shape[0], d))\n l2[zero_energy_waveforms] = 1.0\n\n # normaliz\n # e all waveforms by their l2 norm/energy\n data /= l2\n\n scores = pca.fit_transform(data)\n if np.isnan(scores).any():\n logger.warning('NaN in PCA scores, setting those to 0.0')\n scores.nan_to_num(0)\n pca_scores.append(scores)\n pcas.append(pca)\n pca_scores = np.concatenate(pca_scores, axis=1)\n return pca_scores", "def getPCA(data):\n #covM = np.cov(data.T) #note that np.cov define row as variables, col as observations\n #corM = np.corrcoef(data.T) # we will use correlation matrix instead of cov.\n covM = np.cov(data.T)\n eigvalue,eigvector = np.linalg.eig(covM) # each col of the eigvector matrix corresponds to one eigenvalue. So, each col is the coeff of one component\n pca = np.dot(data,eigvector) # each col is one pca, each row is one obs in that pca. \n return eigvalue,eigvector,pca", "def do_PCA_and_save(activations_dir, save_dir, seed=None):\n if seed is None:\n seed = set_seed()\n\n layers = ['layer_1', 'layer_2', 'layer_3', 'layer_4', 'layer_5', 'layer_6',\n 'layer_7', 'layer_8']\n\n # Number of Principal Components\n n_components = 100\n\n if not op.exists(save_dir):\n os.makedirs(save_dir)\n\n for layer in tqdm(layers):\n regex = activations_dir + '/*' + layer + '.npy'\n activations_file_list = sorted(glob.glob(regex))\n feature_dim = np.load(activations_file_list[0])\n x = np.zeros((len(activations_file_list), feature_dim.shape[0]))\n for i, activation_file in enumerate(activations_file_list):\n temp = np.load(activation_file)\n x[i, :] = temp\n x_train = x[:1000, :]\n x_test = x[1000:, :]\n\n x_test = StandardScaler().fit_transform(x_test)\n x_train = StandardScaler().fit_transform(x_train)\n ipca = PCA(n_components=n_components, random_state=seed)\n ipca.fit(x_train)\n\n x_train = ipca.transform(x_train)\n x_test = ipca.transform(x_test)\n train_save_path = op.join(save_dir, \"train_\" + layer)\n test_save_path = op.join(save_dir, \"test_\" + layer)\n np.save(train_save_path, x_train)\n np.save(test_save_path, x_test)", "def pca_transform(X, n_components=None):\n return PCA(n_components=n_components).fit_transform(X)", "def performPCA(dataSet, numShapesInDataset, numPointsInShapes, num_components):\n\tdataMat = np.array(dataSet).reshape((numShapesInDataset, numPointsInShapes*2))\n\t\n\t\"\"\"Creating the covariance matrix\"\"\"\n\tcovarMat = np.cov(dataMat.T)\n\t\t\n\t\"\"\"Generating the eigen vectors and eigen values\"\"\"\n\teigVals, eigVecs = np.linalg.eig(covarMat)\n\n\t\"\"\"Taking the first num_components eigen vectors and values, and the center of the space.\"\"\"\n\tprincipleComponents = np.real(eigVecs[:, 0:num_components])\n\tprincipleValues = np.real(eigVals[0:num_components])\n\tmeanShape = dataMat.mean(0).reshape((numPointsInShapes * 2, 1))\n\treturn principleComponents, principleValues, meanShape", "def _compute_util_data(self):\n\n print(\"Computing PCA of document vectors.\")\n self.pca = PCA(n_components = 3)\n\n print(\"Computing document clusters in PCA basis.\")\n inferred_vecs = np.array([self.model.infer_vector(doc.words) for doc in self.tagged_docs])\n self.pca_reduced_vecs = self.pca.fit_transform(inferred_vecs)\n n_clusters = 25 # TODO find way to determine approx cluster size\n self.kmeans = KMeans(init = 'k-means++', n_clusters = n_clusters, random_state = 0)\n self.kmeans_preds = self.kmeans.fit_predict(self.pca_reduced_vecs)", "def reduce_dims(data: tuple, threshold: float, verbose: bool, principal_components: 
int =0) -> tuple:\n # Standardising\n normal_atts = StandardScaler().fit_transform(\n data[0], data[1])\n\n # Determining how many components to reduce to (confidence lvl set in options)\n pca = PCA(svd_solver='auto')\n reduced_atts = pca.fit_transform(normal_atts)\n\n if principal_components == 0:\n # number of components that would push to set confidence level\n set_principal_comps = len(normal_atts[0])\n total_explained_var = 0.0\n\n for i, ratio in enumerate(pca.explained_variance_ratio_):\n total_explained_var += ratio\n\n # if TEVar is above threshold, reduce to this many components\n if total_explained_var > threshold and i < len(normal_atts[0])-1:\n set_principal_comps = i+1\n break\n\n principal_components = set_principal_comps\n else:\n set_principal_comps = principal_components\n\n # Reduce\n pca = PCA(svd_solver='auto', n_components=set_principal_comps)\n reduced_atts = pca.fit_transform(normal_atts)\n\n if verbose:\n print_PCA_variance_ratios(pca.explained_variance_ratio_)\n return (reduced_atts, data[1])", "def process_features(features, neutral_factor):\n\n N = create_neutral_vector(np.array([[features.shape[1], 1]]), features.shape[0])\n\n pc_projections, pcs = pca.neutral_sub_pca_vector(features, neutral_factor*N)\n\n return pc_projections, pcs", "def PCA(X, k):\n cov = np.matmul(np.matrix.transpose(X), X)\n w, v = np.linalg.eig(cov)\n k_largest = np.argsort(w)[::-1][:k]\n v = np.matrix.transpose(v)\n U = v[k_largest]\n S = w[k_largest]\n return U, S", "def performpca(df, nb_pc=5):\n # Remove uncomplete series\n print(df.shape)\n normalized=(df-df.mean())/df.std()\n # normalized.plot()\n # plt.show()\n pca = PCA(nb_pc)\n pca.fit(normalized)\n return pca, normalized", "def pca_decomposition(data, dept, n_components=12):\n try:\n df_svd = pivot_df(data, dept)\n pca = PCA(n_components=n_components)\n df_low = pca.fit_transform(df_svd)\n df_inverse = pca.inverse_transform(df_low)\n\n # re-frame\n df_inverse = reframe_df(previous_df=df_svd, processed_data=df_inverse)\n return df_inverse\n\n except:\n # if pca fail,\n return pivot_df(data, dept)", "def removePCA(self,pcaId):\n if pcaId in self.openConnections:\n del self.openConnections[pcaId]", "def pca_pubdev_4167_OOM():\n h2o.remove_all()\n transform_types = [\"NONE\", \"STANDARDIZE\", \"NORMALIZE\", \"DEMEAN\", \"DESCALE\"] # make sure we check all tranforms\n transformN = transform_types[randint(0, len(transform_types)-1)]\n print(\"transform used on dataset is {0}.\\n\".format(transformN))\n\n training_data = h2o.import_file(path=pyunit_utils.locate(\"/Users/wendycwong/gitBackup/SDatasets/pubdev_4167_Avkash/m120K.tar\")) # Nidhi: import may not work\n\n gramSVDPCA = H2OPCA(k=training_data.ncols, transform=transformN)\n gramSVDPCA.train(x=list(range(0, training_data.ncols)), training_frame=training_data)\n\n powerSVDPCA = H2OPCA(k=training_data.ncols, transform=transformN, pca_method=\"Power\")\n powerSVDPCA.train(x=list(range(0, training_data.ncols)), training_frame=training_data)\n\n # compare singular values and stuff between power and GramSVD methods\n print(\"@@@@@@ Comparing eigenvalues between GramSVD and Power...\\n\")\n pyunit_utils.assert_H2OTwoDimTable_equal(gramSVDPCA._model_json[\"output\"][\"importance\"],\n powerSVDPCA._model_json[\"output\"][\"importance\"],\n [\"Standard deviation\", \"Cumulative Proportion\", \"Cumulative Proportion\"],\n tolerance=1e-5, check_all=False)\n print(\"@@@@@@ Comparing eigenvectors between GramSVD and Power...\\n\")\n # compare singular vectors\n 
pyunit_utils.assert_H2OTwoDimTable_equal(gramSVDPCA._model_json[\"output\"][\"eigenvectors\"],\n powerSVDPCA._model_json[\"output\"][\"eigenvectors\"],\n powerSVDPCA._model_json[\"output\"][\"names\"], tolerance=1e-1,\n check_sign=True)", "def pca(X: np.array, k: int) -> np.array:\n n, d = X.shape\n X = X - np.mean(X, 0) # mean value of each dimension\n C = np.dot(np.transpose(X), X) # covariance matrix\n if not PCA._check_real_symmetric(C):\n raise ArithmeticError('Covariance matrix is not real symmetric')\n eig_val, eig_vec = np.linalg.eig(C) # eigenvalue, eigenvector\n eig_pairs = [(np.abs(eig_val[i]), eig_vec[:, i]) for i in range(d)] # eigen-value-vector tuples\n topk_pairs = heapq.nlargest(k, eig_pairs) # retrieve top-k eigenvalue pairs\n P = np.array([pair[1] for pair in topk_pairs]) # permutation matrix\n return np.dot(np.real(P), np.transpose(X)).T", "def reduce_dimensions(feature_vectors_full, model):\n \"\"\"Subtracting the mean of the feature vectors being classified\n as opposed to the average of the test data's feature vectors seems to\n improve performance (I think this is to do with the noise; as on clean pages\n the average for a pixel that's white in all feature vectors would be 255. In\n a noisy image, it'd be lower, so the white areas wouldn't get \"centred\"\n around white by subtracting 255 any more.).\n \"\"\"\n return np.dot(\n (feature_vectors_full - np.mean(feature_vectors_full,axis=0)), \n np.array(model[\"eigenvectors\"]))", "def pca(X, k):\n n, dim = X.shape\n\n # Center the data\n X_mean = np.mean(X, axis = 0)\n X = X - X_mean\n # Get the covariance matrix\n covariance_matrix = np.dot(X.T, X) / (n - 1)\n eigval, eigvec = eigs(covariance_matrix, k)\n return np.array(eigvec), np.array(eigval)", "def reduce_svd(embeddings, seed=0):\n svd = TruncatedSVD(n_components=2, n_iter=10, random_state=seed)\n return svd.fit_transform(embeddings)", "def pca_helper(_args):\n # unpack args\n _trimmed_frame, _win, _sou_name, _sou_dir, _out_path, \\\n _library, _library_names_short, _fwhm, _plsc, _sigma, _nrefs, _klip = _args\n\n # run pca\n try:\n output = pca(_trimmed_frame=_trimmed_frame, _win=_win, _sou_name=_sou_name,\n _sou_dir=_sou_dir, _out_path=_out_path,\n _library=_library, _library_names_short=_library_names_short,\n _fwhm=_fwhm, _plsc=_plsc, _sigma=_sigma, _nrefs=_nrefs, _klip=_klip)\n return output\n except Exception as _e:\n print(_e)\n return None\n # finally:\n # return None", "def rescale_intrinsic(self):\n # scale focal length and principal points wrt image resizeing\n if self.downscale > 1:\n self.K = self.K_orig.copy()\n self.K[0, 0] /= float(self.downscale)\n self.K[1, 1] /= float(self.downscale)\n self.K[0, 2] /= float(self.downscale)\n self.K[1, 2] /= float(self.downscale)\n self.intrinsic = self.K\n else:\n self.K = self.intrinsic = self.K_orig.copy()", "def apply_algorithms(x: np.ndarray, label_true, params, components, database_name):\n names = ['Original dataset', 'Our PCA results', 'KMeans with previous our PCA reduction',\n 'KMeans without previous reduction (PCA)', 'KMeans without previous reduction (T-SNE)']\n\n datasets = []\n labels = []\n reduct = []\n\n # get the representation of the original matrix splitted to be plotted\n partial_x = split_db_original(x, components)\n datasets.append(partial_x)\n labels.append(label_true)\n reduct.append(None)\n\n # get our PCA\n pca = OPCA(n_components=params['n_components'])\n our_pca = pca.fit_transform(x)\n datasets.append(our_pca)\n labels.append(label_true)\n reduct.append(None)\n\n # get PCA and 
IPCA from sklearn\n sk_pca = pca_sklearn(x, params['db_name'], params['n_components'])\n sk_ipca = ipca_sklearn(x, params['db_name'], params['n_components'])\n\n # compare the three PCA algorithms\n name = ['Our PCA', 'SK_PCA', 'SK_IPCA', 'original_data']\n pca_data = [our_pca, sk_pca['db'], sk_ipca['db'], x]\n apply_evaluation(pca_data, label_true, params, name, database_name)\n\n # KMeans with PCA reduction\n algorithm = KMeans(k=params['k'], seed=params['seed'], max_it=params['max_it'], tol=params['tol'])\n labels_kmeans = algorithm.fit_predict(our_pca)\n datasets.append(our_pca)\n labels.append(labels_kmeans)\n reduct.append(None)\n\n # KMeans without PCA reduction\n algorithm = KMeans(k=params['k'], seed=params['seed'], max_it=params['max_it'], tol=params['tol'])\n labels_kmeans = algorithm.fit_predict(x)\n datasets.append(x)\n labels.append(labels_kmeans)\n reduct.append('pca')\n datasets.append(x)\n labels.append(labels_kmeans)\n reduct.append('tsne')\n\n # selection number of dimensions of plot\n if type(params['n_components']) == int:\n if params['n_components'] == 2:\n nd = 2\n if params['n_components'] > 2:\n nd = 3\n elif type(params['n_components']) == float:\n if our_pca.shape[1] == 2:\n nd = 2\n if our_pca.shape[1] > 2:\n nd = 3\n else:\n nd = 3\n\n if nd == 2:\n pca_names = ['PCA Component 1', 'PCA Component 2']\n plot_names = [components[0], pca_names, pca_names, pca_names, ['TSNE 1', 'T-SNE 2']]\n plot2d(datasets, labels, names, plot_names, reduct)\n elif nd == 3:\n pca_names = ['PCA Component 1', 'PCA Component 2', 'PCA Component 3']\n plot_names = [components[0], pca_names, pca_names, pca_names, ['TSNE 1', 'T-SNE 2', 'T-SNE 3']]\n plot3d(datasets, labels, names, plot_names, reduct)", "def run_PCA(self, sparse_matrix):\n\n pca_explained = np.cumsum(PCA().fit(sparse_matrix).explained_variance_ratio_)\n pca_explainedby = np.where(pca_explained>=0.9)[0][0]\n pca = PCA(n_components=pca_explainedby)\n pca.fit(sparse_matrix)\n \n today = datetime.date.today()\n filename = 'sparse_pca_model.pkl'\n joblib.dump(pca, filename)\n \n return pca.transform(sparse_matrix), pca", "def reconstruct(X_input, X_mean, X_std, Y, P, e_scaled, x_col = 0, y_col = 1, dimensions = [0, 1, 2, 3]):\n\t# Reconstruction degrees information retention (~25%, ~50%, ~75%, and ~100%).\n\tfor d in dimensions:\n\t\t# Reconstruct \n\t\tY_proj = Y.iloc[:,0:(d + 1)]\n\t\tX_rec = (Y_proj @ P.iloc[:,0:(d + 1)].T) * X_std + X_mean\n\t\tX_rec.columns = X_input.columns\n\n\t\t# Cumulate percentage information retained\n\t\tdata_retained = e_scaled[range(d + 1)].sum() * 100\n\n\t\tml.plt.figure()\n\t\tml.plt.title(f'Raw vs. 
Reconstructed D = {d + 1}')\n\t\tml.sns.scatterplot(data = X_input, x = X_input.iloc[:, x_col], y = X_input.iloc[:, y_col], alpha = 0.5, color = 'k', label = 'Raw Data (100%)')\n\t\tml.sns.scatterplot(data = X_rec, x = X_rec.iloc[:, x_col], y = X_rec.iloc[:, y_col], alpha = 0.5, color = 'r', label = f'Reconstructed Data ({data_retained: .2f}%)')", "def testPCA(d = 10, N = 5000, k = 4, min_iter_nr = 20):\n\n print \"Input: dim, samples nr = %d, %d - Output: latent factors nr = %d\" % (d, N, k)\n mu = uniform(1, d)*3.+2.\n sigma = uniform((d,))*0.01\n A = normal(size=(k,d))\n\n # latent variables\n y = normal(0., 1., size=(N, k))\n # observations\n noise = normal(0., 1., size=(N, d)) * sigma\n x = dot(y, A) + mu + noise\n \n # Testing PCA \n for _b, _n in product((True, False), (min_iter_nr, )):\n t_start = time.time()\n PCA = pca(x.T, k = k)\n PCA.InferandLearn(max_iter_nr = _n, svd_on = _b)\n print \"PCA(svd_on=%s, max_iter_nr=%d) learned in %.5f seconds\" % (str(_b), _n, time.time() - t_start)\n print PCA.C\n print \"-\"*70", "def _pca_motion(\n confounds_out, confounds_raw, n_components=0.95, motion_model=\"6params\",\n):\n\n # Run PCA to reduce parameters\n\n motion_confounds = _add_motion_model(motion_model)\n motion_parameters_raw = confounds_raw[list(motion_confounds)]\n\n if n_components == 0:\n confounds_pca = motion_parameters_raw\n\n else:\n motion_parameters_raw = motion_parameters_raw.dropna()\n pca = PCA(n_components=n_components)\n confounds_pca = pd.DataFrame(pca.fit_transform(motion_parameters_raw.values))\n confounds_pca.columns = [\n \"motion_pca_\" + str(col + 1) for col in confounds_pca.columns\n ]\n\n # Add motion parameters to confounds dataframe\n confounds_out = pd.concat((confounds_out, confounds_pca), axis=1)\n\n return confounds_out", "def do_pca(X, y, components: int = 2, plot: bool = True):\n\n new_X = []\n for i in X:\n new_X.append(i.flatten())\n\n X = new_X\n\n # PCA Stuff?\n pca = PCA(n_components=components)\n pca.fit(X)\n\n # Transform input data based on eigenvectors\n X = pca.transform(X)\n\n # Get scatters\n x = [i[0] for i in X]\n w = [i[1] for i in X]\n\n # plot\n\n plt.scatter(x, w, c=y)\n plt.show()", "def pca_data(train_data_lst, test_data_lst, data_anots):\r\n \r\n train_data_pca = []\r\n test_data_pca = []\r\n new_anots = []\r\n\r\n for idx in range(len(train_data_lst)):\r\n pca = PCA(n_components=0.985)\r\n X_train = pca.fit_transform(train_data_lst[idx])\r\n train_data_pca.append(X_train)\r\n \r\n X_test = pca.transform(test_data_lst[idx])\r\n test_data_pca.append(X_test)\r\n new_anots.append(data_anots[idx]+'_pca')\r\n return train_data_pca, test_data_pca, new_anots", "def compute_pca(image_set):\n\n # Check for valid input\n assert(image_set[0].dtype == np.uint8)\n\n # Reshape data into single array\n reshaped_data = np.concatenate([image\n for pixels in image_set for image in\n pixels])\n\n # Convert to float and normalize the data between [0, 1]\n reshaped_data = (reshaped_data / 255.0).astype(np.float32)\n\n # Calculate covariance, eigenvalues, and eigenvectors\n # np.cov calculates covariance around the mean, so no need to shift the\n # data\n covariance = np.cov(reshaped_data.T)\n e_vals, e_vecs = np.linalg.eigh(covariance)\n\n # svd can also be used instead\n # U, S, V = np.linalg.svd(mean_data)\n\n pca = np.sqrt(e_vals) * e_vecs\n\n return pca", "def make_embeddings(self):\n\t\tprint(\"Presetting embedding weights\")\n\t\t\t\n\t\tnp.random.seed(0)\n\t\tweights = np.random.uniform(low = -0.05, high = 0.05, size = (self.FREQCAP, 
self.EMB_SIZE))\n\t\t\n\t\tcounter = 0\n\n\t\twords = []\n\t\tweights_tmp = []\n\n\t\twith open(self.embeddingpath) as handle:\n\t\t\tfor i, line in enumerate(handle):\n\t\t\t\ttmp = line.strip()\n\t\t\t\tif len(tmp) > 0:\n\t\t\t\t\tsplit = tmp.split(\" \")\n\t\t\t\t\tif split[0] in self.worddict and len(split[1:]) == 300:\n\t\t\t\t\t\twords.append(split[0])\n\t\t\t\t\t\tweights_tmp.append([float(a) for a in split[1:]])\n\t\t\n\t\tweights_tmp = np.array(weights_tmp)\n\n\t\tfor word, column in zip(words, weights_tmp):\n\t\t\tif self.worddict[word] < self.FREQCAP:\n\t\t\t\tcounter += 1\n\t\t\t\tweights[self.worddict[word],:] = column\n\t\t\n\t\tprint(\"Set\", counter, \"of\", weights.shape[0], \"columns\")\n\t\t\n\t\tif self.EMB_SIZE < weights.shape[-1]:\n\t\t\tprint(\"Reducing dimensionality to\", self.EMB_SIZE)\n\t\t\tpca = PCA(self.EMB_SIZE)\n\t\t\tweights = pca.fit_transform(weights)\n\t\t\n\t\tself.embeddings = [weights]", "def inverse_pca(self, pca_img, components):\n reconstruct = np.dot(pca_img, components.T).astype(int)\n return reconstruct.reshape(-1, 28, 28)", "def pca_weights_init(self, data):\n if self._input_len == 1:\n msg = 'The data needs at least 2 features for pca initialization'\n raise ValueError(msg)\n self._check_input_len(data)\n if len(self._neigx) == 1 or len(self._neigy) == 1:\n msg = 'PCA initialization inappropriate:' + \\\n 'One of the dimensions of the map is 1.'\n warn(msg)\n pc_length, pc = linalg.eig(cov(transpose(data)))\n pc_order = argsort(-pc_length)\n for i, c1 in enumerate(linspace(-1, 1, len(self._neigx))):\n for j, c2 in enumerate(linspace(-1, 1, len(self._neigy))):\n self._weights[i, j] = c1*pc[:, pc_order[0]] + \\\n c2*pc[:, pc_order[1]]" ]
[ "0.6760861", "0.6653581", "0.662987", "0.65325135", "0.6529479", "0.6509681", "0.65022707", "0.6487274", "0.64560676", "0.64380586", "0.6406564", "0.63860345", "0.63095546", "0.62621355", "0.6250614", "0.62483966", "0.62223923", "0.6219198", "0.6173256", "0.6162128", "0.60764277", "0.6066418", "0.60400647", "0.59910566", "0.5984332", "0.5983929", "0.5971605", "0.58940965", "0.58774984", "0.5870393", "0.58338773", "0.58279824", "0.58161306", "0.58019155", "0.5780132", "0.5775006", "0.5770352", "0.568095", "0.56508565", "0.56494635", "0.56169367", "0.55823684", "0.5581609", "0.5580835", "0.5565461", "0.5555786", "0.5535791", "0.5533281", "0.5497704", "0.5496657", "0.5493377", "0.5491426", "0.54846776", "0.54689294", "0.54672617", "0.5459503", "0.5449749", "0.54274106", "0.542221", "0.5414159", "0.5414023", "0.5407035", "0.54027754", "0.5400414", "0.5385983", "0.53691214", "0.5367658", "0.5352688", "0.534819", "0.5331634", "0.53225327", "0.5292007", "0.5286369", "0.5283492", "0.5282615", "0.52419317", "0.52340215", "0.52243215", "0.5211583", "0.51863414", "0.5174032", "0.5154027", "0.5141622", "0.5140665", "0.5139667", "0.51392204", "0.5131163", "0.51282483", "0.51272166", "0.51266015", "0.51201534", "0.51143444", "0.5109736", "0.5108579", "0.51075554", "0.51054263", "0.50987494", "0.5091249", "0.50854737", "0.50653756" ]
0.66272324
3
Provides various metrics between predictions and labels.
def metrics(metric_type: str, preds: list, labels: list): assert metric_type in ['flat_accuracy', 'f1', 'roc_auc', 'precision', 'recall', 'ap'], 'Metrics must be one of the following: [\'flat_accuracy\', \'f1\', \'roc_auc\', \'precision\', \'recall\', \'ap\']' labels = np.array(labels) # preds = np.concatenate(np.asarray(preds)) if metric_type == 'flat_accuracy': pred_flat = np.argmax(preds, axis=1).flatten() labels_flat = labels.flatten() return np.sum(pred_flat == labels_flat) / len(labels_flat) elif metric_type == 'f1': return f1_score(labels, preds) elif metric_type == 'roc_auc': return roc_auc_score(labels, preds) elif metric_type == 'precision': return precision_score(labels, preds) elif metric_type == 'recall': return recall_score(labels, preds) elif metric_type == 'ap': return average_precision_score(labels, preds)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_metrics(pred, labels):\n pred_flat = np.argmax(pred, axis = 1).flatten()\n labels_flat = labels.flatten()\n \n flat_accuracy = np.sum(pred_flat == labels_flat) / len(labels_flat)\n \n # sklearn takes first parameter as the true label\n precision = precision_score(labels_flat, pred_flat)\n recall = recall_score(labels_flat, pred_flat)\n \n return flat_accuracy, precision, recall", "def metrics(self, predictions, gts, label_list):\n prediction_labels = np.concatenate([predictions.flatten()])\n gt_labels = np.concatenate([gts.flatten()])\n\n cm = metrics.confusion_matrix(\n gt_labels,\n prediction_labels,\n range(len(label_list)))\n\n # print(\"Confusion matrix :\")\n # print(cm)\n # print(\"---\")\n # Compute global accuracy\n accuracy = sum([cm[x][x] for x in range(len(cm))])\n total = sum(sum(cm))\n oa = accuracy * 100 / float(total)\n # print(\"{} pixels processed\".format(total))\n # print(\"Total accuracy : {}%\".format(accuracy * 100 / float(total)))\n # print(\"---\")\n # Compute kappa coefficient\n total = np.sum(cm)\n pa = np.trace(cm) / float(total)\n pe = np.sum(np.sum(cm, axis=0) * np.sum(cm, axis=1)) / float(total * total)\n kappa = (pa - pe) / (1 - pe)\n # print(\"Kappa: \" + str(kappa))\n return kappa, oa", "def make_metrics(self, predictions):\n\n pred_idx = []\n pred_classes = []\n\n target_idx = []\n target_classes = []\n target_count = len(self._dataset.class_idx2text)\n\n for data_id, pred in predictions.items():\n target = self._dataset.get_ground_truth(data_id)\n\n pred_idx.append(pred[\"class_idx\"])\n pred_classes.append(self._dataset.class_idx2text[pred[\"class_idx\"]])\n\n target_idx.append(target[\"class_idx\"])\n target_classes.append(target[\"class_text\"])\n\n metrics = {\n \"accuracy\": simple_accuracy(pred_idx, target_idx),\n }\n\n if target_count == 2:\n # binary class\n f1_metric = f1(pred_idx, target_idx)\n metrics.update(f1_metric)\n\n matthews_corr_metric = matthews_corr(pred_idx, target_idx)\n metrics.update(matthews_corr_metric)\n return metrics", "def get_metrics(y_true, y_pred):\n return {'acc': np.mean(y_true == y_pred)}", "def update_metrics(self, metrics, predictions, labels):\n return", "def estimate_metrics(\n self,\n all_labels,\n all_preds\n ):\n n_predictions = len(all_preds)\n\n for metric in self.metrics:\n # report everything but loss\n if metric.__name__ is not \"loss\":\n if isinstance(all_preds[0], list):\n result = np.mean([metric(labels, preds) for preds,labels in zip(all_preds, all_labels)])\n else:\n result = metric(all_labels, all_preds)\n \n if metric.__name__ in self.multi_batch_metrics:\n self.multi_batch_metrics[metric.__name__].append(result)\n self.multi_batch_metrics[\"len_\" + metric.__name__].append(\n n_predictions)\n else:\n self.multi_batch_metrics[metric.__name__] = [result]\n self.multi_batch_metrics[\"len_\" + metric.__name__] = [n_predictions]", "def metrics(outputs, labels, threshold):\n\n #convert the torch tensors to numpy\n y_pred = outputs.numpy()\n y_true = labels.numpy()\n \n #Predict 0/1 for each class based on threshold\n y_pred[y_pred > threshold] = 1\n y_pred[y_pred <= threshold] = 0\n \n #Calculate various metrics, for multilabel, multiclass problem\n accuracy = accuracy_score(y_true, y_pred)\n Hloss = hamming_loss(y_true, y_pred)\n precision = precision_score(y_true, y_pred, average = 'macro')\n recall = recall_score(y_true, y_pred, average = 'macro')\n F1_score = f1_score(y_true, y_pred, average = 'macro')\n \n macro_score = {'accuracy': accuracy, 'Hloss': Hloss, 'precision': precision, 
'recall':recall, 'F1_score':F1_score }\n \n # compare outputs with labels and divide by number of tokens (excluding PADding tokens)\n return macro_score", "def compute_batch_metrics(y_true, y_pred, num_labels = 4): \n \n # Declarating list to store results\n acc = []\n pre = []\n rec = []\n det = []\n rmse = []\n \n for batch in np.arange(y_true.shape[0]):\n \n # Declarating list to store individual results\n batch_acc = []\n batch_pre = []\n batch_rec = []\n batch_det = []\n batch_rmse = []\n \n for label in np.arange(num_labels):\n \n # Computing and storing metrics for each class\n batch_acc.append(accuracy_score(y_true[batch, label, :], y_pred[batch, label, :]))\n batch_pre.append(precision_score(y_true[batch, label, :], y_pred[batch, label, :], zero_division = 1))\n batch_rec.append(recall_score(y_true[batch, label, :], y_pred[batch, label, :], zero_division = 1))\n batch_det.append(detection_rate(y_true[batch, label, :], y_pred[batch, label, :]))\n batch_rmse.append(sqrt(mse(y_true[batch, label, :], y_pred[batch, label, :])))\n \n # Storing mean results of the instance\n acc.append(np.mean(batch_acc))\n pre.append(np.mean(batch_pre))\n rec.append(np.mean(batch_rec))\n det.append(np.mean(batch_det))\n rmse.append(np.mean(batch_rmse))\n \n # Returning mean of all results\n return np.mean(acc), np.mean(pre), np.mean(rec), np.mean(det), np.mean(rmse)", "def compute_metrics(self):\n self.finalize_output_dict()\n self.metric_dict = {\n key: value(self.output_dict[\"labels\"], self.output_dict[\"pred_probs\"])\n for key, value in self.metric_fns.items()\n }", "def eval_metrics(y, pred):\n classification_error = np.sum(pred != y) / float(y.shape[0])\n return classification_error", "def get_metrics(self, x, y):\n metrics = dict()\n y_predicted = self.predict(x)\n metrics['Accuracy'] = accuracy_score(y, y_predicted)\n metrics['F1'] = f1_score(y, y_predicted)\n metrics['Precision'] = precision_score(y, y_predicted)\n\n return metrics", "def calculate_metrics(self):\n sensitivity = TP + FN\n sensitivity = TP / sensitivity\n\n specificity = TN + FP\n specificity = TN / specificity\n\n accuracy = TP + FP + TN + FN\n divisor = TP + TN\n accuracy = divisor / accuracy\n\n positive_predictive = TP + FP\n positive_predictive = TP / positive_predictive\n\n negative_predictive = TN + FN\n negative_predictive = TN / negative_predictive\n\n # This is for format decimal in metrics\n sensitivity = float(\"{0:.4f}\".format(sensitivity))\n specificity = float(\"{0:.4f}\".format(specificity))\n accuracy = float(\"{0:.4f}\".format(accuracy))\n positive_predictive = float(\"{0:.4f}\".format(positive_predictive))\n negative_predictive = float(\"{0:.4f}\".format(negative_predictive))\n\n average = (sensitivity + specificity + accuracy + positive_predictive + negative_predictive) / 5\n\n average = float(\"{0:.4f}\".format(average))\n\n metrics = [sensitivity, specificity, accuracy,positive_predictive,negative_predictive, average]\n\n return metrics", "def get_metrics(y_test, y_predicted):\r\n # true positives / (true positives+false positives)\r\n precision = precision_score(y_test, y_predicted, pos_label=None,\r\n average='weighted')\r\n # true positives / (true positives + false negatives)\r\n recall = recall_score(y_test, y_predicted, pos_label=None,\r\n average='weighted')\r\n\r\n # harmonic mean of precision and recall\r\n f1 = f1_score(y_test, y_predicted, pos_label=None, average='weighted')\r\n\r\n # true positives + true negatives/ total\r\n accuracy = accuracy_score(y_test, y_predicted)\r\n return accuracy, 
precision, recall, f1", "def evaluate(labels, predictions):\n TP = 0\n actualP = 0\n TN = 0\n actualN = 0\n for label, prediction in zip(labels, predictions):\n if label ==1:\n actualP +=1\n if prediction == 1:\n TP +=1\n else:\n actualN +=1\n if prediction ==0:\n TN +=1\n \n sensitivity = float(TP/actualP)\n specificity = float(TN/actualN)\n return (sensitivity, specificity)", "def get_metrics(y_test, y_predicted):\n # true positives / (true positives+false positives)\n precision = precision_score(y_test, y_predicted, pos_label=None,\n average='weighted')\n # true positives / (true positives + false negatives)\n recall = recall_score(y_test, y_predicted, pos_label=None,\n average='weighted')\n\n # harmonic mean of precision and recall\n f1 = f1_score(y_test, y_predicted, pos_label=None, average='weighted')\n\n # true positives + true negatives/ total\n accuracy = accuracy_score(y_test, y_predicted)\n return accuracy, precision, recall, f1", "def calculate_metrics(self, predictions, actual):\n\n predictions.dtype = np.bool\n actual.dtype = np.bool\n\n N = len(predictions) * len(predictions[0])\n\n TP = np.sum(np.bitwise_and(predictions, actual))\n FP = np.sum(np.bitwise_and(np.invert(predictions), np.invert(actual) ))\n FN = np.sum(np.bitwise_and(predictions, np.invert(actual)))\n TN = np.sum(np.bitwise_and(np.invert(predictions), (actual)))\n\n correct = np.sum(predictions == actual) / N\n accuracy = (TP + TN) / N\n precision = TP / (TP + FP) # positive predictive value\n sensitivity = TP / (TP + FN) # true positive rate\n specificity = TN / (TN + FP) # true negative rate\n\n return correct, accuracy, precision, sensitivity, specificity", "def metrics(self):\n \n if self.mse.shape[0]>1:\n raise ValueError('Metrics can only handle single observations.')\n \n if self.N==1:\n pred = float('nan')\n err = float('nan')\n y_true = float('nan')\n else:\n pred = int(self._predictions[-1])\n err = self._mse[-1]\n y_true = int(self.label[0])\n \n is_outlier = {\"type\":\"GAUGE\",\"key\":\"is_outlier\",\"value\":pred}\n mse = {\"type\":\"GAUGE\",\"key\":\"mse\",\"value\":err}\n obs = {\"type\":\"GAUGE\",\"key\":\"observation\",\"value\":self.N - 1}\n threshold = {\"type\":\"GAUGE\",\"key\":\"threshold\",\"value\":self.threshold}\n \n label = {\"type\":\"GAUGE\",\"key\":\"label\",\"value\":y_true}\n \n accuracy_tot = {\"type\":\"GAUGE\",\"key\":\"accuracy_tot\",\"value\":self.metric[4]}\n precision_tot = {\"type\":\"GAUGE\",\"key\":\"precision_tot\",\"value\":self.metric[5]}\n recall_tot = {\"type\":\"GAUGE\",\"key\":\"recall_tot\",\"value\":self.metric[6]}\n f1_score_tot = {\"type\":\"GAUGE\",\"key\":\"f1_tot\",\"value\":self.metric[7]}\n f2_score_tot = {\"type\":\"GAUGE\",\"key\":\"f2_tot\",\"value\":self.metric[8]}\n \n accuracy_roll = {\"type\":\"GAUGE\",\"key\":\"accuracy_roll\",\"value\":self.metric[9]}\n precision_roll = {\"type\":\"GAUGE\",\"key\":\"precision_roll\",\"value\":self.metric[10]}\n recall_roll = {\"type\":\"GAUGE\",\"key\":\"recall_roll\",\"value\":self.metric[11]}\n f1_score_roll = {\"type\":\"GAUGE\",\"key\":\"f1_roll\",\"value\":self.metric[12]}\n f2_score_roll = {\"type\":\"GAUGE\",\"key\":\"f2_roll\",\"value\":self.metric[13]}\n \n true_negative = {\"type\":\"GAUGE\",\"key\":\"true_negative\",\"value\":self.metric[0]}\n false_positive = {\"type\":\"GAUGE\",\"key\":\"false_positive\",\"value\":self.metric[1]}\n false_negative = {\"type\":\"GAUGE\",\"key\":\"false_negative\",\"value\":self.metric[2]}\n true_positive = 
{\"type\":\"GAUGE\",\"key\":\"true_positive\",\"value\":self.metric[3]}\n \n nb_outliers_roll = {\"type\":\"GAUGE\",\"key\":\"nb_outliers_roll\",\"value\":self.metric[14]}\n nb_labels_roll = {\"type\":\"GAUGE\",\"key\":\"nb_labels_roll\",\"value\":self.metric[15]}\n nb_outliers_tot = {\"type\":\"GAUGE\",\"key\":\"nb_outliers_tot\",\"value\":self.metric[16]}\n nb_labels_tot = {\"type\":\"GAUGE\",\"key\":\"nb_labels_tot\",\"value\":self.metric[17]}\n \n return [is_outlier,mse,obs,threshold,label,\n accuracy_tot,precision_tot,recall_tot,f1_score_tot,f2_score_tot,\n accuracy_roll,precision_roll,recall_roll,f1_score_roll,f2_score_roll,\n true_negative,false_positive,false_negative,true_positive,\n nb_outliers_roll,nb_labels_roll,nb_outliers_tot,nb_labels_tot]", "def evaluate(labels, predictions):\n actual_positive = 0\n actual_negative = 0\n predicted_positive = 0\n predicted_negative = 0\n for i, j in zip(labels, predictions):\n if i == 1:\n actual_positive += i\n predicted_positive += j\n else:\n actual_negative += 1\n if j == 0:\n predicted_negative += 1\n return predicted_positive/actual_positive, predicted_negative/actual_negative", "def _finalize_labels_and_prediction(self):\n y_pred = torch.cat(self.y_pred, dim=0)\n y_true = torch.cat(self.y_true, dim=0)\n\n if (self.mean is not None) and (self.std is not None):\n # To compensate for the imbalance between labels during training,\n # we normalize the ground truth labels with training mean and std.\n # We need to undo that for evaluation.\n y_pred = y_pred * self.std + self.mean\n\n return y_pred, y_true", "def _finalize_labels_and_prediction(self):\n y_pred = torch.cat(self.y_pred, dim=0)\n y_true = torch.cat(self.y_true, dim=0)\n\n if (self.mean is not None) and (self.std is not None):\n # To compensate for the imbalance between labels during training,\n # we normalize the ground truth labels with training mean and std.\n # We need to undo that for evaluation.\n y_pred = y_pred * self.std + self.mean\n\n return y_pred, y_true", "def predict_and_analyze(dta, clf, eval_tweets):\n for tweet in eval_tweets:\n tweet['prediction'] = clf.predict([w2v_vector(dta, tweet['text'])])[0]\n\n metrics = {'tp': 0, 'fp': 0, 'tn': 0, 'fn': 0,\n 'precision': 0, 'recall': 0, 'f1': 0}\n for tweet in eval_tweets:\n actual_label = tweet['label']\n prediction = tweet['prediction']\n if actual_label == prediction:\n if prediction == 'relevant':\n metrics['tp'] += 1\n else:\n metrics['tn'] += 1\n else:\n if prediction == 'relevant':\n metrics['fp'] += 1\n else:\n metrics['fn'] += 1\n metrics['precision'] = float(metrics['tp']) / (metrics['tp'] + metrics['fp'])\n metrics['recall'] = float(metrics['tp']) / (metrics['tp'] + metrics['fn'])\n metrics['f1'] = 2 * ((metrics['precision'] * metrics['recall']) /\n (metrics['precision'] + metrics['recall']))\n return metrics", "def get_metrics(x, y, num_labels): \n total_f1_score = 0\n total_accuracy = 0\n \n for inp, out in zip(x, y): \n f1 = fscore(inp, list(out), labels=np.arange(num_labels), average='weighted')\n \n total_f1_score += f1\n total_accuracy += get_accuracy(inp, out) \n \n return total_f1_score/len(x), total_accuracy/len(x)", "def multiclass_metrics(pred, gt):\r\n eps=1e-6\r\n overall = {'precision': -1, 'recall': -1, 'f1': -1}\r\n NP, NR, NC = 0, 0, 0 # num of pred, num of recall, num of correct\r\n for ii in range(pred.shape[0]):\r\n pred_ind = np.array(pred[ii]>0.5, dtype=int)\r\n gt_ind = np.array(gt[ii]>0.5, dtype=int)\r\n inter = pred_ind * gt_ind\r\n # add to overall\r\n NC += np.sum(inter)\r\n NP += 
np.sum(pred_ind)\r\n NR += np.sum(gt_ind)\r\n if NP > 0:\r\n overall['precision'] = float(NC)/NP\r\n if NR > 0:\r\n overall['recall'] = float(NC)/NR\r\n if NP > 0 and NR > 0:\r\n overall['f1'] = 2*overall['precision']*overall['recall']/(overall['precision']+overall['recall']+eps)\r\n return overall", "def evaluate(true_labels, predicted_labels):\n accuracy = np.round(metrics.accuracy_score(true_labels, predicted_labels), \n 2)\n precision = np.round(metrics.precision_score(true_labels, predicted_labels, \n average='weighted'), 2)\n recall = np.round(metrics.recall_score(true_labels, predicted_labels,\n average='weighted'), 2)\n f1 = np.round(metrics.f1_score(true_labels, predicted_labels, \n average='weighted'), 2)\n \n return accuracy, precision, recall, f1", "def model_metrics(X, y, model, data_set = 'data_set'):\n score = model.score(X, y)\n matrix = confusion_matrix(y, model.predict(X))\n tpr = matrix[1,1] / (matrix[1,1] + matrix[1,0])\n fpr = matrix[0,1] / (matrix[0,1] + matrix[0,0])\n tnr = matrix[0,0] / (matrix[0,0] + matrix[0,1])\n fnr = matrix[1,0] / (matrix[1,1] + matrix[1,0])\n prc = matrix[1,1] / (matrix[1,1] + matrix[0,1])\n \n print(f'{data_set} accuracy score: {score:.2%}')\n print(f'{data_set} precision score {prc:.2%}')\n print(f'{data_set} recall score: {tpr:.2%}\\n')\n class_report = classification_report(y, model.predict(X), zero_division=True)\n print('-------------------------------')\n print(f'classification report')\n print(class_report)\n print ('-------------------------------\\n')\n print('confusion matrix')\n print(f'{matrix}\\n')\n print(f'{data_set} model metrics')\n print('---------------------------------')\n print(f'True positive rate for the model is {tpr:.2%}')\n print(f'False positive rate for the model is {fpr:.2%}')\n print(f'True negative rate for the model is {tnr:.2%}')\n print(f'False negative rate for the model is {fnr:.2%}\\n')", "def calc_metrics(model, X, y):\n\n # Get model predictions\n y_predict_r = model.predict(X)\n\n # Calculate evaluation metrics for assesing performance of the model.\n roc = roc_auc_score(y, y_predict_r)\n acc = accuracy_score(y, y_predict_r)\n prec = precision_score(y, y_predict_r)\n rec = recall_score(y, y_predict_r)\n f1 = f1_score(y, y_predict_r)\n\n return {\"acc\": acc, \"roc\": roc, \"prec\": prec, \"rec\": rec, \"f1\": f1}", "def evaluate(labels, predictions):\n correct_positive = 0\n correct_negative = 0\n total_positive = 0\n total_negative = 0\n\n for i in range(len(labels)):\n if labels[i] == 1:\n total_positive += 1\n if predictions[i] == 1:\n correct_positive += 1\n else:\n total_negative += 1\n if predictions[i] == 0:\n correct_negative += 1\n\n sensitivity = correct_positive / total_positive\n specificity = correct_negative / total_negative\n\n return sensitivity, specificity", "def _eval_classifier(self):\n\n y_pred_baseline = self.df_baseline[self.score_column]\n y_pred_sample = self.df_sample[self.score_column]\n\n y_label_baseline = self.df_baseline[self.label_column]\n y_label_sample = self.df_sample[self.label_column]\n\n precision_baseline = precision_score(y_label_baseline, y_pred_baseline)\n recall_baseline = recall_score(y_label_baseline, y_pred_baseline)\n acc_baseline = accuracy_score(y_label_baseline, y_pred_baseline)\n f1_baseline = f1_score(y_label_baseline, y_pred_baseline)\n try:\n auc_baseline = roc_auc_score(y_label_baseline, y_pred_baseline)\n except ValueError:\n auc_baseline = \"NA\"\n\n precision_sample = precision_score(y_label_sample, y_pred_sample)\n recall_sample = 
recall_score(y_label_sample, y_pred_sample)\n acc_sample = accuracy_score(y_label_sample, y_pred_sample)\n f1_sample = f1_score(y_label_sample, y_pred_sample)\n try:\n auc_sample = roc_auc_score(y_label_sample, y_pred_sample)\n except ValueError:\n auc_sample = \"NA\"\n\n metrics_df = pd.DataFrame(\n {\n \"Accuracy\": [acc_baseline, acc_sample],\n \"Precision\": [precision_baseline, precision_sample],\n \"Recall\": [recall_baseline, recall_sample],\n \"F1\": [f1_baseline, f1_sample],\n \"AUC\": [auc_baseline, auc_sample],\n },\n index=[\"baseline\", \"sample\"],\n )\n\n self.performance_comparison = metrics_df", "def metrics_evaluation(y_true, y_prob, threshold, df_type='train'):\n\n y_pred = (y_prob>=threshold).astype(int)\n \n tn = metrics.confusion_matrix(y_true, y_pred)[0][0]\n fp = metrics.confusion_matrix(y_true, y_pred)[0][1]\n fn = metrics.confusion_matrix(y_true, y_pred)[1][0]\n tp = metrics.confusion_matrix(y_true, y_pred)[1][1]\n\n accuracy_scr = metrics.accuracy_score(y_true, y_pred)\n precision_scr = metrics.precision_score(y_true, y_pred)\n recall_scr = metrics.recall_score(y_true, y_pred)\n f1_scr = metrics.f1_score(y_true, y_pred)\n roc_auc_scr = metrics.roc_auc_score(y_true, y_pred)\n\n result = {'Dataset': df_type, 'No obs': len(y_true), 'Threshold': threshold,\n 'TP':tp, 'FP': fp, 'TN': tn, 'FN':fn , \n 'Accuracy Score':accuracy_scr, 'Precision Score':precision_scr, \n 'Recall Score':recall_scr, 'F1 Score':f1_scr, 'ROC AUC Score':roc_auc_scr}\n\n return result", "def get_metrics(model, test_data):\n feats = test_data[:, :-1]\n gold_labels = test_data[:, -1]\n preds = model.predict_y(feats)\n preds_mean = preds[0].flatten()\n preds_var = preds[1]\n #print preds_mean[:10]\n #print gold_labels[:10]\n mae = MAE(preds_mean, gold_labels)\n rmse = np.sqrt(MSE(preds_mean, gold_labels))\n prs = pearson(preds_mean, gold_labels)\n nlpd = - np.mean(model.predict_density(feats, gold_labels[:, None]))\n return mae, rmse, prs, nlpd", "def score_calc(self, annotations, predictions):\n\n mean_probabilities_of_classes = np.expand_dims(np.mean(predictions, axis=0), axis=0)\n KL_d = predictions * (np.log(predictions + self.eps) - np.log(mean_probabilities_of_classes + self.eps))\n KL_D = KL_d.sum(axis=1)\n\n score = np.exp(np.mean(KL_D))\n return score", "def get_metrics(target, logits, one_hot_rep=True):\n\n if one_hot_rep:\n label = np.argmax(target, axis=1)\n predict = np.argmax(logits, axis=1)\n else:\n label = target\n predict = logits\n\n accuracy = accuracy_score(label, predict)\n\n precision = precision_score(label, predict)\n recall = recall_score(label, predict)\n f1_score_val = f1_score(label, predict)\n\n return accuracy, precision, recall, f1_score_val", "def evaluate(labels, predictions):\n # create 4 variables to represent sensitivity,specificity,total_positive values & total_negative values.\n sensitivity = float(0)\n specificity = float(0)\n\n total_positive = float(0)\n total_negative = float(0)\n\n # run through a for loop to evaluate the sensitivity and specificity of a data set\n for label, prediction in zip(labels, predictions):\n\n if label == 1:\n total_positive += 1\n if prediction == label:\n sensitivity += 1\n\n if label == 0:\n total_negative += 1\n if prediction == label:\n specificity += 1\n\n # data normalization\n sensitivity /= total_positive\n specificity /= total_negative\n\n return sensitivity, specificity", "def scores(self, y, y_pred):\n\n aucroc = 0.\n precision = 0.\n recall = 0.\n f1 = 0.\n aucroc_labs = 
np.zeros(self.datas[self.train_idx].n_labels)\n precision_labs = np.zeros(self.datas[self.train_idx].n_labels)\n recall_labs = np.zeros(self.datas[self.train_idx].n_labels)\n f1_labs = np.zeros(self.datas[self.train_idx].n_labels)\n label_ratios = np.mean(y, axis=0)\n\n if len(y) > 1:\n y_t = np.transpose(y)\n col_keep = np.ones(len(y_t), dtype=bool)\n for i, col_y in enumerate(y_t):\n if 0 not in col_y or 1 not in col_y:\n col_keep[i] = False\n\n if sum(col_keep) > 0:\n if not col_keep.all():\n y = np.transpose(y_t[col_keep])\n y_pred = np.transpose(np.transpose(y_pred)[col_keep])\n\n f1 = f1_score(y, self._round(y_pred), average=self.metrics_avg)\n s = f1_score(y, self._round(y_pred), average=None)\n f1_labs[col_keep] = s if sum(col_keep) > 1 else s[1]\n aucroc = roc_auc_score(y, y_pred, average=self.metrics_avg)\n aucroc_labs[col_keep] = roc_auc_score(y, y_pred, average=None)\n precision = precision_score(y, self._round(y_pred), average=self.metrics_avg)\n recall = recall_score(y, self._round(y_pred), average=self.metrics_avg)\n if sum(col_keep) > 1:\n precision_labs[col_keep] = precision_score(y, self._round(y_pred), average=None)\n recall_labs[col_keep] = recall_score(y, self._round(y_pred), average=None)\n else:\n precision_labs[col_keep] = precision_score(y, self._round(y_pred))\n recall_labs[col_keep] = recall_score(y, self._round(y_pred))\n elif self.verbose:\n print('*Cannot compute other metrics because no label in Truth has alternatives, only precision*')\n precision = precision_score(y, self._round(y_pred), average=self.metrics_avg)\n precision_labs = precision_score(y, self._round(y_pred), average=None)\n\n elif len(y) == 1:\n if self.verbose:\n print('*Cannot compute other metrics with %d samples, only precision*' % len(y))\n precision = precision_score(y, self._round(y_pred), average=self.metrics_avg)\n precision_labs = precision_score(y, self._round(y_pred), average=None)\n\n result = {\n 'aucroc': aucroc,\n 'precision': precision,\n 'recall': recall,\n 'f1': f1,\n 'aucroc_labs': aucroc_labs,\n 'precision_labs': precision_labs,\n 'recall_labs': recall_labs,\n 'f1_labs': f1_labs,\n 'label_ratios': label_ratios\n }\n\n return result", "def compute_metrics(self, results: Sequence[Dict]) -> Dict:\n\n preds = []\n gts = []\n for result in results:\n preds.append(result['pred_labels'])\n gts.append(result['gt_labels'])\n preds = torch.cat(preds)\n gts = torch.cat(gts)\n\n assert preds.max() < self.num_classes\n assert gts.max() < self.num_classes\n\n cared_labels = preds.new_tensor(self.cared_labels, dtype=torch.long)\n\n hits = (preds == gts)[None, :]\n preds_per_label = cared_labels[:, None] == preds[None, :]\n gts_per_label = cared_labels[:, None] == gts[None, :]\n\n tp = (hits * preds_per_label).float()\n fp = (~hits * preds_per_label).float()\n fn = (~hits * gts_per_label).float()\n\n result = {}\n if 'macro' in self.mode:\n result['macro_f1'] = self._compute_f1(\n tp.sum(-1), fp.sum(-1), fn.sum(-1))\n if 'micro' in self.mode:\n result['micro_f1'] = self._compute_f1(tp.sum(), fp.sum(), fn.sum())\n\n return result", "def eval_metrics_for_multiclass(self, predicted_answers):\n total_correct_in_all = 0\n total_pred_in_all = len(predicted_answers)\n # initial a dict for total correct in topK counting.\n total_correct_in_topK = dict([(i, 0) for i in self.topK_list])\n total_pred_in_topK = dict([(i, 0) for i in self.topK_list])\n max_topK = max(self.topK_list)\n label_pred = []\n label_true = []\n label_weights = []\n digits = 3\n metrics = {}\n\n for e_id, sample in 
predicted_answers.iteritems():\n # get all correct ids\n correct_label_indices = sample['correct_labels']\n # current case, we only have a majority lable for the correct label\n label_true.append(correct_label_indices[0])\n # counting all correct for each sample\n total_correct_in_all += len(correct_label_indices)\n # select topK\n sorted_probs_max_topK = sorted(sample['pred_probs'], reverse=True, key=lambda x: x['prob'])[:max_topK]\n top1_pred = sorted_probs_max_topK[0]\n label_pred.append(top1_pred['label_index'])\n\n # for all topK predictions\n for i in range(len(sorted_probs_max_topK)):\n pred = sorted_probs_max_topK[i]\n for topK in self.topK_list:\n if i >= topK:\n continue\n else:\n total_pred_in_topK[topK] += 1\n if pred['label_index'] in correct_label_indices:\n total_correct_in_topK[topK] += 1\n\n if total_correct_in_all != 0:\n # recall@K\n recall_at_K = dict([(k, total_correct_in_topK[k] / (total_correct_in_all * 1.0)) for k in self.topK_list])\n # assign recall@K into metrics\n for k, v in recall_at_K.items():\n # Jie\n # 1 means the greater the better.\n # -1 means the smaller the better.\n metrics['R@{}'.format(k)] = (1, v)\n\n self.logger.info('total_correct_in_all = {}, correct_in_topK = {}, recall@K = {}'.format(total_correct_in_all, sorted(total_correct_in_topK.items()), sorted(recall_at_K.items())))\n # here return all the p,r,f for each label, then we compute the micro average later.\n p, r, f1, s = precision_recall_fscore_support(label_true, label_pred, beta=1.0, labels=range(self.num_classes), average=None)\n total_s = np.sum(s)\n p_micro, r_micro, f1_micro, _ = precision_recall_fscore_support(label_true, label_pred, beta=1.0, labels=range(self.num_classes), average='micro')\n last_lines_heading = ['macro / total', 'weighted_mac / total', 'micro / total']\n target_names = self.classes\n name_width = max(len(cn) for cn in target_names)\n width = max(name_width, max([len(x) for x in last_lines_heading]), digits)\n\n headers = [\"precision\", \"recall\", \"f1-score\", \"support\"]\n head_fmt = u'{:>{width}s} ' + u' {:>9}' * len(headers)\n report = head_fmt.format(u'', *headers, width=width)\n report += u'\\n\\n'\n row_fmt = u'{:>{width}s} ' + u' {:>9.{digits}f}' * 3 + u' {:>9}\\n'\n rows = zip(target_names, p, r, f1, s)\n for row in rows:\n label_weights.append(row[4])\n report += row_fmt.format(*row, width=width, digits=digits)\n metrics['P_{}'.format(row[0])] = (1, row[1])\n metrics['R_{}'.format(row[0])] = (1, row[2])\n metrics['F1_{}'.format(row[0])] = (1, row[3])\n report += u'\\n'\n\n # compute macro averages\n p_macro = np.average(p, weights = None)\n r_macro = np.average(r, weights = None)\n f1_macro = np.average(f1, weights = None)\n metrics['P_{}'.format(\"macro\")] = (1, p_macro)\n metrics['R_{}'.format(\"macro\")] = (1, r_macro)\n metrics['F1_{}'.format(\"macro\")] = (1, f1_macro)\n report += row_fmt.format(last_lines_heading[0],\n p_macro,\n r_macro,\n f1_macro,\n total_s,\n width=width, digits=digits)\n\n # compute weighted macro average\n label_weights = map(lambda x : x/(total_s * 1.0), label_weights)\n p_weighted_average = np.average(p, weights = label_weights)\n r_weighted_average = np.average(r, weights = label_weights)\n f1_weighted_average = np.average(f1, weights = label_weights)\n metrics['P_{}'.format(\"weighted_macro\")] = (1, p_weighted_average)\n metrics['R_{}'.format(\"weighted_macro\")] = (1, r_weighted_average)\n metrics['F1_{}'.format(\"weighted_macro\")] = (1, f1_weighted_average)\n report += row_fmt.format(last_lines_heading[1],\n 
p_weighted_average,\n r_weighted_average,\n f1_weighted_average,\n total_s,\n width=width, digits=digits)\n # micro average\n metrics['P_{}'.format(\"micro\")] = (1, p_micro)\n metrics['R_{}'.format(\"micro\")] = (1, r_micro)\n metrics['F1_{}'.format(\"micro\")] = (1, f1_micro)\n report += row_fmt.format(last_lines_heading[2],\n p_micro,\n r_micro,\n f1_micro,\n total_s,\n width=width, digits=digits)\n\n self.logger.info(\"P,R,F1 report as follows:\\n {}\".format(report))\n # only plot it at dev and test time, not during training.\n if self.gen_confusing_matrix:\n\n self.logger.info(\"Generate confusing matrix photo.\")\n # Compute confusion matrix\n conf_matrix = confusion_matrix(label_true, label_pred)\n np.set_printoptions(precision=2)\n\n # Plot non-normalized confusion matrix\n plt.figure()\n self.plot_confusion_matrix(conf_matrix, classes=self.brief_classes, ori_fmt='d',\n title='Confusion matrix, without normalization')\n wo_norm_fig_path = os.path.join(self.result_dir, '{}_wo_norm.png'.format(self.result_prefix))\n plt.savefig(wo_norm_fig_path)\n\n # Plot normalized confusion matrix\n plt.figure()\n self.plot_confusion_matrix(conf_matrix, classes=self.brief_classes, ori_fmt='d', normalize=True,\n title='Normalized confusion matrix')\n\n norm_fig_path = os.path.join(self.result_dir, '{}_w_norm.png'.format(self.result_prefix))\n plt.savefig(norm_fig_path)\n\n else:\n self.logger.warn('invalid total_correct_in_all')\n\n return metrics", "def record_eval_stats(self, labels: np.array, predictions: np.array, image_size: torch.Tensor):\n labels = torch.from_numpy(labels)\n predictions = torch.from_numpy(predictions)\n\n num_labels = len(labels)\n target_cls = labels[:, 0].tolist() if num_labels else []\n self.seen = self.seen + 1\n\n # Handle case where we get no predictions\n if predictions is None:\n if num_labels:\n self.eval_stats.append((torch.zeros(0, self.num_ious, dtype=torch.bool), torch.Tensor(), torch.Tensor(), target_cls))\n else:\n bboxes = xywh_to_xyxy(predictions[:, :4])\n scores = predictions[:, 4]\n class_pred = predictions[:, 5]\n\n clip_coords(bboxes, image_size)\n\n # Assign all predictions as inccorect\n correct = torch.zeros(predictions.shape[0], self.num_ious, dtype=torch.bool)\n\n if num_labels:\n detected = []\n target_cls_tensor = labels[:, 0]\n\n # target boxes\n target_box = xywh_to_xyxy(labels[:, 1:])\n\n # Per target class\n for cls in torch.unique(target_cls_tensor):\n target_indx = (cls == target_cls_tensor).nonzero(as_tuple=False).view(-1) # target indices\n pred_indx = (cls == predictions[:, 5]).nonzero(as_tuple=False).view(-1) # prediction indices\n\n # Search for detections\n if pred_indx.shape[0]:\n # Prediction to target ious\n best_ious, best_indxs = iou(predictions[pred_indx, :4], target_box[target_indx]).max(1) # best ious, indices\n # Appended detections\n detected_set = set()\n for iou_indx in (best_ious > self.iou_values[0]).nonzero(as_tuple=False):\n detected_target = target_indx[best_indxs[iou_indx]]\n if detected_target.item() not in detected_set:\n detected_set.add(detected_target.item())\n detected.append(detected_target)\n correct[pred_indx[iou_indx]] = best_ious[iou_indx] > self.iou_values # iou_thres is 1xn\n if len(detected) == num_labels: # all targets already located in image\n break\n\n self.eval_stats.append((correct.cpu(), scores.cpu(), class_pred.cpu(), target_cls))", "def classifiction_metric(preds, labels, label_list):\n\n acc = metrics.accuracy_score(labels, preds)\n\n labels_list = [i for i in range(len(label_list))]\n\n 
report = metrics.classification_report(\n labels, preds, labels=labels_list, target_names=label_list, digits=5, output_dict=True)\n\n return acc, report", "def evaluate(labels, predictions):\n pos = 0\n neg = 0\n true_pos_rate = 0\n true_neg_rate = 0\n\n for i in range(len(labels)):\n if labels[i] == 1:\n pos += 1\n else:\n neg += 1\n if predictions[i] == labels[i]:\n if predictions[i] == 1:\n true_pos_rate += 1\n else:\n true_neg_rate += 1\n \n sensitivity = true_pos_rate / pos\n specificity = true_neg_rate / neg\n\n return (sensitivity, specificity)", "def evaluate(labels, predictions):\n\n # Positive and positive identified count\n pos = 0\n posid = 0\n\n # Negative and positive identified count\n neg = 0\n negid = 0\n\n for label, pred in zip(labels, predictions):\n if label == 1:\n pos += 1\n if pred == 1:\n posid += 1\n elif label == 0:\n neg += 1\n if pred == 0:\n negid += 1\n else:\n raise ValueError\n\n # `sensitivity` should be a floating-point value from 0 to 1\n # representing the \"true positive rate\": the proportion of\n # actual positive labels that were accurately identified.\n sens = float(posid / pos)\n\n # `specificity` should be a floating-point value from 0 to 1\n # representing the \"true negative rate\": the proportion of\n # actual negative labels that were accurately identified.\n spec = float(negid / neg)\n\n return (sens, spec)", "def _calculate_metrics(self, y_pred, y_true, detailed_result):\n\n y_right = [1 for (y_p, y_t) in zip(y_pred, y_true) if y_p == y_t]\n acc = len(y_right) / len(y_pred)\n if not detailed_result:\n return acc\n\n con_matrix = confusion_matrix(y_pred, y_true)\n if len(self.y_kinds) > 2:\n return con_matrix, [acc]\n else:\n tn = con_matrix[0][0]\n fp = con_matrix[0][1]\n fn = con_matrix[1][0]\n tp = con_matrix[1][1]\n p = tp + fn\n n = tn + fp\n sn = tp / p if p > 0 else None\n sp = tn / n if n > 0 else None\n pre = (tp) / (tp + fp) if (tp + fp) > 0 else None\n mcc = 0\n tmp = sqrt(tp + fp) * sqrt(tp + fn) * sqrt(tn + fp) * sqrt(tn + fn)\n if tmp != 0:\n mcc = (tp * tn - fp * fn) / tmp\n return con_matrix, [acc, sn, sp, pre, mcc]", "def evaluate(labels, predictions):\n #labels and predictions\n truePos = 0\n trueNeg = 0\n for data in range(len(labels)):\n if((predictions[data] == 1) and (predictions[data] == labels[data])):\n truePos+=1\n elif((predictions[data] == 0) and (predictions[data] == labels[data])):\n trueNeg+=1\n sensitivity = truePos/(len(labels) + 1)\n specificity = trueNeg/(len(labels) + 1)\n return (sensitivity, specificity)\n \n\n #raise NotImplementedError", "def compute_metrics(self, X: np.ndarray, y_true: list) -> dict:\n # TODO: apply softmax layer for q logits?\n\n q, _ = self.agent._target_q_network (X, training=False)\n\n # y_scores = np.max(q.numpy(), axis=1) # predicted scores (Q-Values)\n y_pred = np.argmax(q.numpy(), axis=1) # predicted class label\n\n metrics = custom_metrics(y_true, y_pred)\n\n return metrics", "def detection_analysis(y_pred, y_true):\n print(\"Precision: \", sm.precision_score(y_pred, y_true))\n print(\"Recall: \", sm.recall_score(y_pred, y_true))\n print(\"Accuracy: \", sm.accuracy_score(y_pred, y_true))\n print(\"\\n\")", "def evaluate(labels, predictions):\n positive_count = 0\n positive = 0\n negative_count = 0\n negative = 0\n for i in range(len(labels)):\n if labels[i] == 1:\n positive_count+=1\n if predictions[i] == 1:\n positive +=1\n else:\n negative_count+=1\n if predictions[i] == 0:\n negative +=1\n\n sensitivity = positive / positive_count\n specificity = negative / 
negative_count\n\n return (sensitivity, specificity)", "def prediction_processing(predictions, labels, threshold, step_nb):\n new_labels = []\n new_predictions = []\n number_sequences = step_nb//50\n\n for k in range(len(labels)//number_sequences):\n total_prediction = 0\n isLabelTrue = labels[number_sequences*k]\n for i in range(number_sequences):\n total_prediction += (1/predictions[number_sequences*k+i])\n if not(isLabelTrue == (labels[number_sequences*k+i])):\n logger.error('Problem.')\n if total_prediction > threshold:\n total_prediction = False\n else:\n total_prediction = True\n new_labels.append(isLabelTrue)\n new_predictions.append(total_prediction)\n\n recall_1 = recall_score(new_labels, new_predictions)\n recall_0 = recall_score(new_labels, new_predictions, pos_label=0)\n precision_1 = precision_score(new_labels, new_predictions)\n precision_0 = precision_score(new_labels, new_predictions, pos_label=0)\n return((recall_1, recall_0, precision_1, precision_0), new_predictions, new_labels)", "def multiclass_metrics(modelname, y_test, y_pred):\n multiclass_metrics = {\n 'Accuracy' : metrics.accuracy_score(y_test, y_pred),\n 'macro F1' : metrics.f1_score(y_test, y_pred, average='macro'),\n 'micro F1' : metrics.f1_score(y_test, y_pred, average='micro'),\n 'macro Precision' : metrics.precision_score(y_test, y_pred, average='macro'),\n 'micro Precision' : metrics.precision_score(y_test, y_pred, average='micro'),\n 'macro Recall' : metrics.recall_score(y_test, y_pred, average='macro'),\n 'micro Recall' : metrics.recall_score(y_test, y_pred,average='macro'),\n }\n \n df_metrics = pd.DataFrame.from_dict(multiclass_metrics, orient='index')\n df_metrics.columns = [model]\n\n \n \n return df_metrics", "def benchmark_metrics(posteriors: np.ndarray,\n observed_labels: np.ndarray,\n metric_name: str,\n true_labels: np.ndarray) -> Union[float, List[float]]:\n predictions = np.argmax(posteriors, axis=1)\n\n # Accuracy averaged across all classes\n if metric_name == \"accuracy\":\n return np.mean(predictions == observed_labels) * 100.0\n # Cross-entropy loss across all samples\n elif metric_name == \"top_n_accuracy\":\n N = 2\n sorted_class_predictions = np.argsort(posteriors, axis=1)[:, ::-1]\n correct = int(0)\n for _i in range(observed_labels.size):\n correct += np.any(sorted_class_predictions[_i, :N] == observed_labels[_i])\n return correct * 100.0 / observed_labels.size\n elif metric_name == \"cross_entropy\":\n return np.mean(cross_entropy(posteriors, np.eye(10)[observed_labels]))\n # Average accuracy per class - samples are groupped based on their true class label\n elif metric_name == \"accuracy_per_class\":\n vals = list()\n for _class in np.unique(true_labels, return_counts=False):\n mask = true_labels == _class\n val = np.mean(predictions[mask] == observed_labels[mask]) * 100.0\n vals.append(np.around(val, decimals=3))\n return vals\n else:\n raise ValueError(\"Unknown metric\")", "def calc_performance_metrics(\n labels: np.ndarray,\n predictions: np.ndarray,\n decimal_points: Optional[int] = 4) -> _PerformanceMetrics:\n utils.assert_label_and_prediction_length_match(labels,\n predictions)\n\n mse = metrics.mean_squared_error(labels, predictions)\n rmse = np.sqrt(mse)\n msle = np.sqrt(metrics.mean_squared_log_error(labels, predictions))\n mae = metrics.mean_absolute_error(labels, predictions)\n mape = metrics.mean_absolute_percentage_error(labels, predictions)\n r2 = metrics.r2_score(labels, predictions)\n corr = sp.stats.pearsonr(labels, predictions)[0]\n\n return _PerformanceMetrics(\n 
mean_squared_error=round(mse, decimal_points),\n root_mean_squared_error=round(rmse, decimal_points),\n mean_squared_log_error=round(msle, decimal_points),\n mean_absolute_error=round(mae, decimal_points),\n mean_absolute_percentage_error=round(mape, decimal_points),\n r_squared=round(r2, decimal_points),\n pearson_correlation=round(corr, decimal_points))", "def evaluate(labels, predictions):\n\n truePositiveCounter = 0\n trueNegativeCounter = 0\n truePositiveCorrect = 0\n trueNegativeCorrect = 0\n \n sensitivity = 0\n specificity = 0\n\n for i in range(len(labels)):\n if labels[i] == 1:\n truePositiveCounter += 1\n if(labels[i] == predictions[i]):\n truePositiveCorrect += 1\n elif labels[i] == 0:\n trueNegativeCounter += 1\n if(labels[i] == predictions[i]):\n trueNegativeCorrect += 1\n\n sensitivity = truePositiveCorrect / truePositiveCounter\n specificity = trueNegativeCorrect / trueNegativeCounter\n\n return sensitivity, specificity", "def performance_metrics(self, y, y_predicted, type='mse'):\n if type== 'mse':\n if y.ndim > 1:\n m = np.shape(y)[0] #number of samples\n n = np.shape(y)[1] #number of output elements\n mean_over_output_elements = np.sum((y_predicted-y)**2, axis=1)/n\n mean_over_all_datasets = np.sum(mean_over_output_elements)/m\n metric = mean_over_all_datasets\n else: \n mean_over_output_elements = np.sum((y_predicted-y)**2)/(len(y))\n metric = mean_over_output_elements\n\n elif type == 'mae':\n if y.ndim > 1:\n m = np.shape(y)[0] #number of samples\n n = np.shape(y)[1] #number of output elements\n mean_over_output_elements = np.sum(np.abs(y_predicted-y), axis=1)/n\n mean_over_all_datasets = np.sum(mean_over_output_elements)/m\n metric = mean_over_all_datasets\n else: \n mean_over_output_elements = np.sum(np.abs(y_predicted-y))/(len(y))\n metric = mean_over_output_elements\n\n elif type == 'msle':\n if y.ndim > 1:\n m = np.shape(y)[0] #number of samples\n n = np.shape(y)[1] #number of output elements\n mean_over_output_elements = np.sum((np.log(1 +y_predicted)-np.log(1+y))**2, axis=1)/n\n mean_over_all_datasets = np.sum(mean_over_output_elements)/m\n metric = mean_over_all_datasets\n else: \n mean_over_output_elements = np.sum((np.log(1 +y_predicted)-np.log(1+y))**2)/(len(y))\n metric = mean_over_output_elements\n\n elif type == 'mape':\n if y.ndim > 1:\n m = np.shape(y)[0] #number of samples\n n = np.shape(y)[1] #number of output elements\n mean_over_output_elements = np.sum(np.abs(y_predicted-y)/np.maximum(1e-8,np.abs(y)), axis=1)/n\n mean_over_all_datasets = np.sum(mean_over_output_elements)/m\n metric = mean_over_all_datasets\n else: \n mean_over_output_elements = np.sum(np.abs(y_predicted-y)/np.maximum(1e-8,np.abs(y)))/(len(y))\n metric = mean_over_output_elements \n \n elif type == 'r2':\n if y.ndim > 1:\n n = np.shape(y)[0] #number of samples\n m = np.shape(y)[1] #number of output elements\n y_mean_over_output_elements = np.sum(y, axis=0)/m\n y_mean = y_mean_over_output_elements\n r2_over_output_elements = (np.sum((y-y_predicted)**2, axis=0))/((np.sum((y-y_mean)**2, axis=0)))\n r2_over_output_elements = np.sum(r2_over_output_elements)/n\n metric = 1 - r2_over_output_elements\n else: \n m = 1 #number of samples\n n = np.shape(y)[0] #number of output elements\n y_mean_over_output_elements = np.sum(y, axis=0)/n\n y_mean = y_mean_over_output_elements\n r2_over_output_elements = (np.sum((y-y_predicted)**2, axis=0))/(np.sum((y-y_mean)**2, axis=0))\n r2_over_output_elements = np.sum(r2_over_output_elements)\n metric = 1 - r2_over_output_elements\n elif type == 'rmse':\n 
if y.ndim > 1:\n m = np.shape(y)[0] #number of samples\n n = np.shape(y)[1] #number of output elements\n mean_over_output_elements = np.sum((y_predicted-y)**2, axis=1)/n\n mean_over_all_datasets = np.sum(mean_over_output_elements)/m\n metric = mean_over_all_datasets**(1/2)\n else: \n mean_over_output_elements = np.sum((y_predicted-y)**2)/(len(y))\n metric = mean_over_output_elements**(1/2)\n else:\n raise ValueError(\"undefined metric\")\n return metric", "def label_accuracy_score(label_true, label_pred, n_class):\n hist = _fast_hist(label_true.flatten(), label_pred.flatten(), n_class)\n acc = np.diag(hist).sum() / hist.sum().astype(np.float64)\n acc_cls = np.diag(hist) / hist.sum(axis=1).astype(np.float64)\n acc_cls = np.nanmean(acc_cls)\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist)).astype(np.float64)\n mean_iu = np.nanmean(iu)\n freq = hist.sum(axis=1) / hist.sum().astype(np.float64)\n fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()\n return acc, acc_cls, mean_iu, fwavacc", "def evaluate(model, eval_data, num_labels): \n # Turn on the evaluation state to ignore dropouts\n model.eval()\n results = [predict(model, x) for x, y in eval_data]\n f1_score, accuracy = get_metrics(np.array([y for x, y in eval_data]), results, num_labels)\n return f1_score, accuracy", "def compute_metrics(self):\n pass", "def calculate_metrics(outputs, targets):\n pred = outputs\n\n # Top-k prediction for TAg\n hits_tag_top5 = compute_topk_acc(pred, targets, 5)\n hits_tag_top1 = compute_topk_acc(pred, targets, 1)\n\n return hits_tag_top5.item(), hits_tag_top1.item()", "def _compute_metrics(hits_or_lcs: int, pred_len: int, target_len: int) ->Dict[str, Tensor]:\n precision = hits_or_lcs / pred_len\n recall = hits_or_lcs / target_len\n if precision == recall == 0.0:\n return dict(precision=tensor(0.0), recall=tensor(0.0), fmeasure=tensor(0.0))\n fmeasure = 2 * precision * recall / (precision + recall)\n return dict(precision=tensor(precision), recall=tensor(recall), fmeasure=tensor(fmeasure))", "def evaluate(self, predicted_df):\n logging.info(\"Evaluating model: {}\".format(self.model_type))\n y_true = predicted_df[\"user_label\"].as_matrix()\n y_pred = predicted_df[\"label\"].as_matrix()\n\n scores_cols = [col for col in predicted_df.columns if col.startswith(\"scores_\")]\n print(\"scores_cols: {}\".format(scores_cols))\n\n y_pred_scores = predicted_df[scores_cols].copy().fillna(value=0).as_matrix()\n print(\"predicted scores: {}\".format(y_pred_scores))\n y_true_scores = []\n for lab in predicted_df[\"user_label\"]:\n trues = [0 for _ in range(len(scores_cols))]\n if \"scores_\"+lab in scores_cols:\n trues[scores_cols.index(\"scores_\"+lab)] = 1\n y_true_scores.append(trues)\n print(\"true scores: {}\".format(y_true_scores))\n y_true_scores = np.array(y_true_scores)\n\n performance = {\"model\": self.model_type, \"description\": self.description}\n if 'categorical_accuracy' in self.metrics:\n logging.info(\"Calculating categorical accuracy for {}\".format(self))\n performance['categorical_accuracy'] = sklearn.metrics.accuracy_score(y_true,\n y_pred) # np.mean(y_pred == y_true)\n if 'fmeasure' in self.metrics:\n logging.info(\"Calculating fmeasure for {}\".format(self))\n performance['fmeasure'] = sklearn.metrics.f1_score(y_true, y_pred, average=self.metrics_average)\n if 'MRR' in self.metrics:\n logging.info(\"Calculating MRR for {}\".format(self))\n performance['MRR'] = sklearn.metrics.label_ranking_average_precision_score(y_true_scores, y_pred_scores)\n logging.info(\"Calculated 
performance: {}\".format(performance))\n print(performance)\n return pd.DataFrame(performance, index=[0])", "def metrics(true, predictions):\n metrics = pd.DataFrame(columns=['Metric Value'])\n metrics.loc['MAE'] = mean_absolute_error(true, predictions)\n metrics.loc['RMSE'] = rmse(true, predictions)\n metrics.loc['R2'] = r2_score(true, predictions)\n metrics.loc['MAPE'] = mape(true, predictions)\n metrics.loc['sMAPE'] = smape(true, predictions)\n \n return metrics", "def eval_metrics_for_seqtags(self, predicted_answers):\n total_correct_in_all = 0\n label_pred = []\n label_true = []\n label_weights = []\n digits = 3\n metrics = {}\n\n for e_id, sample in predicted_answers.iteritems():\n # get all correct ids, include padding ids.\n correct_label_indices = sample['correct_seq_labels']\n # use extend to add all the labels in the seq, include the head padding and tail padding\n label_true.extend(correct_label_indices)\n # counting all correct for each sample\n total_correct_in_all += len(correct_label_indices)\n # select topK\n label_pred.extend(sample['pred_seq_tags'])\n\n if total_correct_in_all != 0:\n p, r, f1, s = precision_recall_fscore_support(label_true, label_pred, beta=1.0, labels=range(self.num_classes), average=None)\n total_s = np.sum(s)\n p_micro, r_micro, f1_micro, _ = precision_recall_fscore_support(label_true, label_pred, beta=1.0, labels=range(self.num_classes), average='micro')\n last_lines_heading = ['macro / total', 'weighted_mac / total', 'micro / total']\n target_names = self.classes\n name_width = max(len(cn) for cn in target_names)\n width = max(name_width, max([len(x) for x in last_lines_heading]), digits)\n\n headers = [\"precision\", \"recall\", \"f1-score\", \"support\"]\n head_fmt = u'{:>{width}s} ' + u' {:>9}' * len(headers)\n report = head_fmt.format(u'', *headers, width=width)\n report += u'\\n\\n'\n row_fmt = u'{:>{width}s} ' + u' {:>9.{digits}f}' * 3 + u' {:>9}\\n'\n rows = zip(target_names, p, r, f1, s)\n for row in rows:\n label_weights.append(row[4])\n report += row_fmt.format(*row, width=width, digits=digits)\n metrics['P_{}'.format(row[0])] = (1, row[1])\n metrics['R_{}'.format(row[0])] = (1, row[2])\n metrics['F1_{}'.format(row[0])] = (1, row[3])\n report += u'\\n'\n\n # compute macro averages\n p_macro = np.average(p, weights = None)\n r_macro = np.average(r, weights = None)\n f1_macro = np.average(f1, weights = None)\n metrics['P_{}'.format(\"macro\")] = (1, p_macro)\n metrics['R_{}'.format(\"macro\")] = (1, r_macro)\n metrics['F1_{}'.format(\"macro\")] = (1, f1_macro)\n report += row_fmt.format(last_lines_heading[0],\n p_macro,\n r_macro,\n f1_macro,\n total_s,\n width=width, digits=digits)\n\n # compute weighted macro average\n label_weights = map(lambda x : x/(total_s * 1.0), label_weights)\n p_weighted_average = np.average(p, weights = label_weights)\n r_weighted_average = np.average(r, weights = label_weights)\n f1_weighted_average = np.average(f1, weights = label_weights)\n metrics['P_{}'.format(\"weighted_macro\")] = (1, p_weighted_average)\n metrics['R_{}'.format(\"weighted_macro\")] = (1, r_weighted_average)\n metrics['F1_{}'.format(\"weighted_macro\")] = (1, f1_weighted_average)\n report += row_fmt.format(last_lines_heading[1],\n p_weighted_average,\n r_weighted_average,\n f1_weighted_average,\n total_s,\n width=width, digits=digits)\n # micro average\n metrics['P_{}'.format(\"micro\")] = (1, p_micro)\n metrics['R_{}'.format(\"micro\")] = (1, r_micro)\n metrics['F1_{}'.format(\"micro\")] = (1, f1_micro)\n report += 
row_fmt.format(last_lines_heading[2],\n p_micro,\n r_micro,\n f1_micro,\n total_s,\n width=width, digits=digits)\n\n self.logger.info(\"P,R,F1 report as follows:\\n {}\".format(report))\n # only plot it at dev and test time, not during training.\n if self.gen_confusing_matrix:\n\n self.logger.info(\"Generate confusing matrix photo.\")\n # Compute confusion matrix\n conf_matrix = confusion_matrix(label_true, label_pred)\n np.set_printoptions(precision=2)\n\n # Plot non-normalized confusion matrix\n plt.figure()\n self.plot_confusion_matrix(conf_matrix, classes=self.brief_classes,\n title='Confusion matrix when seq labeling, without normalization')\n wo_norm_fig_path = os.path.join(self.result_dir, '{}_wo_norm.png'.format(self.result_prefix))\n plt.savefig(wo_norm_fig_path)\n\n # Plot normalized confusion matrix\n plt.figure()\n self.plot_confusion_matrix(conf_matrix, classes=self.brief_classes, normalize=True,\n title='Normalized confusion matrix when seq labeling')\n\n norm_fig_path = os.path.join(self.result_dir, '{}_w_norm.png'.format(self.result_prefix))\n plt.savefig(norm_fig_path)\n\n else:\n self.logger.warn('invalid total_correct_in_all')\n\n return metrics", "def error_rate(predictions, labels):\n return 100.0 - (100*(np.sum(predictions == labels)/float(predictions.shape[0]*predictions.shape[1])))", "def accumulate(self, predictions, labels, loss):\n batch_size = labels.shape[0]\n mean_hit_at_one = calculate_hit_at_one(predictions, labels)\n mean_perr = calculate_precision_at_equal_recall_rate(predictions, labels)\n mean_f1score = calculate_f1score(predictions, labels)\n mean_f2score = calculate_f2score(predictions, labels)\n mean_perr = calculate_precision_at_equal_recall_rate(predictions, labels)\n mean_loss = np.mean(loss)\n\n self.num_examples += batch_size\n self.sum_hit_at_one += mean_hit_at_one * batch_size\n self.sum_perr += mean_perr * batch_size\n self.sum_f1score += mean_f1score * batch_size\n self.sum_f2score += mean_f2score * batch_size\n self.sum_loss += mean_loss * batch_size\n\n return {\"hit_at_one\": mean_hit_at_one, \"perr\": mean_perr, \"f1score\": mean_f1score, \"f2score\": mean_f2score, \"loss\": mean_loss}", "def log_inference_metrics(prediction, probabilities, testY, testX):\n F1score = f1_score(testY,prediction)\n AUPRC = average_precision_score(testY, probabilities)\n tn, fp, fn, tp = confusion_matrix(testY,prediction).ravel()\n\n sigopt.log_metric('AUPRC test', average_precision_score(testY, probabilities))\n sigopt.log_metric('F1score test', F1score)\n sigopt.log_metric('False Positive test', fp)\n sigopt.log_metric('False Negative test', fn)\n sigopt.log_metric('True Positive test', tp)\n sigopt.log_metric('True Negative test', tn)\n sigopt.log_metric('Max $ Missed Fraudulent', max_missed_fraud(prediction, testY, testX['amount']))\n sigopt.log_metric('Max $ Missed Valid', max_missed_valid(prediction, testY, testX['amount']))\n\n return F1score, AUPRC, tn, fp, fn, tp", "def evaluate(y_test, pred_labels):\n \n # Converts one-hot code to a label (the index of 1)\n y_test_labels = np.argmax(y_test, axis=1)\n \n # Compare test labels to predicted labels\n score = accuracy_score(y_test_labels, pred_labels)\n \n return y_test_labels, score", "def run_analyses(y_predict_train, y_train, y_predict, y_test):\n # calculate metrics\n _, training_error = output_error(y_predict_train, y_train)\n (precision, recall, f1, _), testing_error = output_error(y_predict, y_test)\n \n # print out metrics\n print 'Average Precision:', np.average(precision)\n print 'Average 
Recall:', np.average(recall)\n print 'Average F1:', np.average(f1)\n print 'Training Error:', training_error\n print 'Testing Error:', testing_error", "def accuracy_compute(predictions, labels):\n with tf.name_scope('test_accuracy'):\n accu = 100 * np.sum(np.argmax(predictions, 1) == labels) / predictions.shape[0]\n tf.summary.scalar('test_accuracy', accu)\n return accu", "def fit_predict(self, train_x: pd.DataFrame, train_y: pd.Series, test_x: pd.DataFrame, test_y: pd.Series) -> dict:\n self.evaluator.fit(train_x, train_y, test_x, test_y)\n predictions = self.evaluator.predict(test_x)\n print(predictions)\n metrics = metrics_stat(predictions, test_y)\n return metrics", "def predictions_conf(self):\n return self._pred_L, self._pred_R", "def accuracy(predictions, targets):\n return accuracy", "def precision(y_true, y_pred, average, labels):\n y_true, y_pred = check_metric_args(y_true, y_pred, average, labels)\n\n # At this point, you can be sure that y_true and y_pred are one hot encoded.\n result = None\n m = len(y_true)\n n = len(labels)\n\n #call get_confusion_matrix function and put the result in confusion_matrix\n confusion_matrix = get_confusion_matrix(y_true, y_pred, labels)\n\n #compute the result if using micro-averages\n if average == \"micro\":\n numerator = np.trace(confusion_matrix)\n denominator = np.sum(confusion_matrix)\n result = numerator/denominator\n\n #compute the precision independently for each class and then take the average \n elif average == \"macro\":\n diag = np.diag(confusion_matrix)\n row_sums = np.sum(confusion_matrix, axis=1)\n row_sums_adjusted = np.array([1 if val == 0 else val for val in row_sums])\n result = np.mean(diag/row_sums_adjusted)\n\n else:\n diag = np.diag(confusion_matrix)\n row_sums = np.sum(confusion_matrix, axis=1)\n row_sums_adjusted = np.array([1 if val == 0 else val for val in row_sums])\n result = diag/row_sums_adjusted\n\n return result", "def compute_map(labels,predictions):\n try:\n labels = tf.reshape(labels, [tf.shape(labels)[0], 1])\n # assert labels.get_shape()[1] == 1\n # assert predictions.get_shape()[1] == 2\n # assert labels.get_shape()[0] == predictions.get_shape()[0]\n except Exception:\n labels = np.reshape(labels, [labels.shape[0], 1])\n # assert labels.shape[1] == 1\n # assert predictions.shape[1] == 2\n # assert labels.shape[0] == predictions.shape[0]\n\n # labels = np.array([[0], [0], [1], [1], [1], [0]])\n # y_true = labels.astype(np.int64)\n y_true = tf.identity(labels)\n\n # predictions = np.array([[0.1, 0.2],\n # [0.8, 0.05],\n # [0.3, 0.4],\n # [0.6, 0.25],\n # [0.1, 0.2],\n # [0.9, 0.0]])\n # y_pred = predictions.astype(np.float32)\n y_pred = tf.identity(predictions)\n\n # _, m_ap = tf.metrics.average_precision_at_k(y_true, y_pred, 1)\n _, m_ap = tf.metrics.average_precision_at_k(y_true, y_pred, 5)\n return m_ap", "def get_classification_metrics(features, true_output, model):\n accuracy = model.score(features, true_output)\n guess = model.predict(features)\n precision = metrics.precision_score(true_output, guess)\n recall = metrics.recall_score(true_output, guess)\n return accuracy, precision, recall", "def accuracy(predictions, labels):\n predictions = list(predictions)\n labels = list(labels)\n count = 0\n for i in range(len(labels)):\n if labels[i] == predictions[i]:\n count += 1\n\n return count / len(labels)", "def evaluate_prediction(classifier, test_data, labels):\n \n predictions = classifier.predict(test_data)\n \n return accuracy_score(labels, predictions)", "def build_eval_metrics(self, predict_ids, 
labels, nwords, params):\n raise NotImplementedError()", "def compute_metrics(\n self,\n preds: Dict[str, torch.Tensor],\n targets: Dict[str, torch.Tensor],\n phase: str,\n ) -> Dict[str, torch.Tensor]:\n if phase == \"train\":\n metrics_dict = self.train_metrics\n elif phase == \"val\":\n metrics_dict = self.val_metrics\n elif phase == \"test\":\n metrics_dict = self.test_metrics\n\n ret = {}\n for metric_name, metric in metrics_dict.items():\n if metric is not None:\n branch = metric_name.split(\"_\")[0]\n ret[metric_name] = metric(preds[branch], targets[branch])\n\n return ret", "def __evaluate_other_metrics(dataset, m, y_act, y_pred):\n return evaluate_metric(y_act, y_pred, m, dataset.y_n_classes)", "def score(self, X, y):\n predictions = self.predict(X)\n total_values = len(y)\n accuracy = 0\n if 'classification' == self.label_type:\n correct_values = np.where(predictions == y)\n accuracy = correct_values[0].size / total_values\n elif 'regression' == self.label_type:\n sse = (y - predictions) ** 2\n sse_summed = np.sum(sse)\n accuracy = sse_summed / total_values\n\n return accuracy", "def accuracy(predictions, labels):\n return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0])", "def score_prediction(y_true, y_pred):\n return [round(metrics.accuracy_score(y_true, y_pred)*100, 2),\n round(metrics.precision_score(y_true, y_pred)*100, 2),\n round(metrics.recall_score(y_true, y_pred)*100, 2),\n round(metrics.f1_score(y_true, y_pred)*100, 2)]", "def get_accuracy(pred, test_label, regression= \"logistic\"):\n if regression == \"multiclass\":\n pred_max = np.argmax(pred, axis=1)\n gt_max = np.argmax(test_label, axis=1)\n acc = np.sum(pred_max == gt_max)*100.0/pred.shape[0]\n elif regression == \"logistic\" or regression == \"probit\":\n if pred.ndim == 2:\n pred = pred[:,0]\n pred[pred >= 0.5] = 1.0\n pred[pred < 0.5] = 0.0\n acc = np.sum(pred == test_label)*100.0/pred.shape[0]\n\n return acc", "def accuracy(labels, predictions):\n if len(labels) != len(predictions):\n return -1\n\n correct = 0\n total = 0\n\n for i,v in enumerate(predictions):\n if labels[i] == str(v):\n correct += 1\n total += 1\n\n return (float(correct) / float(total)) * 100.0", "def evaluate(labels, predictions):\n\n true_positives = 0\n label_positives = 0\n\n true_negatives = 0\n label_negatives = 0\n\n for i in range(len(predictions)):\n if labels[i] == predictions[i] == 1:\n true_positives += 1\n if labels[i] == 1:\n label_positives += 1\n\n if labels[i] == predictions[i] == 0:\n true_negatives += 1\n if labels[i] == 0:\n label_negatives += 1\n\n return true_positives / label_positives, true_negatives / label_negatives\n\n # raise NotImplementedError", "def compute_and_print_eval_metrics(self):\n s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', '[email protected]', '[email protected]:.95')\n precision, recall, f1, mean_precision, mean_recall, map50, map = 0., 0., 0., 0., 0., 0., 0.\n ap = []\n eval_stats = [np.concatenate(x, 0) for x in zip(*self.eval_stats)]\n if len(eval_stats) and eval_stats[0].any():\n precision, recall, ap, f1, ap_class = ap_per_class(*eval_stats)\n precision, recall, ap50, ap = precision[:, 0], recall[:, 0], ap[:, 0], ap.mean(1)\n mean_precision, mean_recall, map50, map = precision.mean(), recall.mean(), ap50.mean(), ap.mean()\n nt = np.bincount(eval_stats[3].astype(np.int64), minlength=len(self.class_names)) # number of targets per class\n else:\n nt = np.zeros(1)\n\n pf = '%20s' + '%12.5g' * 6 # print format\n print(\"\\n 
EVALUTAION \\n\")\n print(s)\n print(pf % ('all', self.seen, nt.sum(), mean_precision, mean_recall, map50, map))\n if self.cfg.eval.verbose:\n for indx, cls in enumerate(ap_class):\n print(pf % (self.class_names[cls], self.seen, nt[cls], precision[indx], recall[indx], ap50[indx], ap[indx]))", "def define_prediction(self):\n with self.graph.as_default():\n self.predict_label = tf.argmax(self.l7, 1)\n self.predict_prob = tf.nn.softmax(self.l7)\n self.correct_label = tf.argmax(self.label, 1)\n self.accuracy = tf.reduce_mean(\n tf.cast(\n tf.equal(\n self.predict_label,\n self.correct_label\n ),\n tf.float32\n )\n )\n\n # Aggiungo accuracy all'elenco del sommario per tensorboard\n tf.summary.scalar(\"accuracy\", self.accuracy)\n return self.accuracy, self.predict_label", "def calculate_metrics(predictions, expected):\n # type: (np.ndarray, np.ndarray) -> (float, float, float)\n clients_count = predictions.shape[0]\n products_count = predictions.shape[1]\n\n true_positive = 0.0\n true_negative = 0.0\n false_positive = 0.0\n false_negative = 0.0\n\n total = float(clients_count * products_count)\n\n for c in range(0, clients_count):\n for p in range(0, products_count):\n if predictions[c, p] == expected[c, p]:\n if predictions[c, p] == 1:\n true_positive += 1\n else:\n true_negative += 1\n else:\n if predictions[c, p] == 1:\n false_positive += 1\n else:\n false_negative += 1\n\n accuracy = float(true_positive + true_negative) / total\n if true_positive + false_positive == 0:\n precision = 0\n else:\n precision = true_positive / float(true_positive + false_positive)\n\n if true_positive + false_negative == 0:\n recall = 0\n else:\n recall = true_positive / float(true_positive + false_negative)\n\n return accuracy, precision, recall", "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n n_samples = targets.shape[0]\n _, y_pred = predictions.max(dim=1)\n accuracy = (y_pred == targets).sum().item() / n_samples\n ########################\n # END OF YOUR CODE #\n #######################\n\n return accuracy", "def classification_metrics(self, target_data, predicted):\n from sklearn import preprocessing\n from sklearn import metrics \n\n y_true_copy, predictions = pd.DataFrame(self.target_data), predicted\n #y_true_copy.unique()\n np.unique(y_true_copy)\n encode = {}\n for i in range(len(np.unique(y_true_copy))):\n encode[np.unique(y_true_copy)[i]] = i\n \n predicted_copy = [encode[i] for i in predictions]\n \n y_true_copy.replace(encode, inplace=True)\n \n if len(y_true_copy) != 0:\n #Accuracy\n accuracy = round(metrics.accuracy_score(y_true_copy, predicted_copy),2) \n #Precision\n precision = round(metrics.precision_score(y_true_copy, predicted_copy, zero_division=1),2) \n #Recall\n recall = round(metrics.recall_score(y_true_copy, predicted_copy, zero_division=1),2)\n tn, fp, fn, tp = metrics.confusion_matrix(y_true_copy, predicted_copy).ravel()\n #False Positive Rate (FPR)\n fpr = round((fp/(fp+tn)),2)\n #Flase Negative Rate (FNR)\n fnr = round((fn/(tp+fn) if (tp+fn) else 0),2) \n results = {'accuracy':accuracy, 'precision':precision, 'recall':recall, 'fpr': fpr, 'fnr':fnr}\n return results\n else:\n raise Exception(\"Metrics calculation failed\")", "def classification_metric_fn(log_probs, label, is_real_example):\n log_probs = tf.reshape(log_probs, [-1, log_probs.shape[-1]])\n pred = tf.argmax(\n input=log_probs, axis=-1, output_type=tf.int32)\n\n label = tf.reshape(label, [-1])\n accuracy = tf.compat.v1.metrics.accuracy(\n 
labels=label, predictions=pred, weights=is_real_example)\n\n precision = tf.compat.v1.metrics.precision(\n labels=label, predictions=pred, weights=is_real_example)\n\n recall = tf.compat.v1.metrics.recall(\n labels=label, predictions=pred, weights=is_real_example)\n\n return {\n \"accuracy\": accuracy,\n \"precision\": precision,\n \"recall\": recall,\n }", "def label_accuracy_score(label_trues, label_preds, n_class):\n hist = np.zeros((n_class, n_class))\n # 一个batch里面可能有多个数据\n # 通过迭代器将一个个数据进行计算\n for lt, lp in zip(label_trues, label_preds):\n # numpy.ndarray.flatten将numpy对象拉成1维\n hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)\n\n # np.diag(a)假如a是一个二维矩阵,那么会输出矩阵的对角线元素\n # np.sum()可以计算出所有元素的和。如果axis=1,则表示按行相加\n \"\"\"\n acc是准确率 = 预测正确的像素点个数/总的像素点个数\n acc_cls是预测的每一类别的准确率(比如第0行是预测的类别为0的准确率),然后求平均\n iu是召回率Recall,公式上面给出了\n mean_iu就是对iu求了一个平均\n freq是每一类被预测到的频率\n fwavacc是频率乘以召回率,我也不知道这个指标代表什么\n \"\"\"\n acc = np.diag(hist).sum() / hist.sum()\n acc_cls = np.diag(hist) / hist.sum(axis=1)\n # nanmean会自动忽略nan的元素求平均\n acc_cls = np.nanmean(acc_cls)\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\n mean_iu = np.nanmean(iu)\n freq = hist.sum(axis=1) / hist.sum()\n fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()\n return acc, acc_cls, mean_iu, fwavacc", "def compute_accuracy(labels=None, predictions=None):\n labels = np.array(labels, dtype=np.int32)\n if len(labels.shape) == 2:\n labels = np.argmax(labels, -1)\n return np.sum(np.equal(labels, predictions)) / np.size(labels)", "def score(self, y_true, y_pred):\r\n pass", "def compute_metrics(self, target, data, weight):\n pred = self.predict(data, weight)\n assert len(pred) == len(target)\n # Calculate the mis-classification rate:\n N = len(pred)\n pred = np.reshape(pred, (N,))\n target = np.reshape(target, (N,))\n nb_misclass = np.count_nonzero(target - pred)\n return nb_misclass / N", "def binary_metrics(predicted_labels, labels):\n accuracy = tf.contrib.metrics.accuracy(predicted_labels, labels)\n exp_moving_average = tf.train.ExponentialMovingAverage(decay=0.99)\n update_op = exp_moving_average.apply([accuracy])\n tf.summary.scalar('accuracy', accuracy)\n tf.summary.scalar('accuracy_moving_average',\n exp_moving_average.average(accuracy))\n return update_op", "def accuracy(labels, predictions, n_classes):\n\t\tequality = tf.equal(x = predictions, y = labels) # match the type of labels\n\t\treturn tf.reduce_mean(tf.cast(equality, tf.float32))", "def get_metrics(Ytest, Ytest_pred):\n TN, FP, FN, TP = confusion_matrix(Ytest, Ytest_pred,\n labels=[majClass, minClass]).ravel()\n return TN, FP, FN, TP", "def evaluate(predicted, labels):\n \n assert len(predicted) == len(labels), \"Different number of predictions and labels.\"\n \n total = len(predicted)\n movie_correct = 0\n location_correct = 0\n \n center_frame_dist = [] \n overlaps = []\n \n for pred, label in zip(predicted, labels):\n \n dist = 0\n \n if pred[0] == label[0]: # Check if movie is correct\n movie_correct += 1\n \n dist = abs(pred[1] - ((label[1]+label[2])/2)) \n center_frame_dist.append(dist)\n \n correct = False\n if label[1] <= pred[1] <= label[2]:\n correct = True\n location_correct += 1\n\n \n# print(\"Label: ({:s}, {:d}, {:d}), predicted: ({:s}, {:d}), location correct: {!s:}, start_frame_dist: {:d}, overlap: {:d}\".format(\n# *label,\n# *pred,\n# correct,\n# dist\n# ))\n \n # Return (# movies correct, # correct location, # total movies) and (avg start frame distance, std)\n return (movie_correct, location_correct, total), 
(np.mean(center_frame_dist), np.std(center_frame_dist))", "def count_ner_labels(self, y_true, y_pred):\n return Counter(y_true), Counter(y_pred)", "def post_process_predictions(self, labels: Labels, scene: Scene) -> Labels:\n return labels", "def calc_acc_metrics(preds, teY):\n\n prec = precision_score(teY, preds)\n rec = recall_score(teY, preds)\n acc = accuracy_score(teY, preds)\n f1 = f1_score(teY, preds)\n\n conf_matrix = confusion_matrix(teY, preds)\n fpr = conf_matrix[0][1] / (conf_matrix[0][1] + conf_matrix[0][0])\n\n return prec, rec, acc, f1, fpr", "def __init__(self):\n\n # List of all the class labels\n self.labels = [0, 1, 2, 3]\n\n # Dictionary to store count of each label in predicted labels list\n self.total_prediction_count = {0: 0, 1: 0, 2: 0, 3: 0}\n\n # Dictionary to store count of each label in actual labels list\n self.total_actual_count = {0: 0, 1: 0, 2: 0, 3: 0}\n\n # Dictionary to store count of correctly predicted labels\n self.total_correct_prediction_count = {0: 0, 1: 0, 2: 0, 3: 0}" ]
[ "0.7634908", "0.7342889", "0.72701824", "0.71024024", "0.70171285", "0.70013344", "0.698941", "0.6960283", "0.6859667", "0.6853133", "0.68380344", "0.6824996", "0.6816467", "0.6797667", "0.67882746", "0.6784857", "0.67818296", "0.6759894", "0.6757059", "0.6757059", "0.6755217", "0.6733315", "0.6727328", "0.6725676", "0.6712994", "0.6694949", "0.66500765", "0.6648719", "0.66442674", "0.663958", "0.6625182", "0.6622401", "0.6617484", "0.6615149", "0.6612085", "0.66026646", "0.6601495", "0.66011053", "0.6591381", "0.65902305", "0.6576817", "0.65536124", "0.65463203", "0.6546215", "0.65425414", "0.6540649", "0.6538149", "0.65262103", "0.6523653", "0.6508419", "0.649318", "0.6492467", "0.64924246", "0.64814955", "0.64796025", "0.6478089", "0.6476874", "0.64744794", "0.64675707", "0.6457207", "0.64493763", "0.6447418", "0.64467037", "0.643747", "0.64343035", "0.6430529", "0.6423525", "0.6408174", "0.6407613", "0.64072055", "0.64069694", "0.6397916", "0.63973147", "0.6388783", "0.6384901", "0.63818365", "0.63749504", "0.6365048", "0.6364654", "0.63591594", "0.6352132", "0.6349", "0.6347045", "0.63393533", "0.6335804", "0.6329286", "0.6325888", "0.6323776", "0.63236743", "0.6318697", "0.6317714", "0.6311224", "0.6310337", "0.6309441", "0.63085246", "0.6297145", "0.6283477", "0.6279802", "0.62719625", "0.62677747" ]
0.6285268
96
True if parameter is active, i.e. its value differs from default.
def __bool__(self): # Do explicit cast to bool, as value can be a NumPy type, resulting in # an np.bool_ type for the expression (not allowed for __bool__) return bool(self.value != self.default_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isActiveFitParam(param):\n return isFitParam(param) and param.isActive()", "def getBoolParam(self, params, name):\n return params.get(name) in ('True', 'true', '1')", "def active(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"active\")", "def check_active(value):\r\n\tif value == \"False\":\r\n\t\treturn False\r\n\telse:\r\n\t\treturn True", "def active(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"active\")", "def active(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"active\")", "def active(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"active\")", "def active(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"active\")", "def default_active():\n return True", "def param_set(params, param):\n if param in params:\n if params[param] is True:\n return True\n return False", "def has_default_parameters(self):\n return self._compat_flags[0] & (0x1 << 0)", "def is_active(self) -> bool:", "def has_parameter(self, name):\n for par in self.params:\n if par.name == name:\n return True\n return False", "def is_active(self) -> bool:\n return self.active == \"active\"", "def _check_valid_basic(self, get_params):\n try:\n if get_params(self.variable):\n return self.default\n except: # noqa e722\n pass\n return not self.default", "def isParameter(self):\n return _libsbml.Rule_isParameter(self)", "def has_custom_param(plot):\n return Plot.has_custom_param(plot)", "def has(self, param):\n\n if param in self.params:\n return True\n\n return False", "def no_params(self) -> bool:\n result = True\n # Fixing issue #92\n if self.properties.parameters:\n return False\n else:\n return True\n # for parameter in self.properties.parameters:\n # if parameter == \"effect\":\n # continue\n # else:\n # result = False\n # break\n # return result", "def is_active():\n return True", "def test_getboolean_with_default(self):\n self.assertEqual(self.config.getboolean('advanced','p'),None)\n self.assertEqual(self.config.getboolean('advanced','p',True),True)", "def params_optional(self) -> bool:\n result = True\n if self.no_params:\n # We will return False, because there are no params at all - optional or not.\n return False\n for parameter, parameter_details in self.parameters.items():\n # Fixing issue #92\n # if parameter == \"effect\":\n # continue\n # We should allow you to print out the options to a YAML file and fill it out like a form.\n # So right now, it will create a long Kubernetes policy, but it will have lots of empty lists that we have to fill out. 
Oh well.\n if not parameter_details.default_value:\n # if not parameter.default_value and parameter.default_value != [] and parameter.default_value != \"\":\n result = False\n break\n return result", "def active(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"active\")", "def hidden_param(self,name):\n # interpret a precedence of None as 0\n precedence = self.get_parameter_object(name).precedence or 0\n return precedence<self.display_threshold", "def active(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"active\")", "def is_valid(self):\n return self.is_active", "def check_parameter(cls, par: str, value):\n\n global dtParameterDesc\n if par not in dtParameterDesc:\n return False\n\n pardata = dtParameterDesc[par]\n\n if isinstance(value, str):\n try:\n value = float(value.replace(',', '.')) * dtg.units[pardata['dunit']]['multiple']\n except ValueError:\n return False\n\n if pardata['type'] is Integral and value != int(value):\n return False\n\n if 'uplim' in pardata and (value > pardata['uplim'] or value < pardata['lowlim']):\n return False\n\n return True", "def active(self):\n return self.starting == 0 and self.stopped == 0", "def is_active(self):\r\n return self.active", "def active(self) -> bool:\n return pulumi.get(self, \"active\")", "def is_active(self):\r\n return True", "def is_on(self):\n return bool(getattr(self.resource, self.variable))", "def active(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"active\")", "def has_param_with_name(self, param_name):\n return param_name in self.params", "def _parse_param_as_bool(\n enodeb: EnodebAcsStateMachine,\n param_name: ParameterName\n) -> str:\n try:\n param = enodeb.get_parameter(param_name)\n pval = param.lower().strip()\n if pval in {'true', '1'}:\n return '1'\n elif pval in {'false', '0'}:\n return '0'\n else:\n logging.warning(\n '%s parameter not understood (%s)', param_name, param)\n return '0'\n except (KeyError, ConfigurationError):\n return '0'", "def __bool__(self):\n return bool(self.get_value())", "def is_active(self) -> bool:\r\n return self.active", "def is_endpoint_parameters_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_endpoint_parameters_enabled\")", "def is_endpoint_parameters_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_endpoint_parameters_enabled\")", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_enabled(self):", "def is_active(self):\n return self.active", "def is_active(self):\n return self.active", "def is_active(self):\n return self.active", "def is_active(self):\n return self.active", "def true_param(p):\n return (not p.startswith('limit_') and\n not p.startswith('error_') and\n not p.startswith('fix_'))", "def isActive(self):\n pass", "def _mixed_precision_enabled_for_params(self) -> bool:\n return 
self.mixed_precision.param_dtype is not None", "def convert_boolean(cls, param, value):\r\n return True", "def isSetConstant(self):\n return _libsbml.Parameter_isSetConstant(self)", "def is_valide(self):\n if self.arguments:\n return True\n else:\n return False", "def is_active(self):\n if self.load_status == \"I\":\n return True\n return False", "def IsActive(self):\n return True", "def IsActive(self):\r\n\r\n return self.active", "def negotiated(self) -> bool:\n return self.__parameters.negotiated", "def _check_boolean_value(arg_dict, key):\n to_check_value = arg_dict[key].lower()\n if to_check_value in ['disabled', 'enabled']:\n return 0\n else:\n return -1", "def _check_parameter(self, data):\n return self._pre_process_record(data) is not None", "def isDefault (self):\n val = self.__getattribute__('StoredValue')\n dft = self.__class__.StoredValue\n return val==dft", "def is_active(self):\n group_names = self.get_var(\"group_names\", default=[])\n master_or_node = 'oo_masters_to_config' in group_names or 'oo_nodes_to_config' in group_names\n return super(OvsVersion, self).is_active() and master_or_node", "def active(self):\n\n return True", "def isActive(state):\n return state in [State.enabled, State.softDisabling]", "def param(self, parameter):\n\n if parameter in self.url_params:\n return self.url_params[parameter]\n return False", "def _check_whether_has_params(self, params) -> bool:\n\n if params:\n return True\n return False", "def is_active(self):\n return self == self.item.active_take", "def test_active_on(self):\n\n self.feature_test.set_percentage(100)\n self.assertTrue(self.feature_test.is_active)", "def __is_active(self, command):\n return True", "def check_passive(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"check_passive\")", "def check_passive(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"check_passive\")", "def _check_helper(self, value, raise_exceptions: bool = True) -> bool:\n return super(ParameterMixin, self)._check_helper(\n value, raise_exceptions=raise_exceptions\n )", "def is_always_active(self) -> bool:\n if len(self.active_periods) == 0:\n return True\n\n if len(self.active_periods) == 1:\n period = self.active_periods[0]\n if period.lower == 0 and period.upper == 24000:\n return True\n\n return False", "def isActive(obj):\n wf = getToolByName(obj, 'portal_workflow')\n if (hasattr(obj, 'inactive_state') and obj.inactive_state == 'inactive') or \\\n wf.getInfoFor(obj, 'inactive_state', 'active') == 'inactive':\n return False\n if (hasattr(obj, 'cancellation_state') and obj.inactive_state == 'cancelled') or \\\n wf.getInfoFor(obj, 'cancellation_state', 'active') == 'cancelled':\n return False\n return True", "def bool_option (arg: Any) -> bool:\n return True", "def applicable(self, input_parameter: str) -> bool:\n raise NotImplementedError()", "def is_active(key, *args):\n return operator.is_active(key, *args)", "def Enabled(self) -> bool:", "def isSetValue(self):\n return _libsbml.Parameter_isSetValue(self)", "def test_default(self):\r\n self.assertEqual(self.option.default, False)", "def isDefault(self) -> bool:\n ...", "def isDefault(self) -> bool:\n ...", "def isDefault(self) -> bool:\n ...", "def __bool__(self) -> bool:\n if self.initial_value == 1 and self.number_of_steps == 0:\n return True\n return False", "def updateParameters(self):\n\n if self.params[1].value:\n if arcpy.Exists(self.params[1].value):\n try:\n min_value = arcpy.GetRasterProperties_management(self.params[1].value, \"MINIMUM\")[0]\n\n if 
str(self.params[8].value) != str(self.params[1].value):\n self.params[7].value = True\n self.params[8].value = str(self.params[1].value)\n else:\n self.params[7].value = False\n\n if str(min_value) == \"0\":\n if self.params[7].value == True:\n self.params[2].value = True\n self.params[3].enabled = True\n self.params[7].value = False\n else:\n self.params[2].value = False\n self.params[3].enabled = False\n\n except arcpy.ExecuteError:\n pass\n\n if self.params[2].value == True:\n self.params[3].enabled = True\n else:\n self.params[3].enabled = False", "def test_property_active(self):\n\n active = self.location.active\n\n self.assertIsInstance(active, bool)\n self.assertRaises(DataObjectError,\n setattr(self, \"active\", False)\n )" ]
[ "0.72914785", "0.7089384", "0.6749947", "0.6713717", "0.6666445", "0.6666445", "0.6666445", "0.6666445", "0.66639507", "0.66464436", "0.6492625", "0.6490066", "0.64801526", "0.6458125", "0.64275116", "0.6371264", "0.63690764", "0.6319099", "0.63014305", "0.629577", "0.62690765", "0.6263312", "0.6262168", "0.62032706", "0.6182522", "0.6180432", "0.61774904", "0.6172559", "0.6170229", "0.6167184", "0.6156288", "0.61560434", "0.61146855", "0.60941607", "0.60928273", "0.6088646", "0.6085716", "0.6035767", "0.6035767", "0.60267013", "0.60267013", "0.60267013", "0.60267013", "0.60267013", "0.60267013", "0.60267013", "0.60267013", "0.60267013", "0.60267013", "0.60267013", "0.60267013", "0.60267013", "0.60267013", "0.60267013", "0.60267013", "0.60267013", "0.60267013", "0.60267013", "0.60265416", "0.6016132", "0.6016132", "0.6016132", "0.6016132", "0.59967494", "0.59890985", "0.5978519", "0.5975706", "0.5931531", "0.59021974", "0.589874", "0.5889518", "0.588649", "0.5881281", "0.58705795", "0.5858793", "0.5851791", "0.5842958", "0.5840912", "0.58201146", "0.5819927", "0.58172876", "0.5813165", "0.58025235", "0.58015394", "0.58000845", "0.58000845", "0.5792101", "0.5791306", "0.57912886", "0.57892597", "0.57879615", "0.57782704", "0.5777887", "0.5775837", "0.5772142", "0.57518095", "0.57518095", "0.57518095", "0.57461643", "0.5740139", "0.5733469" ]
0.0
-1
String form of parameter value used to convert it to/from a string.
def value_str(self): return self._to_str(self.value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_string(self, name, value):\r\n \r\n return str(value)", "def string(self, value):\n # respect {None}\n if value is None:\n # by leaving it alone\n return None\n # my value knows\n return str(value)", "def get_parameter_string(self, parameter):\n if not self.has_converged or self.parameters is None:\n return None\n if parameter not in self.parameters:\n return None\n\n fmt = self.get_parameter_format(parameter)\n unit = self.get_parameter_unit(parameter)\n value = fmt % self.parameters[parameter]\n\n error = self.errors[parameter]\n if np.isfinite(error):\n error = fmt % error\n else:\n error = None\n\n s = f\"{parameter} = {value}\"\n if error is not None:\n s += f' +/- {error}'\n if unit is not None:\n s += f' {unit}'\n\n return s", "def as_string(self, value, context=None):\n return str(value)", "def valueToString():", "def value(self, p_str, p_str_1=None): # real signature unknown; restored from __doc__ with multiple overloads\n return \"\"", "def __repr_parameter__(self, name: str, value: Any) -> str:\n return f\"{name}={value!r}\"", "def value_to_string(self, value, type_class, param_info=None):\n if isinstance(value, Entry):\n var = self.get_variable(value.code_entry)\n if isinstance(value.target, list):\n return \"tuple(%s)\" % var\n return var\n else:\n if type_class == TypeClass.STRING:\n return '\"%s\"' % value\n elif type_class == TypeClass.ENUM:\n name = value.typeName\n suffix = self.get_last_part(name)\n upper_chars = [c for c in suffix if c.isupper()]\n as_name = \"%s_%s\" % (\"\".join(upper_chars), value.value)\n self.add_import('%s.%s' % (value.typeName, value.value), as_name)\n #return value.value\n return as_name\n elif type_class == TypeClass.CHAR:\n return \"uno.Char(\\\"%s\\\")\" % value.value\n elif type_class == TypeClass.SEQUENCE:\n comp_type, n = self.parse_seq(param_info)\n _comp_type_class = comp_type.getTypeClass()\n str_val = [self.value_to_string(v, _comp_type_class) for v in value]\n return \"(%s)\" % \", \".join(str_val)\n else:\n return str(value)", "def get_param_as_string(self):\n\t\treturn call_sdk_function('PrlResult_GetParamAsString', self.handle)", "def parse(self,value):\r\n\t\treturn str(value)", "def param2str(val):\n if isinstance(val, dict):\n try:\n return json.dumps(val)\n except TypeError:\n s = str(val)\n print(\"[WARNING] cannot convert value ('%s') to a string with json.dumps\" % s)\n\n return str(val)", "def get_prep_value(self, value):\n return str(value)", "def param_value(self):\n if self.string:\n return self.string\n if self.token:\n return self.token\n if self.number:\n return self.number\n if self.date:\n return self.date\n if self.quantity:\n return self.quantity\n if self.reference:\n return self.reference\n return ''", "def transform_python(self, value):\n return str(value)", "def to_string(value: Any) -> str:\n return StringConverter.to_string_with_default(value, '')", "def getString(self):\n string = self.itemType.find('format').text.strip()\n paramString = string[string.find('('):]\n string = string[:string.find('(')]\n for i in self.params.keys():\n paramString = paramString.replace(i,str(self.params[i]) if isFloat(str(self.params[i])) else '\"'+str(self.params[i]).replace('\"','\\\\\"')+'\"',1)\n return string+paramString", "def get_val_str(self):\n fmt_str = self.template.get_format_str()\n if self.val_obj is None:\n return \"\"\n elif fmt_str:\n return fmt_str % (self.val_obj.val)\n else:\n return str(self.val_obj.val)", "def to_string(self):\n return _parrot_str_to_str(self.val)", "def make_string(value):\n if 
value:\n return str(value)\n return None", "def str_value(self, data):\n return str(self.value(data))", "def _encode_runtime_parameter(param: data_types.RuntimeParameter) -> str:\n if param.ptype is int:\n type_enum = pipeline_pb2.RuntimeParameter.INT\n elif param.ptype is float:\n type_enum = pipeline_pb2.RuntimeParameter.DOUBLE\n else:\n type_enum = pipeline_pb2.RuntimeParameter.STRING\n type_str = pipeline_pb2.RuntimeParameter.Type.Name(type_enum)\n return f'{param.name}={type_str}:{str(dsl.PipelineParam(name=param.name))}'", "def typeString(self):\n return Parameter.string_dict[self._field.type]", "def __str__(self) -> str:\n return f\"{self.value}\"", "def __str__(self):\n return f'{self.value}'", "def _getParameterValueString(self, name):\n warnings.warn(\"This function is deprecated; parameters have been replaced with uniform inputs in 1.38.\", DeprecationWarning, stacklevel = 2)\n return \"\"", "def format(cls, value: Optional[T]) -> str:\n return str(value)", "def __str__(self):\n\n return \"<ExoParameter>: {0}\".format(self.__dict__)", "def _valueString(value,verbose=0):\n\n t = type(value)\n vstr = t.__name__\n if issubclass(t, str):\n if len(value)>42:\n vstr = vstr + \", value = \"+ `value[:39]` + '...'\n else:\n vstr = vstr + \", value = \"+ `value`\n elif issubclass(t, _listTypes):\n return \"%s [%d entries]\" % (vstr, len(value))\n elif (PY3K and issubclass(t, io.IOBase)) or \\\n (not PY3K and issubclass(t, file)):\n vstr = vstr + \", \"+ `value`\n elif issubclass(t, _numericTypes):\n vstr = vstr + \", value = \"+ `value`\n elif _isinstancetype(value):\n cls = value.__class__\n if cls.__module__ == '__main__':\n vstr = 'instance of class ' + cls.__name__\n else:\n vstr = 'instance of class ' + cls.__module__ + '.' + cls.__name__\n elif issubclass(t, _functionTypes+_methodTypes):\n # try using Fredrik Lundh's describe on functions\n try:\n vstr = vstr + ' ' + describe.describe(value)\n try:\n if verbose and value.__doc__:\n vstr = vstr + \"\\n\" + value.__doc__\n except AttributeError:\n pass\n except (AttributeError, TypeError):\n # oh well, just have to live with type string alone\n pass\n elif issubclass(t, _numpyArrayType):\n vstr = vstr + \" \" + str(value.dtype) + \"[\"\n for k in range(len(value.shape)):\n if k:\n vstr = vstr + \",\" + `value.shape[k]`\n else:\n vstr = vstr + `value.shape[k]`\n vstr = vstr + \"]\"\n else:\n # default -- just return the type\n pass\n return vstr", "def get_string_value(self, obj, field):\n return smart_unicode(field.value_to_string(obj))", "def format_parameter(param, required):\n\n param_string = check_param(flatten_param(param))\n if not required:\n param_string += '=None'\n return param_string", "def param_str(self, pnames=None):\n l = self.get_params(pnames)\n s = \"\"\n for p in l:\n s += \"%s : %s\\n\" % (p.public_name, p.tostr(self))\n return s", "def format(value):\n if isinstance(value, str):\n return '\"{}\"'.format(value)\n if isinstance(value, bool):\n return 'true' if value is True else 'false'\n elif isinstance(value, dict):\n assert False, 'Not implemented for dictionary type'\n elif hasattr(value, '__len__'): # should cover list and numpy array\n return '{{{}}}'.format(', '.join([str(v) for v in value]))\n else: # assume scalar value\n return value", "def t_STRING(t):\n return t", "def getStrParam(self, paramkey, default=None):\n value = self.request.getParameter(paramkey)\n if value is None: return default\n return value", "def __str__(self):\n\n\t\treturn str(self.__value)", "def convert_to_string(value: Any) -> 
str:\n if isinstance(value, str):\n return value\n\n if isinstance(value, bytes):\n return value.decode(\"utf-8\")\n\n return str(value)", "def __str__(self):\n\n\t\tif self.rawValue == None: return str()\n\n\t\tx = self.rawValue\n\n\t\tif not x.isdigit() or len(x) != 44 or len(set(x)) == 1:\n\t\t\treturn self.rawValue\n\n\t\treturn '{} {} {} {} {} {} {} {} {} {} {}'.format(x[:4], x[4:8], x[8:12], x[12:16], x[16:20], x[20:24], x[24:28], x[28:32], x[32:36], x[36:40], x[40:44])", "def string_p(value):\n if type(value) is not str:\n raise Invalid(\"invalid value type {value}\".format(value=value))", "def __str__(self):\n # Note: Before equate was handled explicitly, the old\n # code would do either \"--name \" or \"--name=value \",\n # or \" -name \" or \" -name value \". This choice is now\n # now made explicitly when setting up the option.\n if self.value is None:\n return \"%s \" % self.names[0]\n if self.is_filename:\n v = _escape_filename(self.value)\n else:\n v = str(self.value)\n if self.equate:\n return \"%s=%s \" % (self.names[0], v)\n else:\n return \"%s %s \" % (self.names[0], v)", "def __str__(self):\n return self.parameters.__str__()", "def getParamString(paramName, arrayIndex, paramValue):\n\n printGauge = False\n spec1 = \"{:6}\"\n spec2 = \"{:5}\"\n spec3 = \"{:>15.6E}\"\n\n formatSpecParam = ('IFORMT', 'IFORMY')\n\n if paramName in formatSpecParam:\n fullStr = \" \" + spec1.format(paramName) + '\\n' + \" \" + paramValue\n\n else:\n fullStr = \" \" + \\\n spec1.format(paramName) + spec2.format(arrayIndex) + \\\n spec3.format(paramValue)\n\n # if printGauge == True:\n # print(\"12345612345123456789012345\")\n\n return fullStr + '\\r\\n'", "def putstrparam(self,param_,parvalue_):\n if isinstance(parvalue_,unicode):\n parvalue_ = parvalue_.encode(\"utf-8\",errors=\"replace\")\n res = __library__.MSK_XX_putstrparam(self.__nativep,param_,parvalue_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def call_str(pvs):\n s = \"'{}', '{}'\".format(pvs.get('place'), pvs.get('stat_var'))\n if pvs.get('measurement_method'):\n s += \", measurement_method='{}'\".format(\n pvs.get('measurement_method'))\n if pvs.get('observation_period'):\n s += \", observation_period='{}'\".format(\n pvs.get('observation_period'))\n if pvs.get('unit'):\n s += \", unit='{}'\".format(pvs.get('unit'))\n if pvs.get('scaling_factor'):\n s += \", scaling_factor={}\".format(pvs.get('scaling_factor'))\n return s", "def __str__(self):\n if not six.PY3:\n return unicode(self.args[0]).encode('utf-8')\n\n return self.args[0]", "def putstrparam(self,param_,parvalue_): # 3\n if not isinstance(param_,sparam): raise TypeError(\"Argument param has wrong type\")\n res = self.__obj.putstrparam(param_,parvalue_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def get_string(self, **kwargs):\n ...", "def __str__(self): # pragma: nocover\n return str(self.value)", "def getstrparam(self,param_):\n maxlen_ = (1 + self.getstrparamlen((param_)))\n len_ = ctypes.c_int32()\n parvalue_ = (ctypes.c_char * (maxlen_))()\n res = __library__.MSK_XX_getstrparam(self.__nativep,param_,maxlen_,ctypes.byref(len_),parvalue_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n len_ = len_.value\n _len_return_value = len_\n _parvalue_retval = parvalue_.value.decode(\"utf-8\",errors=\"replace\")\n return (_len_return_value,_parvalue_retval)", "def makeinputstring(variabel):\r\n if type(variabel) == int:\r\n return str(variabel)\r\n elif 
type(variabel) == float:\r\n return str(int(float(variabel)))\r\n else:\r\n return str(variabel)", "def text(self, v=''):\n return str(v)", "def convert_to_str(value):\n\tif value is None:\n\t\treturn '-'\n\treturn str(value)", "def getstrparam(self,param_): # 3\n if not isinstance(param_,sparam): raise TypeError(\"Argument param has wrong type\")\n maxlen_ = (1 + self.getstrparamlen((param_)))\n arr_parvalue = array.array(\"b\",[0]*((maxlen_)))\n memview_arr_parvalue = memoryview(arr_parvalue)\n res,resargs = self.__obj.getstrparam(param_,maxlen_,memview_arr_parvalue)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _len_return_value,retarg_parvalue = resargs\n retarg_parvalue = arr_parvalue.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return _len_return_value,retarg_parvalue", "def get_str(self, name):\n return str(self.field(name).toString())", "def __str__(self):\n return str(self.value)", "def __str__(self):\n return str(self.value)", "def __str__(self):\n return str(self.value)", "def __str__(self):\n return str(self.value)", "def __str__(self):\n return str(self.value)", "def __str__(self):\n return str(self.value)", "def __str__(self) -> str:\n return str(self.value)", "def getEventIDValueString(*args, **kwargs):\n pass", "def convert(cls, value: Any) -> Optional[str]:\n # Can be optional\n if value is None:\n return None\n\n cls.assert_value_ok(isinstance(value, str), value)\n\n return value", "def Value(self) -> str:", "def __str__ (self):\n return f'\"{self.value[0]}|{self.value[1]}\"'", "def fmt(self, val):\n if type(val) in self.QUOTABLE_TYPES:\n s = decode_string(val)\n return u\"{0}{1}{2}\".format(self.quotechar, s, self.quotechar)\n else:\n return decode_string(str(val))", "def __str__(self) -> str:\n return str(self.getvalue())", "def __str__(self) -> str:\n return str(self.getvalue())", "def __str__(self) -> str:\n return str(self.getvalue())", "def url_to_string(cls, v, values, **kwargs):\n return str(v)", "def __str__(self) -> str:\n return self.value", "def __str__(self) -> str:\n return self.value", "def __str__(self) -> str:\n return self.value", "def __str__(self) -> str:\n return self.value", "def __str__(self) -> str:\n return self.value", "def string_val(self) -> str:\n return self.current_token", "def process_bind_param(self, value, dialect) -> str:\n assert isinstance(value, self._cls)\n return value.SerializeToString()", "def getvalue(self):\n if self._value:\n _d,_t=self._value.strip(' ').replace('\"', '').split(',')\n _d=_d.split('/')\n _t=_t.split(':')\n return '%s%s%sT%s%s%s'%(_d[0].zfill(4), _d[1].zfill(2), _d[2].zfill(2),\n _t[0].zfill(2), _t[1].zfill(2), _t[2].zfill(2))", "def __str__(self):\r\n\t\treturn \"({}, {})\".format(self.type, self.value)", "def format_arg(arg_name: str, value: Any, max_length: int = 200) -> str:\n return \"{arg_name}={value}\".format(\n arg_name=arg_name, value=trim_string(repr(value), max_length=max_length)\n )", "def __str__(self):\n\n if self._b == b'':\n return ''\n\n if len(self.quote) == 1:\n s = self.to_short()\n else:\n s = self.to_long()\n\n assert eval('b' + self.quote + s + self.quote) == self._b\n\n return s", "def string_builder_variant(request):\n return request.param", "def _get_parameter_string(min_key=-1, min_mode=0,\n min_acousticness=0.0, min_danceablility=0.0,\n min_energy=0.0, min_instrumentalness=0.0,\n min_liveness=0.0, min_loudness=-60,\n min_speechiness=0.0, min_valence=0.0, min_tempo=0,\n max_key=11, max_mode=1,\n max_acousticness=1.0, 
max_danceablility=1.0,\n max_energy=1.0, max_instrumentalness=1.0,\n max_liveness=1.0, max_loudness=0,\n max_speechiness=1.0, max_valence=1.0, max_tempo=99999):\n return (f\"&min_key={min_key}&max_key={max_key}\" +\n f\"&min_mode={min_mode}&max_mode={max_mode}\" +\n f\"&min_acousticness={min_acousticness}&max_acousticness={max_acousticness}\" +\n f\"&min_danceablility={min_danceablility}&max_danceablility={max_danceablility}\" +\n f\"&min_energy={min_energy}&max_energy={max_energy}\" +\n f\"&min_instrumentalness={min_instrumentalness}&max_instrumentalness={max_instrumentalness}\" +\n f\"&min_liveness={min_liveness}&max_liveness={max_liveness}\" +\n f\"&min_loudness={min_loudness}&max_loudness={max_loudness}\" +\n f\"&min_speechiness={min_speechiness}&max_speechiness={max_speechiness}\" +\n f\"&min_valence={min_valence}&max_valence={max_valence}\" +\n f\"&min_tempo={min_tempo}&max_tempo={max_tempo}\")", "def params_to_arg_string(**params):\n\targs = params_to_args(**params)\n\treturn ' '.join(args)", "def convert_to_string(value):\n if isinstance(value, str):\n return value\n # Boolean test must come before integer check!\n elif isinstance(value, bool):\n return str(value).lower()\n elif isinstance(value, int):\n return str(value)\n elif isinstance(value, float):\n return str(value)\n elif isinstance(value, UTCDateTime):\n return str(value).replace(\"Z\", \"\")\n else:\n raise TypeError(\"Unexpected type %s\" % repr(value))", "def as_str(self):\n return self.as_type(str)", "def value(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"value\", _args)\n return _ctx.execute_sync(str)", "def to_safe_annotation_value(value):\n return str(value)", "def value_to_string(self, obj):\n value = self._get_val_from_obj(obj)\n return self.get_prep_value(value)", "def value_to_string(self, obj):\n value = self._get_val_from_obj(obj)\n return self.get_prep_value(value)", "def value_to_string(self, obj):\n value = self._get_val_from_obj(obj)\n return self.get_prep_value(value)", "def varStringMod(self, arg):\n\t\targ[0] = \"'\" + arg[0] + \"'\"\n\t\treturn arg", "def _getArgStr(self):\n return \"name=%s, host=%s, port=%s\" % (self.name, self.host, self.port)", "def _getArgStr(self):\n return \"name=%r\" % (self.name)", "def plain_text( argument ):\n return str( argument )", "def __str__(self):\r\n return str(self.value())", "def to_python(self, name, value):\r\n \r\n if len( str(value).strip() ) == 0:\r\n raise admin.ArgValidationException(\"The value for the '%s' parameter cannot be empty\" % (name))\r\n \r\n return value", "def __str__(self):\n\n if not self:\n return '\\0'\n\n parts = []\n for name, value in self:\n if value is None:\n item = name\n else:\n item = '%s=%s' % (name, value)\n if (not self.strict) and (len(item) > 255):\n item = item[:255]\n parts.append(chr(len(item)))\n parts.append(item)\n\n return ''.join(parts)", "def value_to_string(self, obj):\n value = self.value_from_object(obj)\n return value", "def ToString():\n @pass_failures\n def to_string(data):\n value = data.value\n if isinstance(value, Mapping):\n value = {k: str(v) for k, v in value.items()}\n else:\n value = str(value)\n data.value = value\n return data\n return to_string", "def _sanitize_param(self, param):\n if param:\n # Can't send unicode.\n param = str(param)\n return param" ]
[ "0.7203786", "0.697954", "0.69234365", "0.68567574", "0.6801373", "0.67694676", "0.6758182", "0.6719559", "0.66809326", "0.6662741", "0.6623675", "0.65918845", "0.6585053", "0.65313864", "0.6505384", "0.64526814", "0.64182854", "0.635534", "0.6343342", "0.62921274", "0.6290848", "0.6281789", "0.6272183", "0.6267714", "0.62626904", "0.6242687", "0.6229593", "0.6222528", "0.62213117", "0.62185353", "0.621057", "0.62033653", "0.6184213", "0.6171911", "0.6154228", "0.6148852", "0.61456245", "0.61371183", "0.6132648", "0.6127324", "0.61240166", "0.61222273", "0.61150575", "0.6114813", "0.6105103", "0.6102014", "0.60945946", "0.6093426", "0.60883784", "0.60874885", "0.6086472", "0.6076169", "0.60742396", "0.6072478", "0.6072478", "0.6072478", "0.6072478", "0.6072478", "0.6072478", "0.6060853", "0.60574454", "0.6052615", "0.604522", "0.60313326", "0.6029417", "0.6027284", "0.6027284", "0.6027284", "0.6021899", "0.6020515", "0.6020515", "0.6020515", "0.6020515", "0.6020515", "0.5988751", "0.59805346", "0.5977559", "0.5956816", "0.5954879", "0.5952667", "0.5947031", "0.59464985", "0.59326655", "0.5911685", "0.5892379", "0.58921033", "0.589018", "0.58894813", "0.58894813", "0.58894813", "0.58889014", "0.58721745", "0.5866968", "0.58663994", "0.58642167", "0.5862382", "0.5862128", "0.5859312", "0.58560246", "0.5848845" ]
0.63790995
17
Short humanfriendly string representation of parameter object.
def __repr__(self): return "<katpoint.Parameter %s = %s %s at 0x%x>" % \ (self.name, self.value_str, self.units, id(self))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n\n return \"<ExoParameter>: {0}\".format(self.__dict__)", "def __str__(self):\n return self.parameters.__str__()", "def __str__(self):\r\n res = [self.Name + ' parameters:']\r\n for t in self._tracked_properties:\r\n res.append(t + ':' + str(getattr(self, t)))\r\n for k, v in sorted(self.Params.items()):\r\n res.append(str(k) + ':' + str(v))\r\n return '\\n'.join(res)", "def __str__(self):\n # defaults to the class name\n if self.p is None:\n return self.__class__.__name__\n\n # class name and parameter values\n temp = [str(i) for i in self.p]\n return self.__class__.__name__+'('+', '.join(temp)+')'", "def __str__(self):\n num_active = len([p for p in self if p])\n summary = \"%s has %d parameters with %d active (non-default)\" % \\\n (self.__class__.__name__, len(self), num_active)\n if num_active == 0:\n return summary\n return summary + ':\\n' + '\\n'.join(('%s = %s %s (%s)' % ps)\n for ps in self.param_strs())", "def __repr_parameter__(self, name: str, value: Any) -> str:\n return f\"{name}={value!r}\"", "def param_str(self, pnames=None):\n l = self.get_params(pnames)\n s = \"\"\n for p in l:\n s += \"%s : %s\\n\" % (p.public_name, p.tostr(self))\n return s", "def to_short_string(self):\n return f'{self.name} - {self.resource_type}'", "def __str__(self):\n args = []\n if len(self.args) > 0:\n args += ['{}'.format(a) for a in self.args]\n if len(self.kwargs) > 0:\n args += [\"{}={}\".format(k, v) for k, v in self.kwargs.items()]\n return '{}({})'.format(self.name, ', '.join(args))", "def __str__(self):\n return \"{}: {} params, wires {}\".format(self.name, len(self.params), self.wires)", "def __repr__(self):\n s = self.name\n if self.param != \"None\":\n s += ' with parameter '+self.param\n s += '; '+self.applyTo\n if self.applyTo != \"global\":\n s += ': '+self.conditions\n return s", "def __repr__(self):\n name = self.__class__.__name__\n # values = \", \".join(\"{}={}\".format(k, repr(v)) for k, v in sorted(self.__dict__.items())\n # if k[0] != \"_\" and not k.endswith('manager'))\n values = \", \".join(\"{}={}\".format(k, v) for k, v in self.parameters.items())\n return \"{}({})\".format(name, values)", "def format_parameter(param, required):\n\n param_string = check_param(flatten_param(param))\n if not required:\n param_string += '=None'\n return param_string", "def display_parameters(self):\n l = []\n for param in self.parameters.all():\n if len(param.value) > 16:\n l.append(u\"{}={}...\".format(param.name, param.value[:16]))\n else:\n l.append(u\"{}={}\".format(param.name, param.value))\n return \"; \".join(l)", "def __parameters_string(self):\n if self._parameters == list():\n return ''\n\n docstring = \"\"\"\n\nParameters:\n\"\"\"\n \n # Compute maximum length of any parameter name\n maxlen = 0\n for param in self._parameters:\n maxlen = max(maxlen, len(param[0]))\n\n # Build documentation for parameters\n for (on_param, param) in enumerate(self._parameters):\n if on_param > 0:\n docstring += '\\n'\n\n docstring += ' ' + param[0].ljust(maxlen + 2)\n doc = wrap(param[1], columns - maxlen - 4)\n padding = str('')\n for line in doc.split('\\n'):\n docstring += padding + line + '\\n'\n padding = str('').ljust(maxlen + 4)\n \n # Pull off the final '\\n'\n return docstring[0:len(docstring)-1]", "def _parameter_summary(self, parameters, parameters_to_show=4):\n params = parameters\n if len(parameters) > parameters_to_show:\n params = parameters[:2] + [\"...\"] + parameters[-2:]\n return \", \".join(params)", "def __str__(self):\n info_nvps = [\n ('sid', 
self.sid)\n ] + self.__str_additional_info_nvps__()\n # Create a \"name=val\" string for each name-value pair, then concatenate\n # them all together, separated by commas.\n info_str = ', '.join([\n '%s=%s' % (name, str(val)) for (name, val) in info_nvps])\n return '%s(%s)' % (self.__class__.__name__, info_str)", "def __str__(self):\n return self.params", "def optstr(self) -> str:\n typestr: str = (\n self.typestr().upper() if self.type_frozen else self.typestr()\n )\n if not self.ns_param or not self.argname_shorten:\n # * makes sure it's not wrapped\n return (\n self.namestr()\n if self.is_help\n else f\"{self.namestr()}*<{typestr}>\"\n )\n\n ret: List[str] = []\n for term in sorted(self.terminals, key=len):\n ret.append(f\"~<ns>.{term}\")\n return \", \".join(ret) + f\"*<{typestr}>\"", "def __str__(self):\n return self.fmt.format(*self.args, **self.kwargs)", "def __str__(self):\n\n def args_with_defaults(args, defaults):\n \"\"\"\n Args to string, with defaults inserted where appropriate\n\n :param args: arguments\n :type args: ``list``\n :param defaults: default value of arguments\n :type defaults: ``list``\n\n :return: string representation of the signature arguments\n :rtype: ``str``\n \"\"\"\n\n def argument(arg, default):\n \"\"\"\n Arg=Default pair if Default is present\n\n :param arg: argument name\n :type arg: ``str``\n :param default: default value for argument\n :type default: ``object``\n\n :return: string representation\n :rtype: ``str``\n \"\"\"\n return \"{0}={1}\".format(arg, default) if default else arg\n\n return \", \".join(\n reversed(\n [\n argument(arg, default)\n for arg, default in zip_longest(\n reversed(args), reversed(defaults)\n )\n ]\n )\n )\n\n args = \"\".join(\n [\n args_with_defaults(self.args, self.defaults),\n \", *{0}\".format(self.varargs) if self.varargs else \"\",\n \", **{0}\".format(self.keywords) if self.keywords else \"\",\n ]\n )\n\n return \"{0}({1})\".format(self.name, args)", "def __str__(self):\n return type(self).__name__ + str(vars(self))", "def optstr(self) -> str:\n if self.is_help:\n return self.namestr()\n typestr: str = (\n self.typestr().upper() if self.type_frozen else self.typestr()\n )\n\n if not self.ns_param or not self.argname_shorten:\n # * makes sure it's not wrapped'\n return f\"{self.namestr()}*[{typestr}]\"\n\n ret: List[str] = []\n for term in sorted(self.terminals, key=len):\n ret.append(f\"~<ns>.{term}\")\n return \", \".join(ret) + f\"*[{typestr}]\"", "def __str__(self):\n if self.f_has_range():\n lenstr = \"len:%d\" % self.f_get_range_length()\n else:\n lenstr = \"\"\n\n if self.v_comment:\n commentstr = \"`%s`\" % self.v_comment\n else:\n commentstr = \"\"\n\n if commentstr or lenstr:\n if commentstr and lenstr:\n combined_str = \"%s, %s\" % (lenstr, commentstr)\n elif commentstr:\n combined_str = commentstr\n elif lenstr:\n combined_str = lenstr\n else:\n raise RuntimeError(\"You shall not pass!\")\n\n infostr = \" (%s)\" % combined_str\n\n else:\n infostr = \"\"\n\n return_string = \"%s %s%s\" % (self.f_get_class_name(), self.v_full_name, infostr)\n\n if not self.f_is_empty():\n return_string += \": \" + self.f_val_to_str()\n\n return return_string", "def param_description(hf, var):\n val = hf['/input/params/%s' % var].value\n if type(val) != str:\n val = val.decode('UTF-8')\n val = unpickle(val)\n desc = val.description\n\n if desc:\n return desc\n return var", "def __str__(self):\n return \"<%s: %s>\" % (self.__class__, self.describe())", "def __str__(self):\n return '\\tHandle: %(handle)d (0x%(handle).2x)\\n' 
\\\n '\\tLength: %(length)d (0x%(length).2x)\\n' \\\n '\\tParameters:\\n' \\\n '\\t\\t%(data)s' % {'handle': self.handle, 'length': self.length,\n 'data': self.format_raw_data(self.parameters)}", "def __str__(self):\n return '\\tHandle: %(handle)d (0x%(handle).2x)\\n' \\\n '\\tLength: %(length)d (0x%(length).2x)\\n' \\\n '\\tParameters:\\n' \\\n '\\t\\t%(data)s' % {'handle': self.handle, 'length': self.length,\n 'data': self.format_raw_data(self.parameters)}", "def __str__(self):\n\n OptiObjFunc_str = \"\"\n if self.parent is None:\n OptiObjFunc_str += \"parent = None \" + linesep\n else:\n OptiObjFunc_str += (\n \"parent = \" + str(type(self.parent)) + \" object\" + linesep\n )\n OptiObjFunc_str += 'description = \"' + str(self.description) + '\"' + linesep\n if self._func[1] is None:\n OptiObjFunc_str += \"func = \" + str(self._func[1])\n else:\n OptiObjFunc_str += (\n \"func = \" + linesep + str(self._func[1]) + linesep + linesep\n )\n return OptiObjFunc_str", "def get_parameter_string(self, parameter):\n if not self.has_converged or self.parameters is None:\n return None\n if parameter not in self.parameters:\n return None\n\n fmt = self.get_parameter_format(parameter)\n unit = self.get_parameter_unit(parameter)\n value = fmt % self.parameters[parameter]\n\n error = self.errors[parameter]\n if np.isfinite(error):\n error = fmt % error\n else:\n error = None\n\n s = f\"{parameter} = {value}\"\n if error is not None:\n s += f' +/- {error}'\n if unit is not None:\n s += f' {unit}'\n\n return s", "def __str__(self):\n return \"%s(%s)\" % (self[0], \", \".join(arg.name for arg in self[1:]))", "def __str__(self):\n datastr = self.f_val_to_str()\n return_string = \"%s %s\" % (self.f_get_class_name(), self.v_full_name)\n if self.v_comment:\n return_string += \" (`%s`)\" % self.v_comment\n if datastr:\n return_string += \": \" + datastr\n\n return return_string", "def _params_formatter(field, description):\n heads = ['param']\n types = _or_types(field)\n if types:\n heads.append(types)\n heads.append(rst.escape(field['name']))\n tail = description\n return heads, tail", "def _to_str(obj: object) -> str:\n if obj is Ellipsis:\n return '...'\n elif isinstance(obj, type) and not isinstance(obj, _GENERIC_ALIAS_TYPE):\n if obj.__module__ == 'builtins':\n return obj.__qualname__\n else:\n return f'{obj.__module__}.{obj.__qualname__}'\n else:\n return repr(obj)", "def pretty(self):\n def arg_to_str(name, value):\n if value is True:\n return '+' + name\n elif value is False:\n return '~' + name\n elif isinstance(value, Var):\n if value.name == name:\n return '?' 
+ value.name\n return name + \"=\" + value.name\n else:\n return name + \"=\" + repr(value)\n\n if len(self.args) == 0:\n return self.name\n return \"{}[{}]\".format(self.name,\n \", \".join(arg_to_str(name, value)\n for name, value in self.args))", "def __str__(self):\n s = \"\"\n for field in self.fields:\n if field.size not in VAR_PREFIXES:\n s += field.name + \": \" + str(field.size) + \" bits with value \" + str(field.value) + \".\\n\"\n else:\n s += field.name + \": variable size: \" + str(field.size) + \", with value \" + str(field.value) + \".\\n\"\n\n return s", "def __str__(self):\n return '\\tCode: %(code)d (0x%(code).2x)\\n' \\\n '\\tLength: %(length)d (0x%(length).2x)\\n' \\\n '\\tParameters:\\n' \\\n '\\t\\t%(data)s' % {'code': self.code, 'length': self.length,\n 'data': self.format_raw_data(self.parameters)}", "def __str__(self):\r\n\t\treturn \"({}, {})\".format(self.type, self.value)", "def __repr__(self):\n class_name = type(self).__name__\n paras = self.get_params(deep=False)\n result = [class_name, \"(\"]\n first = True\n for name, para in paras.items():\n if first:\n first = False\n else:\n result.append(\", \")\n result.append(self.__repr_parameter__(name, para))\n return \"\".join(result) + \")\"", "def __str__(self):\n return '\\tOpcode: %(opcode)d (0x%(opcode).2x)\\n' \\\n '\\tLength: %(length)d (0x%(length).2x)\\n' \\\n '\\tParameters:\\n' \\\n '\\t\\t%(data)s' % {'opcode': self.opcode, 'length': self.length,\n 'data': self.format_raw_data(self.parameters)}", "def get_str(self, obj):\n if self.pretty:\n return pprint.pformat(obj)\n else:\n return str(obj)", "def __repr__(self):\n return pformat(vars(self))", "def short_def(self):\r\n return f\"{self.lat}, {self.lon}\"", "def display_parameters(self):\n\n self.logging.debug(\"============\")\n for attr in self.parm_list:\n self.logging.debug(attr.label + \" (\" + attr.when + \")\" + \" = \" + str(attr.value))\n self.logging.debug(\"============\")", "def __str__(self):\r\n\r\n retval = self.__class__.__name__ + ' ('\r\n for val in self.VALUES:\r\n value = getattr(self, val, None)\r\n if value is not None:\r\n retval += '%s:%.4f ' % (val, getattr(self, val))\r\n return retval.strip() + ')'", "def _params_formatter(field):\n heads = ['param']\n types = _or_types(field)\n if types:\n heads.append(types)\n heads.append(field['name'])\n tail = field.get('description', '')\n return heads, tail", "def __str__(self):\n return '{}({})'.format(self.name, ', '.join(self._kernel_args))", "def __str__(self):\n return (self._name + \", \" + self._type + \" in \" + self._const)", "def _short_info(self) -> str:\n nullable = \"Nullable \" if self._is_nullable else \"\"\n\n # Good candidate for python pattern matching once <3.10 support no longer required\n num_metadata_items = len(self.__metadata)\n if num_metadata_items == 0:\n metadata = \"\"\n elif num_metadata_items == 1:\n metadata = f\" [with {num_metadata_items} metadata item]\"\n else:\n metadata = f\" [with {num_metadata_items} metadata items]\"\n\n return f\"<{nullable}{self.__class__.__name__}{metadata}: {self._resolve_field_name()}>\"", "def __str__(self) -> str:\n str_ = self.method\n if self.basis is not None:\n str_ += f'/{self.basis}'\n if self.auxiliary_basis is not None:\n str_ += f', auxiliary_basis: {self.auxiliary_basis}'\n if self.dispersion is not None:\n str_ += f', dispersion: {self.dispersion}'\n if self.cabs is not None:\n str_ += f', cabs: {self.cabs}'\n if self.solvation_method is not None:\n str_ += f', solvation_method: {self.solvation_method}'\n if 
self.solvent is not None:\n str_ += f', solvent: {self.solvent}'\n if self.solvation_scheme_level is not None:\n str_ += f\", solvation_scheme_level: '{str(self.solvation_scheme_level)}'\"\n if self.software is not None:\n str_ += f', software: {self.software}'\n if self.software_version is not None:\n str_ += f', software_version: {self.software_version}'\n if self.args is not None and self.args and all([val for val in self.args.values()]):\n if any([key == 'keyword' for key in self.args.keys()]):\n str_ += ', keyword args:'\n for key, arg in self.args.items():\n if key == 'keyword':\n str_ += f' {arg}'\n if self.method_type is not None:\n str_ += f' ({self.method_type})'\n return str_", "def _formal_params(self, doclet):\n name, paren, params = self.arguments[0].partition('(')\n return ('(%s' % params) if params else '(%s)' % ', '.join(doclet['meta']['code']['paramnames'])", "def __str__(self):\n return f\"{self._desc:16s}\"", "def __str__(self) -> str:\n if self.scalar_vector:\n return f\"({self.w:-.4f} {self.x:+.4f}i {self.y:+.4f}j {self.z:+.4f}k)\"\n return f\"({self.x:-.4f}i {self.y:+.4f}j {self.z:+.4f}k {self.w:+.4f})\"", "def arg_par(x):\n if (isinstance(x,Operation) \n and not isinstance(x, self.__class__) \n and not isinstance(x,SingleArgOperation)):\n return \"(%s)\" % str(x)\n return str(x)", "def __make_description(self, param_name):\n value = self._params.get_value(param_name)\n return \"%s (Currently %s)\" % (param_name, str(value))", "def __make_description(self, param_name):\n value = self._status.get_value(param_name)\n if round(value) != value:\n # Parameter is a float. Limit to three decimals.\n value = \"%.3f\" % (value)\n\n return \"%s (%s)\" % (param_name, str(value))", "def __str__(self):\n return stringify(\n Inspect(\n self,\n help=True,\n methods=True,\n private=True,\n dunder=False,\n sort=True,\n all=False,\n ),\n maxlen=-1,\n )", "def object_to_param_str(obj):\n return b64encode(compress(cPickle.dumps(obj))).decode('utf8')", "def type_name(self):\n return \"%s %s\" % (self.param_type, self.name)", "def parameter_symbol(self) -> str:\n return self._parameter_symbol", "def __repr__(self):\n\n param = \"\"\n action = None\n\n if isinstance(self.items, list):\n for i in self.items:\n if len(param) > 0:\n param += \", \"\n param += i.__repr__()\n\n if self.action is not None:\n action = self.action.__name__\n\n return \"%s(%s, action=%s)\" % (self.__class__.__name__, param, action)", "def format_args(self):\r\n is_ctor = self.object.cls.name == self.object.name\r\n\r\n if self.object.args:\r\n if self.object.args[0] in (\"obj\", \"self\") and not is_ctor:\r\n return \"(\" + \", \".join(self.object.args[1:]) + \")\"\r\n else:\r\n return \"(\" + \", \".join(self.object.args) + \")\"", "def stringify_short(self):\n return self.stringify()", "def param_strs(self):\n name_len = max(len(p.name) for p in self)\n value_len = max(len(p.value_str) for p in self.params.values())\n units_len = max(len(p.units) for p in self.params.values())\n return [(p.name.ljust(name_len), p.value_str.ljust(value_len),\n p.units.ljust(units_len), p.__doc__)\n for p in self.params.values() if p]", "def __str__(self):\n self._validate()\n commandline = \"%s \" % self.program_name\n for parameter in self.parameters:\n if parameter.is_set:\n #This will include a trailing space:\n commandline += str(parameter)\n return commandline.strip() # remove trailing space", "def _format_parameterArray(self):\n return \"{%s; %s}\" % tuple(', '.join(str(x) for x in l)\n for l in self.parameterArray())", "def 
__repr__(self):\n return \"{0}({1})\".format(self.__class__.__name__,\n \", \".join(map(str, self.pars)))", "def call_str(pvs):\n s = \"'{}', '{}'\".format(pvs.get('place'), pvs.get('stat_var'))\n if pvs.get('measurement_method'):\n s += \", measurement_method='{}'\".format(\n pvs.get('measurement_method'))\n if pvs.get('observation_period'):\n s += \", observation_period='{}'\".format(\n pvs.get('observation_period'))\n if pvs.get('unit'):\n s += \", unit='{}'\".format(pvs.get('unit'))\n if pvs.get('scaling_factor'):\n s += \", scaling_factor={}\".format(pvs.get('scaling_factor'))\n return s", "def __str__(self):\n if self.default_kind:\n kind_str = \"\"\n else:\n kind_str = \", kind={}\".format(self.kind)\n # End if\n return \"{}(len={}{})\".format(self.typestr, self.lenstr, kind_str)", "def test__str__and__repr__(self, kwargs, expected):\n fparam = FParameter(**kwargs)\n assert str(fparam) == expected\n assert repr(fparam) == '<FParameter \"{}\">'.format(expected)", "def __str__(self):\n str = \"[{}] ({}) {}\"\n return (str.format(self.__class__.__name__, self.id, self.__dict__))", "def __str__(self):\n s = \"[{}] ({}) {}\".format(str(\n type(self).__name__), self.id, self.__dict__)\n return s", "def object_name(self):\n name = self.full_name\n if self.overload and self.overload.overload_id:\n name += f'({self.overload.overload_id})'\n return name", "def __str__(self):\n return f\"Variable(type={self._type}, id={self._id}, value={self.status}, init={self.init})\"", "def params_desc(self):\n return \"{}/{}/{}/{}\".format(\n self.learning_rate, self.movement, self.milestones, self.gamma\n )", "def __str__(self) -> str:\n return f'{self.name}.{self.arity}'", "def __str__(self):\n return 'Tensor product {}: {} params, wires {}'.format([i.name for i in self.obs], len(self.params), self.wires)", "def print_general_param(self, name=None, disp=True):\n\n if name is None:\n list = {}\n\n for name in self.params:\n list[name] = self.params[name].get_description()\n\n return self._print_params({'general': list}, disp)\n else:\n if name not in self.params:\n raise IndexError('%s is not a valid general parameter ' % name)\n\n return self._print_params({'general': {name: self.params[name].get_description()}}, disp)", "def __str__(self):\n s = \"\"\n for x in self.__members:\n v = getattr(self, x)\n if s: s+=\", \"\n s += \"%s: %s\" % (x, `v`)\n return s", "def __str__(self):\n raise json.dumps(self.parameters)", "def call_spec_string():\n # pylint: disable=protected-access\n frame = sys._getframe(1)\n argvals = inspect.getargvalues(frame)\n if argvals.args[0] == 'self':\n return inspect.formatargvalues(argvals.args[1:], *argvals[1:])\n else:\n return inspect.formatargvalues(*argvals)", "def __str__(self):\n return str(self._name + \", \" + self._value)", "def __str__(self):\n return str(self.__dict__['_obj'])", "def __str__(self):\n return self.format()", "def __str__(self):\n \n res = ['>>> Model %(model_name)s <<<']\n res.append('')\n res.append('Independent parameters:')\n res.append('-----------------------')\n res.append('')", "def __str__(self):\n return \"{0}(name='{1}', type={2}(0x{2:x}), id='{3}')\".format(\n self.__class__.__name__, self.type_name, self.type, self.id\n )", "def arg_str(self):\n\n args = ['self']\n args.extend([x.name for x in self.required_inputs])\n args.extend([\"{}=None\".format(x.name) for x in self.optional_inputs])\n\n return \", \".join(args)", "def __str__(self):\r\n return '[t1=%.1fs, t2=%.1fs, tmax=%.1fs, Lmax=%.1fs]' % (self.t1, self.t2, self.tmax, self.Lmax)", 
"def __str__(self):\n return f\"{self.full_name} ({self.short_name})\"", "def __str__(self):\n return f\"{self.full_name} ({self.short_name})\"", "def __str__(self):\n return '[{0}, {1}]'.format(self.timeValuePairs, self.defaultValue)", "def __str__(self):\n return \"[{}] ({}) {}\".format(self.__class__.__name__, self.id,\n self.__dict__)", "def __str__(self):\n return \"[{}] ({}) {}\".format(self.__class__.__name__, self.id,\n self.__dict__)", "def _encode_runtime_parameter(param: data_types.RuntimeParameter) -> str:\n if param.ptype is int:\n type_enum = pipeline_pb2.RuntimeParameter.INT\n elif param.ptype is float:\n type_enum = pipeline_pb2.RuntimeParameter.DOUBLE\n else:\n type_enum = pipeline_pb2.RuntimeParameter.STRING\n type_str = pipeline_pb2.RuntimeParameter.Type.Name(type_enum)\n return f'{param.name}={type_str}:{str(dsl.PipelineParam(name=param.name))}'", "def __repr__(self) -> str:\n class_name = self.__class__.__name__\n return f\"<{class_name} {self.property_name}={self.limit_string}>\"", "def display_name(self, obj):\n return six.text_type(obj)", "def __str__(self):\n if self.flaky:\n fmt = 'flaky | '\n else:\n fmt = ''\n fmt += '{2}: {0}'\n if self.variant:\n fmt += ' {1}'\n return fmt.format(*self)", "def __str__(self):\n return '{}.{} >> {}'.format(self.scope, self.name,\n '/'.join(map(str, self.variables)))", "def __str__(self):\r\n name = self.__class__.__name__\r\n return \"[{}] ({}) {}\".format(name, self.id, self.__dict__)", "def _repr_kwargs(self):\n\n return \"\"" ]
[ "0.74058735", "0.7150738", "0.70581096", "0.69755995", "0.6968546", "0.6926491", "0.6794039", "0.67680633", "0.6709835", "0.67044413", "0.66996485", "0.6644361", "0.65927416", "0.6591025", "0.65706384", "0.65648234", "0.65029067", "0.64802146", "0.64295065", "0.6425345", "0.6419455", "0.64097106", "0.63934046", "0.6389445", "0.63844085", "0.63456726", "0.63415796", "0.63415796", "0.63243115", "0.6319749", "0.6319199", "0.6313085", "0.6299829", "0.62802905", "0.6272076", "0.6268226", "0.625181", "0.6250192", "0.6223268", "0.6210592", "0.62096435", "0.62094134", "0.6204231", "0.6203443", "0.61989886", "0.6197538", "0.6173187", "0.6137885", "0.61372906", "0.61129755", "0.6111311", "0.611096", "0.611001", "0.60983276", "0.60945874", "0.60941404", "0.60840225", "0.6072041", "0.60631037", "0.6052727", "0.6052195", "0.60503745", "0.60420436", "0.6029126", "0.60254765", "0.6019473", "0.60187125", "0.60136443", "0.6010935", "0.60027313", "0.59871554", "0.5985768", "0.5985693", "0.5984833", "0.59845954", "0.59728485", "0.5969876", "0.59652", "0.5963893", "0.5951162", "0.5947917", "0.59454346", "0.594505", "0.59437567", "0.59357756", "0.59175867", "0.5916259", "0.59110624", "0.5905701", "0.5905701", "0.5899271", "0.5892234", "0.5892234", "0.5890069", "0.5886252", "0.58859813", "0.5883655", "0.5876994", "0.58768106", "0.58756095" ]
0.6784789
7
Number of parameters in full model.
def __len__(self): return len(self.params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def num_params(self):", "def count_params(self):\n self.N = 0\n for name, param in self.model.named_parameters():\n self.N += param.numel()\n self.N_list.append(self.N)", "def num_parameters(self) -> int:\n if self._model:\n return self._model.num_parameters()\n return 0", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters())", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()", "def num_parameters(model):\n return sum([param.nelement() for param in model.parameters()])", "def count_parameters(model):\r\n count = 0\r\n for parameter in list(model.parameters()):\r\n subcount = 1\r\n for size in list(parameter.size()):\r\n subcount *= size\r\n count += subcount\r\n return count", "def number_of_parameters(self):\n return len(self.parameters)", "def num_params(self):\n return len(self.params)", "def N(self):\n return len(self.parameters)", "def _n_parameters(self):\n raise NotImplementedError", "def get_num_parameters(self):\n return len(self.parameters)", "def n_parameters(self):\n return self.pdm.n_parameters", "def n_parameters(self):\n return sum([p.n_parameters for p in self.parameters])", "def num_parameters(self) -> int:\n return len(self) * self.convention.value", "def num_params(self) -> int:\n return self._num_params", "def num_param(self):\n return len(self._parameters)", "def get_parameter_numbers(self) -> int:\n # TODO(jeikeilim): return the number of parameter list of each layers.\n n_param = sum([x.numel() for x in self.model.parameters()])\n return n_param", "def getNumParameters(self):\n return _libsbml.Model_getNumParameters(self)", "def num_parameters(self) -> int:\n return len(self.w) + prod(self.v.shape) - len(self.v)", "def count_params(model):\n param_count = np.sum([np.prod(p.size()) for p in model.parameters()])\n return param_count", "def count_parameters(also_print=True):\n total = 0\n if also_print:\n logging.info('Model Parameters:')\n for (_, v) in get_vars_to_save_and_restore().items():\n shape = v.get_shape()\n if also_print:\n logging.info('%s %s: %s', v.op.name, shape,\n format_number(shape.num_elements()))\n total += shape.num_elements()\n if also_print:\n logging.info('Total: %s', format_number(total))\n return total", "def calculate_num_params(self):\n num_params = 0\n for p in self.parameters():\n num_params += p.data.view(-1).size(0)\n return num_params", "def num_parameters(self) -> int:\n if vocabulary.is_empty(self.vocab, self.config.features.configured_namespaces):\n self._LOGGER.warning(\n \"At least one vocabulary of your features is still empty! 
\"\n \"The number of trainable parameters usually depends on the size of your vocabulary.\"\n )\n return sum(p.numel() for p in self._model.parameters())", "def n_parameters(self) -> int:\n return nkjax.tree_size(self.parameters)", "def n_parameters(self):\n return len(self._LIST_PARAMETERS)", "def num_params(self):\r\n return np.sum([torch.tensor(param.shape).prod()\r\n for param in self.parameters()])", "def n_params(model):\n \n n_params=sum([\n np.prod([tensor.size()[k] for k in range(len(tensor.size()))])\n for tensor in list(model.parameters())])\n \n return n_params", "def _get_parameter_count(self):\n parameters_d = 5;\n size_h = self.model.size_h\n return (size_h - 1) + size_h * (\n (size_h - 1) + parameters_d + (self.model.size_aa - 1) + \n (self.model.size_ss - 1) + (self.model.size_cis - 1)\n )", "def get_num_params(self):\n if self.num_params is None:\n self.num_params = len(self.params)\n return self.num_params", "def num_params(self):\n return np.sum([torch.tensor(param.shape).prod()\n for param in self.parameters()])", "def count_params(model):\n total = 0\n for x in model.trainable_variables:\n total += np.prod(x.shape)\n return total", "def n_params(self, t_id):\n all_params = set()\n for i in range(t_id+1):\n model = self.get_model(i)\n all_params.update(model.parameters())\n all_params.update(model.buffers())\n\n return sum(map(torch.numel, all_params))", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.emb_layers) + \\\n sum(weight.numel() for weight in self.emb_projs)", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.emb_layers) + \\\n sum(weight.numel() for weight in self.emb_projs)", "def n_variables(self):\n return sum([p.n_variables for p in self.parameters])", "def count_parameters(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)/1e6", "def count_parameters(self):\n return sum(p.numel() for p in self.parameters() if p.requires_grad)/1e6", "def numel(self) -> int:\n return sum(p.numel() for p in self.parameters)", "def count_params():\n param_count = np.sum([np.prod(x.get_shape().as_list()) for x in tf.global_variables()])\n return param_count", "def count_parameters(model):\n\treturn sum(p.numel() for p in model.parameters() if p.requires_grad)", "def __len__(self) -> int:\n return len(self.parameters)", "def len_parameters(self):\n return len(self._Parameters._fields)", "def num_params(self):\n raise NotImplemented(\"Abstract, please implement in respective classes\")", "def count_parameters(model, tunable_only: bool = True) -> int:\n if tunable_only:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n else:\n return sum(p.numel() for p in model.parameters())", "def N(self) -> int:\n return self.params.N", "def num_params():\n total_num = 0\n for var in tf.trainable_variables():\n shape = var.get_shape()\n total_num += functools.reduce(operator.mul, [dim.value for dim in shape], 1)\n return total_num", "def countParameters(self):\n return sum(p.numel() for p in self.model.parameters() if p.requires_grad)", "def nb_parameters(net):\n return sum(p.numel() for p in net.parameters())", "def count_parameters(model: Tuple[tuple, tuple, tuple, tuple, str]) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def nVariables(self):\n return len(self.variables)", "def dimensions(self):\n return len(self.parameter_names)", "def n_global_parameters(self):\n return self.global_transform.n_parameters", "def 
num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.out_layers) + \\\n sum(weight.numel() for weight in self.out_projs)", "def num_prunable_parameters(self) -> int:\n return sum(l.weight.numel() for l in self.out_layers) + \\\n sum(weight.numel() for weight in self.out_projs)", "def param_size(model):\n n_params = sum(\n np.prod(v.size()) for k, v in model.named_parameters() if not k.startswith('aux_head'))\n return n_params / 1024. / 1024.", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_parameters():\n total_parameters = 0\n for variable in tf.trainable_variables():\n # shape is an array of tf.Dimension\n name = variable.name\n shape = variable.get_shape()\n #print(shape)\n #print(len(shape))\n variable_parameters = 1\n for dim in shape:\n #print(dim)\n variable_parameters *= dim.value\n print(name, [dim for dim in shape], variable_parameters)\n total_parameters += variable_parameters\n print('Number of trainable parameters = {}'.format(total_parameters))", "def count_params(model: torch.nn.Module) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def count_params(model: torch.nn.Module) -> int:\n return sum(p.numel() for p in model.parameters() if p.requires_grad)", "def num_trainable_parameters(self) -> int:\n if vocabulary.is_empty(self.vocab, self.config.features.configured_namespaces):\n self._LOGGER.warning(\n \"At least one vocabulary of your features is still empty! 
\"\n \"The number of trainable parameters usually depends on the size of your vocabulary.\"\n )\n return sum(p.numel() for p in self._model.parameters() if p.requires_grad)", "def model_numel(model, param_dims=[2, 4], param_types=['weight', 'bias']):\n total_numel = 0\n for name, param in model.state_dict().items():\n # Extract just the actual parameter's name, which in this context we treat as its \"type\"\n if param.dim() in param_dims and any(type in name for type in param_types):\n total_numel += torch.numel(param)\n return total_numel", "def numpoints(self):\n return len(self.pars) + 1 # so dof is 1", "def __len__(self) -> int:\n return len(self.variables)", "def calculate_num_params(self) -> None:\n for name, param in self.module.named_parameters():\n self.num_params += param.nelement()\n self.trainable &= param.requires_grad\n\n if name == \"weight\":\n ksize = list(param.size())\n # to make [in_shape, out_shape, ksize, ksize]\n if len(ksize) > 1:\n ksize[0], ksize[1] = ksize[1], ksize[0]\n self.kernel_size = ksize\n\n # RNN modules have inner weights such as weight_ih_l0\n elif \"weight\" in name:\n self.inner_layers[name] = list(param.size())", "def param_size(module:nn.Module):\n return np.sum(v.numel() for name, v in module.named_parameters() \\\n if \"auxiliary\" not in name)", "def num_vars(self):\n return self.nvars", "def model_params_size(model, param_dims=[2, 4], param_types=['weight', 'bias']):\n _, _, sparse_params_cnt = model_params_stats(model, param_dims, param_types)\n return sparse_params_cnt", "def getNumParameters(self):\n return _libsbml.KineticLaw_getNumParameters(self)", "def _get_param_size(module: torch.nn.Module):\n return sum([p.numel() * torch.tensor([], dtype=p.dtype).element_size() for p in module.parameters()])", "def get_number_of_models():\n return 8", "def num_hyperparameters(self):\n return self._hyperparameters.size", "def num_hyperparameters(self):\n return self._hyperparameters.size", "def parameter_count(model: nn.Module) -> typing.DefaultDict[str, int]:\n r = defaultdict(int)\n for name, prm in model.named_parameters():\n size = prm.numel()\n name = name.split(\".\")\n for k in range(0, len(name) + 1):\n prefix = \".\".join(name[:k])\n r[prefix] += size\n return r", "def print_num_params(model: nn.Module):\n if type(model) == DistributedDataParallel:\n model = model.module\n\n # Count all parameteres\n sum_params = count_params(model)\n\n # Count SPN parameters\n spn_params = sum_params\n\n # Print\n logger.info(f\"Number of parameters:\")\n # logger.info(f\"- Total: {sum_params / 1e6: >8.3f}M\")\n logger.info(\n f\"- SPN: {spn_params / 1e6: >8.3f}M ({spn_params / sum_params * 100:.1f}%)\"\n )\n # logger.info(f\"- NN: {nn_params / 1e6: >8.3f}M ({nn_params / sum_params * 100:.1f}%)\")", "def get_num_variables(self):\n return len(self.variables)", "def get_num_params(self):\n if self.num_params is None:\n import inspect\n argspec = inspect.getfullargspec(self.get_code())\n if argspec.varargs or argspec.varkw:\n self.num_params = -1\n else:\n self.num_params = len(argspec.args)\n return self.num_params", "def count_parameters(self) -> Tuple[int, int]:\n c_trained, c_total = 0, 0\n for p in self.parameters():\n increment = reduce(lambda x, y: x * y, p.size())\n if p.requires_grad:\n c_trained += increment\n c_total += increment\n return c_trained, c_total", "def count_total_params(model):\n trainable_count = int(\n numpy.sum([K.count_params(p) for p in set(model.trainable_weights)]))\n non_trainable_count = int(\n numpy.sum([K.count_params(p) for p 
in set(model.non_trainable_weights)]))\n return trainable_count, non_trainable_count", "def num_vars(self):\n return len(self.bounds.lb)", "def num_params(architecture): #\n \n total_parameters = 0\n for layer in range(1,len(architecture)+1):\n weight_dims = np.shape(architecture['layer{}'.format(layer)][2])\n try:\n params = weight_dims[0]*weight_dims[1]*weight_dims[2]\n except:\n try:\n params = weight_dims[0]*weight_dims[1]\n except:\n try:\n params = weight_dims[0]\n except:\n params = 0\n total_parameters += params\n return total_parameters", "def length(self):\n return int(np.sum([x.length for x in self.parameters]))", "def num_training_examples(self):", "def count_parameters(sess):\n\n variables_names = [v.name for v in tf.trainable_variables()]\n values = sess.run(variables_names)\n n_params = 0\n\n for k, v in zip(variables_names, values):\n print '-'.center(140, '-')\n print '{:60s}\\t\\tShape: {:20s}\\t{:20} parameters'.format(k, v.shape, v.size)\n\n n_params += v.size\n\n print '-'.center(140, '-')\n print 'Total # parameters:\\t\\t{}\\n\\n'.format(n_params)\n\n return n_params", "def nvar(self):\n return len(self.__vars)", "def get_total_trainable_parameter_size():\n total_parameters = 0\n import tensorflow as tf\n for variable in tf.trainable_variables():\n # shape is an array of tf.Dimension\n total_parameters += np.product([x.value for x in variable.get_shape()])\n return total_parameters", "def num_vars(self):\n return self._nvars", "def variables_num(self):\n return 1", "def get_model_count(self):\n return len(self._model_start_i)", "def countVars(self):\n return len(self.initializedVars[\"GF\"]) + len(self.initializedVars[\"LF\"]) + len(self.initializedVars[\"TF\"])", "def count_parameters(net):\r\n return sum(p.numel() for p in net.parameters() if p.requires_grad)", "def countParam(self):\n return self.decl.args[mpi_array_calls[self.decl.name][self.pos]]", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios" ]
[ "0.8517904", "0.83146226", "0.8285639", "0.8231243", "0.8113813", "0.8113813", "0.8113813", "0.8045275", "0.80263823", "0.8018176", "0.8015244", "0.8014556", "0.7949592", "0.7944686", "0.79414004", "0.79262877", "0.7919125", "0.7889194", "0.78718674", "0.7828952", "0.7804306", "0.7774453", "0.7772432", "0.7762724", "0.77623785", "0.77521557", "0.7743684", "0.77140397", "0.768846", "0.7655662", "0.7647408", "0.7639085", "0.7637102", "0.7616038", "0.7589194", "0.7569949", "0.7569949", "0.7553899", "0.74987245", "0.74987245", "0.7459711", "0.745538", "0.7452005", "0.74358296", "0.7429189", "0.7425402", "0.7405578", "0.73960024", "0.737876", "0.7350752", "0.73434615", "0.73373955", "0.73242843", "0.72716993", "0.72694486", "0.7263742", "0.7263742", "0.7260358", "0.7242898", "0.7242898", "0.7242898", "0.7242898", "0.72382474", "0.72158426", "0.72158426", "0.72008735", "0.7171036", "0.7161786", "0.71509284", "0.71301883", "0.70888627", "0.7087313", "0.7070109", "0.7067998", "0.6982927", "0.6982639", "0.69730264", "0.69730264", "0.6971918", "0.6946629", "0.69456434", "0.6939419", "0.6919869", "0.6914924", "0.6902689", "0.6902235", "0.6894223", "0.68872696", "0.6878443", "0.6877234", "0.68530184", "0.68492556", "0.6846964", "0.6843522", "0.68422836", "0.68407154", "0.68256176", "0.6797769", "0.6797769", "0.6797769" ]
0.7406236
46
True if model contains any active (nondefault) parameters.
def __bool__(self): return any(p for p in self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_active(self) -> bool:\n return any(x is not None for x in self._constraints)", "def params_required(self) -> bool:\n if self.no_params or self.params_optional:\n return False\n else:\n return True", "def no_params(self) -> bool:\n result = True\n # Fixing issue #92\n if self.properties.parameters:\n return False\n else:\n return True\n # for parameter in self.properties.parameters:\n # if parameter == \"effect\":\n # continue\n # else:\n # result = False\n # break\n # return result", "def is_model(model: Model) -> bool:\r\n for key in model:\r\n if not (is_variable(key) and type(model[key]) is bool):\r\n return False\r\n return True", "def check_params(self, model_params):\n return model_params", "def has_learned_parameters(self) -> bool:\n return any(\n len(params) > 0 for (_, params) in self.get_optimizer_params().items()\n )", "def is_initialized(self) -> bool:\n return (\n (self._exchange_params_by_currency_id is not None)\n and (self._utility_params_by_good_id is not None)\n and (self._transaction_fees is not None)\n )", "def _check_whether_has_params(self, params) -> bool:\n\n if params:\n return True\n return False", "def hasRequiredAttributes(self):\n return _libsbml.LocalParameter_hasRequiredAttributes(self)", "def hasRequiredAttributes(self):\n return _libsbml.Parameter_hasRequiredAttributes(self)", "def params_optional(self) -> bool:\n result = True\n if self.no_params:\n # We will return False, because there are no params at all - optional or not.\n return False\n for parameter, parameter_details in self.parameters.items():\n # Fixing issue #92\n # if parameter == \"effect\":\n # continue\n # We should allow you to print out the options to a YAML file and fill it out like a form.\n # So right now, it will create a long Kubernetes policy, but it will have lots of empty lists that we have to fill out. 
Oh well.\n if not parameter_details.default_value:\n # if not parameter.default_value and parameter.default_value != [] and parameter.default_value != \"\":\n result = False\n break\n return result", "def has_activations(self):\n # pylint: disable=not-an-iterable\n for _ in self.activations:\n return True\n return False", "def is_valid(self):\n return self.is_active", "def has_attributes(self):\n\n pass", "def is_model(model: Model) -> bool:\n for key in model:\n if not is_variable(key):\n return False\n return True", "def isActive(self):\n return self.sess is not None and self.sess.isValid()", "def is_active(self) -> bool:\n return not any((self.is_ancillary, self.is_removed, self.is_system))", "def has_attributes(self):\n return bool(self.attrs)", "def hasRequiredAttributes(self):\n return _libsbml.ModelCreator_hasRequiredAttributes(self)", "def use_ppmodel(self):\n return hasattr(self, \"ppmodel\")", "def isVwraysParameters(self):\n return True", "def is_active(self) -> bool:", "def hasRequiredAttributes(self):\n return _libsbml.FluxObjective_hasRequiredAttributes(self)", "def active(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"active\")", "def hasRequiredAttributes(self):\n return _libsbml.Objective_hasRequiredAttributes(self)", "def is_active(self):\n return self.type_id in ACTIVE_STATES", "def hasRequiredAttributes(self):\n return _libsbml.ModelHistory_hasRequiredAttributes(self)", "def isActiveFitParam(param):\n return isFitParam(param) and param.isActive()", "def has(self, param):\n\n if param in self.params:\n return True\n\n return False", "def lazy_parmeters_determined(self):\n return self._lazy_ready and all([\n not isinstance(getattr(self, x), UninitializedParameter)\n for x in self.lazy_parameter_names])", "def hasRequiredAttributes(self):\n return _libsbml.ExternalModelDefinition_hasRequiredAttributes(self)", "def are_any_attributes_visible(self):\n\n for attribute_name, type_instance in inspect.getmembers(self):\n\n if attribute_name.startswith('__') or inspect.ismethod(type_instance):\n continue\n\n if isinstance(type_instance, bool) and type_instance == True:\n return True\n elif isinstance(type_instance, self.__class__) and \\\n type_instance.are_all_attributes_visible() == True:\n return True\n\n return False", "def filters_active(self):\n if self.is_valid():\n return bool(\n {\n k: v\n for k, v in self.cleaned_data.items()\n if k not in [\"q\", \"sort\"] and bool(v)\n }\n )\n return False", "def active(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"active\")", "def active(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"active\")", "def active(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"active\")", "def active(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"active\")", "def _is_ready(self):\n res = True\n for (key, val) in self._attrs.iteritems():\n if key not in self._optional_attrs:\n if val is None:\n res = False\n return res", "def isActive(obj):\n wf = getToolByName(obj, 'portal_workflow')\n if (hasattr(obj, 'inactive_state') and obj.inactive_state == 'inactive') or \\\n wf.getInfoFor(obj, 'inactive_state', 'active') == 'inactive':\n return False\n if (hasattr(obj, 'cancellation_state') and obj.inactive_state == 'cancelled') or \\\n wf.getInfoFor(obj, 'cancellation_state', 'active') == 'cancelled':\n return False\n return True", "def has_default_parameters(self):\n return self._compat_flags[0] & (0x1 << 0)", "def hasRequiredAttributes(self):\n return 
_libsbml.Port_hasRequiredAttributes(self)", "def hasRequiredAttributes(self):\n return _libsbml.Submodel_hasRequiredAttributes(self)", "def is_sparsity_enabled(cls):\n total,sp100,sp50 = 0,0,0\n for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:\n total += 1\n mask_sum = mask.sum()\n mask_numel = mask.numel()\n if mask_sum == mask_numel:\n sp100 += 1\n elif mask_sum*2 == mask_numel:\n sp50 += 1\n\n assert (total == sp100 or total == sp50), \"Inconsistent model sparsity\"\n if total == sp100:\n return False\n elif total == sp50:\n return True", "def is_empty(self):\n return not bool(self._model_sigs)", "def _isActive(self, inPars):\n\t\tfor f in self.inputKeys:\n\t\t\tif f.name not in inPars:\n\t\t\t\treturn False\n\t\treturn True", "def isSetModel(self):\n return _libsbml.SBMLDocument_isSetModel(self)", "def has_active_jobs(self, **kwargs):\n if Job.objects.add_balance().filter(house=self.house, balance1__gt=0, approved=True, **kwargs).exists():\n return True\n\n return False", "def is_enabled(self):\n for arch in self.inputs:\n if arch.place.M < arch.weight:\n return False\n return True", "def is_endpoint_parameters_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_endpoint_parameters_enabled\")", "def is_endpoint_parameters_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_endpoint_parameters_enabled\")", "def is_initialized(self) -> bool:\n return (\n self._amount_by_currency_id is not None\n and self._quantities_by_good_id is not None\n )", "def active(self):\n return self.home is not None and self.away is not None and self.winner is None", "def IsActive(self):\n return True", "def are_all_attributes_visible(self):\n\n for attribute_name, type_instance in inspect.getmembers(self):\n\n if attribute_name.startswith('__') or inspect.ismethod(type_instance):\n # Ignore parameters with __ and if they are methods\n continue\n\n if isinstance(type_instance, bool) and type_instance == False:\n return False\n elif isinstance(type_instance, self.__class__) and \\\n type_instance.are_all_attributes_visible() == False:\n return False\n\n return True", "def is_active(self):\n return self._is_record_status(self.ACTIVE)", "def active(self):\n\n return True", "def getBoolParam(self, params, name):\n return params.get(name) in ('True', 'true', '1')", "def negotiated(self) -> bool:\n return self.__parameters.negotiated", "def _has_filters(self):\n return self.query.has_filters()", "def active(self) -> bool:\n return pulumi.get(self, \"active\")", "def is_active(self) -> bool:\n return self.active == \"active\"", "def valid(self) -> bool:\n are_populated = [bool(getattr(self, fld_nm)) for fld_nm in self.necessary_fields]\n return all(are_populated)", "def available(self):\n fields = self._meta.get_fields()\n\n for field in fields:\n if isinstance(field, models.ManyToManyRel):\n attr = field.get_accessor_name()\n\n if getattr(self, attr).count() > 0:\n return False\n\n elif isinstance(field, models.OneToOneRel):\n attr = field.get_accessor_name()\n if getattr(self, attr, None):\n return False\n\n return True", "def contains_param(self,param):\n contains = False\n for filter in self:\n if getattr(filter, param, None) is not None:\n contains = True\n break\n\n return contains", "def attribute(self):\n\n return not bool(self.arguments)", "def has_custom_param(plot):\n return Plot.has_custom_param(plot)", "def hasOptionalAttributes(self):\n return _libsbml.SBase_hasOptionalAttributes(self)", "def is_active(self):\n return True", 
"def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def is_active(self):\n return True", "def restoreControllerParams(self):\r\n return not self._dontRestoreControllerParamsButton.IsSelected()", "def __bool__(self):\n context, active_obj, actual_mode, mode = self.get_context()\n if not mode: return False\n \n if mode == 'OBJECT':\n return bool(context.selected_objects)\n elif mode == 'EDIT_MESH':\n mesh = active_obj.data\n if actual_mode == 'EDIT_MESH':\n return bool(mesh.total_vert_sel)\n else:\n return any(item.select for item in mesh.vertices)\n elif mode in {'EDIT_CURVE', 'EDIT_SURFACE'}:\n for spline in active_obj.data.splines:\n for item in spline.bezier_points:\n if (item.select_control_point or\n item.select_left_handle or\n item.select_right_handle):\n return True\n for item in spline.points:\n if item.select:\n return True\n elif mode == 'EDIT_METABALL':\n return bool(active_obj.data.elements.active)\n elif mode == 'EDIT_LATTICE':\n return any(item.select for item in active_obj.data.points)\n elif mode == 'EDIT_ARMATURE':\n return any(item.select_head or item.select_tail\n for item in active_obj.data.edit_bones)\n elif mode == 'POSE':\n return any(item.select for item in active_obj.data.bones)\n elif mode == 'PARTICLE':\n # Theoretically, particle keys can be selected,\n # but there seems to be no API for working with this\n pass\n else:\n pass # no selectable elements in other modes\n \n return False", "def hasRequiredAttributes(self):\n return _libsbml.OutwardBindingSite_hasRequiredAttributes(self)", "def isSetActiveObjective(self):\n return _libsbml.ListOfObjectives_isSetActiveObjective(self)", "def hasActiveConfiguration(self):\n cal = self.request.get('form.widgets.calendarConfig')\n if cal is not None:\n if cal == ['non actif'] or cal == ['bloque']:\n return False\n else:\n return True\n wrapper = getSAWrapper('gites_wallons')\n session = wrapper.session\n for heb in getHebergementsForProprio(self.context, session):\n return (heb.heb_calendrier_proprio != 'non actif')", "def _mixed_precision_enabled_for_params(self) -> bool:\n return self.mixed_precision.param_dtype is not None", "def hasRequiredAttributes(self):\n return _libsbml.FbcAnd_hasRequiredAttributes(self)", "def is_active(self):\r\n return True", "def has_parameter(self, name):\n for par in self.params:\n if par.name == name:\n return True\n return False", "def is_active():\n return True", "def hasRequiredAttributes(self):\n return _libsbml.FbcAssociation_hasRequiredAttributes(self)", "def hasRequiredAttributes(self):\n return _libsbml.InitialAssignment_hasRequiredAttributes(self)", "def hasRequiredAttributes(self):\n return _libsbml.Input_hasRequiredAttributes(self)", "def hasRequiredAttributes(self):\n return _libsbml.Trigger_hasRequiredAttributes(self)", "def requires_model_loading(self):\n return self.requires_loaded_models" ]
[ "0.67918104", "0.67808914", "0.6769819", "0.676107", "0.6671744", "0.6616392", "0.6594647", "0.6546123", "0.65182585", "0.6490626", "0.64229697", "0.63208073", "0.62987643", "0.62911", "0.6290869", "0.6289151", "0.62316024", "0.6204833", "0.616754", "0.61671543", "0.61343193", "0.61329806", "0.61322534", "0.61055845", "0.61010337", "0.6091068", "0.60819536", "0.6073182", "0.6068924", "0.6067556", "0.6065384", "0.60606515", "0.6053119", "0.6044574", "0.6044574", "0.6044574", "0.6044574", "0.602329", "0.59857106", "0.5985512", "0.59532267", "0.59505737", "0.5932511", "0.59231555", "0.59203917", "0.5910401", "0.590443", "0.59006876", "0.58973366", "0.58973366", "0.58886445", "0.58835167", "0.58693767", "0.5864382", "0.58435756", "0.5842103", "0.58370364", "0.58369434", "0.58299947", "0.5827135", "0.5826909", "0.58207875", "0.5814708", "0.5797499", "0.5791622", "0.578537", "0.5782511", "0.5768975", "0.5768975", "0.5768975", "0.5768975", "0.5768975", "0.5768975", "0.5768975", "0.5768975", "0.5768975", "0.5768975", "0.5768975", "0.5768975", "0.5768975", "0.5768975", "0.5768975", "0.5768975", "0.5768975", "0.5768975", "0.5768975", "0.5754027", "0.5753563", "0.5751955", "0.57490855", "0.5746931", "0.57462525", "0.57447207", "0.57321566", "0.5728956", "0.5710862", "0.570648", "0.5681377", "0.5681249", "0.5680543", "0.5679627" ]
0.0
-1
Iterate over parameter objects.
def __iter__(self): return self.params.values().__iter__()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iter_params(self):\n for var, val in self._params.iteritems():\n yield var, val", "def parameters(self):\n for parameters in self:\n for parameter in parameters:\n yield parameter", "def get_params_iter(self):\n return []", "def __iter__(self):\n for p in self.param_grid:\n # Always sort the keys of a dictionary, for reproducibility\n items = sorted(p.items())\n if not items:\n yield {}\n else:\n keys, values = zip(*items)\n for v in product(*values):\n params = dict(zip(keys, v))\n yield params", "def iter_jobs(self):\n for param in self._parameters:\n yield param", "def _parse_parameters(args: Iterable[Any]) -> Generator[TypeVar, None, None]:\n for i in args:\n if hasattr(i, \"__parameters__\"):\n yield from i.__parameters__\n elif isinstance(i, TypeVar):\n yield i", "def dsn_param_iter(self) -> Iterable[Tuple[str, Dict[str, Any]]]:\n for combo in self.swp_combo_iter():\n yield self.get_design_name(combo), self._get_params(combo)", "def __iter__(self):\n for p in self.param_grid:\n # Always sort the keys of a dictionary, for reproducibility\n modstr = '%s__' % self.modality\n items = sorted([(k.replace('clf__'+modstr, ''), v) for k, v in p.items() if modstr in k])\n if not items:\n yield {}\n else:\n keys, values = zip(*items)\n for v in product(*values):\n params = dict(zip(keys, v))\n yield params", "def _determine_parameters(self, paramdict):\n for var in paramdict:\n if is_dimensionless(paramdict[var]):\n self._all_params_unit[var] = \"none\"\n yield lems.Parameter(var, \"none\")\n else:\n dim = _determine_dimension(paramdict[var])\n self._all_params_unit[var] = dim\n yield lems.Parameter(var, dim)", "def _params(self, obj):\n if isinstance(obj, BaseParameter):\n return [obj]\n elif isinstance(obj, BaseModule):\n return obj.parameters\n elif isinstance(obj, list):\n return self._list_params(obj)\n elif isinstance(obj, dict):\n return self._dict_params(obj)\n else:\n return []", "def __iter__(self):\n return dict(self.parameters)", "def __iter__(self):\n return iter(vars(self.obj))", "def variableIter(self):\n for (para, start), variable in self.variables.iteritems():\n yield para, start, variable", "def get_params(self, deep=...):\n ...", "def __iter__(self) -> Tuple[str, Any]:\n for attr_name, attr_val in self.__dict__.items():\n yield attr_name, attr_val", "def find_params(cls):\n the_classes = cls.mro()\n params = odict()\n for the_class in the_classes:\n for key, val in the_class.__dict__.items():\n if isinstance(val, Parameter):\n params[key] = val\n return params", "def gather_params(self):\n for layer in self.layers:\n for name, value in layer.params.iteritems():\n self.params[name] = value", "def iterparams(params: Dict[str, List[Any]]) -> Dict[str, Any]:\n for set in product(*params.values()):\n yield dotdict(zip(params.keys(), set))", "def parameters(self):\n return [p for _, a in vars(self).items() for p in self._params(a)]", "def params(self) -> Tuple[Parameter, ...]:\n raise NotImplementedError()", "def get_params_iter(self):\n return itertools.chain(np.nditer(self.W, op_flags=['readwrite']),\n np.nditer(self.b, op_flags=['readwrite']))", "def params():\n raise NotImplementedError", "def iterate(cls):\n for name, value in vars(cls).iteritems():\n if name.startswith('__') or not isinstance(value, int):\n continue\n yield (value, name)", "def parameters(self):", "def __iter__():", "def __iter__():", "def __iter__():", "def __iter__():", "def printfunc(self, params, iter, resid, *args, **kwargs):\n\n print(iter) \n print(params.valuesdict())", "def 
get_params(self):", "def iterate(cls):\n for name, value in vars(cls).iteritems():\n if name.startswith('__'):\n continue\n yield (name, value)", "def _get_parameters(self) -> list:\n return self.parameters", "def __iter__(self):\r\n for attr, value in self.__dict__.items():\r\n a = getattr(self, attr)\r\n if type(a) is list:\r\n if len(a) > 0:\r\n yield attr, a", "def params(self):\n params = []\n\n for item in self._definition.get('params', []):\n params.append(Parameter(**item))\n\n return params", "def _get_orig_params(\n self,\n module: nn.Module,\n ignored_params: Set[nn.Parameter],\n ) -> Iterator[nn.Parameter]:\n param_gen = module.parameters()\n try:\n while True:\n param = next(param_gen)\n if param not in ignored_params and not _is_fsdp_flattened(param):\n yield param\n except StopIteration:\n pass", "def check_params(cls, **kwargs) -> None:\n\n for key, val in kwargs.items():\n cls.check_param(key, val)", "def iterate_inputs(function, type_to_vars):\n if isinstance(function.input_type, tuple):\n input_types = list(function.input_type)\n else:\n input_types = [function.input_type]\n\n argslists = []\n for input_type in input_types:\n argslists.append(type_to_vars[input_type])\n for args in itertools.product(*argslists):\n yield args", "def get_parameters(self, parameters):\n for p in parameters:\n setattr(self, p, parameters[p].value)\n self.set_symmetry()", "def iterate(self):", "def __iter__(self):\n for name, field in self.iterate_over_fields():\n yield name, field", "def get_params(self, deep=True):\n return {p: getattr(self, p) for p in self.params}", "def set_params(self, params):\n for step_id, step_params in _iteritems(params):\n for name, value in _iteritems(step_params):\n self.add_param(step_id, name, value)", "def get_named_parameters(self):\n for name, _ in self.module_to_quantize.named_parameters():\n yield name, getattr(self, name)", "def params(self):\n params = []\n\n for v in vars(self).values():\n params.extend(self.__computeParams(v))\n\n if isinstance(v, list):\n for p in v:\n params.extend(self.__computeParams(p))\n\n return params", "def __iter__(self):\n return (x for x in vars(self))", "def __iter__(self):\n\t\tfor attribute_name in dir(self):\n\t\t\tif self._valuable(attribute_name):\n\t\t\t\tyield getattr(self, attribute_name)", "def task_parameters(self):\n yield self.properties", "def __iter__(self):\n for value in self.__dict__.values():\n yield value", "def parameters(self):\n pass", "def __parameters__(self) -> tuple[TypeVar, ...]:\n return super().__getattribute__(\"_parameters\")", "def parameters(self):\n return self._params", "def iterate(self):\n raise NotImplementedError()", "def parameters(self):\n raise NotImplementedError('Abstract method \"parameters\" must be '\n 'specialised!')", "def __iter__(self):\n return iter(self._vars)", "def _get_parameters(self, *keys):\n return {k: v for k, v in self.param.items() if k in keys}", "def set_params(self, parameters: dict = {}):\n for param in parameters:\n for step in self.steps:\n if param.lower() == step[0].lower():\n step[1].set_params(parameters[param])", "def get_params(self):\n pass", "def parameters_list(self):\n return [getattr(self.parameters, p) for p in self.parameters_names()]", "def params(self) -> Iterable[sympy.Symbol]:\n for i in range(self.iterations):\n for p in range(len(self.qubits)):\n if (self.include_all_z or not\n numpy.isclose(self.orbital_energies[p], 0)):\n yield LetterWithSubscripts('U', p, i)\n for p, q in itertools.combinations(range(len(self.qubits)), 2):\n if 
(self.include_all_cz or not\n numpy.isclose(self.hamiltonian.two_body[p, q], 0)):\n yield LetterWithSubscripts('V', p, q, i)", "def params(self):\n pass", "def get_params(self):\n raise NotImplementedError", "def get_params(self, deep = True, bounds = True):\n params = dict() \n for p in self._LIST_PARAMETERS:\n params[p] = self._get_one_param(p)\n if(bounds):\n params[p + '_bounds'] = self._get_one_bound(p)\n if(deep and self._FLAG_TYPE == 'collection' and p == 'list_func'):\n for n, sub_obj in enumerate(params[p]):\n sub_params = sub_obj.get_params(deep, bounds)\n params.update({'f' + str(n) + '__' + key: val for key, val in sub_params.items()})\n \n return params", "def iter_components(self):\n for iv in range(len(self._var_names)):\n yield self._var_names[iv], self._vals[iv]", "def parameter_bounds(self):\n for name, bound in self.named_parameter_bounds():\n yield bound", "def all(self):\n datapoint_params = self._make_datapooint_param_iter()\n if datapoint_params is None:\n return iter([])\n params_list = list(datapoint_params) # construct param list\n return self._gen(params_list)", "def _update_parameters(self, topology, parameters):\n for pkey, parameter in self.parameters.items():\n\n value = parameters[pkey]\n name = parameter.attr_name()\n key = parameter.key()\n\n if isinstance(parameter, NodeParameter):\n topology.node_attribute(key=key, name=name, value=value)\n elif isinstance(parameter, EdgeParameter):\n topology.edge_attribute(key=key, name=name, value=value)\n else:\n msg = \"Parameter {} is neither a node nor an edge parameter! {}\"\n raise TypeError(msg.format(type(parameter)))", "def __iter__(self) -> (str, np.ndarray):\n for k, v in self.fields.items():\n yield k, v", "def check_parameters():\r\n for par in PARAM:\r\n if isinstance(par, ExperimentFrame):\r\n EXP.change_variable(**par())\r\n else:\r\n EXP.change_variable(**par)", "def parameters(names, **kwargs):\n sequence_fields = ['value', 'min', 'max', 'fixed']\n sequences = {}\n for attr in sequence_fields:\n try:\n iter(kwargs[attr])\n except (TypeError, KeyError):\n # Not iterable or not provided\n pass\n else:\n sequences[attr] = kwargs.pop(attr)\n\n if 'min' in sequences and 'max' in sequences:\n for min, max in zip(sequences['min'], sequences['max']):\n if min > max:\n raise ValueError('The value of `min` should be less than or'\n ' equal to the value of `max`.')\n\n params = symbols(names, cls=Parameter, seq=True, **kwargs)\n for key, values in sequences.items():\n try:\n assert len(values) == len(params)\n except AssertionError:\n raise ValueError(\n '`len` of keyword-argument `{}` does not match the number of '\n '`Parameter`s created.'.format(attr)\n )\n except TypeError:\n # Iterator do not have a `len` but are allowed.\n pass\n finally:\n for param, value in zip(params, values):\n setattr(param, key, value)\n return params", "def _dict_params(self, the_dict: Dict):\n return [p for _, e in the_dict.items() for p in self._params(e)]", "def iteritems(self):", "def item_iter(self, *a):\r\n raise NotImplementedError", "def get_all_param_values(layer):\n params = get_all_params(layer)\n return [p.get_value() for p in params]", "def traverse(self, visit, *args, **kwargs):\n if not self.__visited:\n visit(self, *args, **kwargs)\n self.__visited = True\n for c in self.parameters:\n c.traverse(visit, *args, **kwargs)\n self.__visited = False", "def __iter__(self):\n for val in self.value:\n yield val", "def get_next_params(self) -> dict:\n params = {arg_name: caller() for arg_name, caller in self.parameters}\n 
return params", "def _get_param_iterator(self):\n return model_selection.ParameterGrid(self.param_grid)", "def _get_param_iterator(self):\n return model_selection.ParameterGrid(self.param_grid)", "def parameters(self) -> List[Parameter]:\n return self._parameters", "def params(self):\n return [p for sublist in [o.params for o in self.obs] for p in sublist]", "def get_params(node):\n if node.type == 'parameter':\n return [(self.descend(node.args[0]), types.translation[self.descend(node.args[1])])]\n else:\n l = []\n for p in node.args:\n l.extend(get_params(p))\n return l", "def params(self) -> List[ParamSpec]:\n return self._params", "def parameters(self):\n return [i for i in self.variables if has_roles(i, Parameter)]", "def parameters(self):\n return []", "def iter_specified(self):\n for feat in self.features:\n val = self[feat]\n if val not in ['?', 'u', None]:\n yield (feat, val)", "def itervalues(self, *args, **kwargs):\n for key in self.iterkeys():\n yield self._get(key, *args, **kwargs)", "def _list_params(self, the_list: List):\n return [p for e in the_list for p in self._params(e)]", "def get_params(self):\n return list(self.params.values())", "def parameter_values(self) -> List[Tuple[str, Any]]:\n pvs = [(param, getattr(self, variable))\n for variable, param in self.variable_name_to_query_param.items()]\n return [(p, v) for p, v in pvs if v is not None]", "def iter_fields(self, named=None, **kwargs): # pylint: disable=W0613\n # Note: using 'with' here is better than making a shell copy\n if named is not None:\n for name in named:\n with self.fields[name] as f:\n yield f\n else:\n for fld in self.fields:\n with fld as f:\n yield f", "def iteritems(self):\n\t\tfor attribute_name in dir(self):\n\t\t\tif self._valuable(attribute_name):\n\t\t\t\tyield (attribute_name, getattr(self, attribute_name))", "def get_params(self, pnames=None):\n l = []\n if pnames is None:\n pnames = self._params.keys()\n for pname in pnames:\n p = self._params[pname]\n if isinstance(p, Parameter):\n l.append(p)\n return l", "def _f_in_parameters(self) -> List[Tuple[str, str]]:\n result = list() # type: List[Tuple[str, str]]\n for param in self.params:\n type_list = param.f_type()\n for type_name, postfix in type_list:\n result.append((type_name, param.name + postfix))\n return result", "def gen_args(self, obj, pa_names = False):\n\n pal, kwal = get_class_total_args(type(obj))\n\n try:\n get_val = type(obj).__get_init_arg_val__\n except AttributeError:\n get_val = getattr\n\n for pa in pal:\n v = get_val(obj, pa)\n self.gen_field((pa + \" = \") if pa_names else \"\")\n self.pprint(v)\n\n for kwa, default in kwal.items():\n try:\n v = get_val(obj, kwa)\n except AttributeError:\n # If value cannot be obtained, skip the argument generation\n continue\n\n # generate only arguments with non-default values\n if (v is default) or (v == default):\n continue\n\n self.gen_field(kwa + \" = \")\n self.pprint(v)", "def print_params(self):\n print(self._list_params())", "def __iter__(self) -> Iterable[MemoryVariable]:\n return iter(self.variables)", "def iter_values(self):\n values = self.values\n if (values is not None):\n yield from values", "def calculate_parameters(self, item):", "def __iter__(self):\n for valTuple in itertools.product(*self._valListOfLists):\n valDict = dict(zip(self._keyTuple, valTuple))\n name=self.format(valDict)\n yield RepositoryInfo(keyTuple=self._keyTuple, valTuple=valTuple, dtype=self._dtype, name=name)", "def map(_, params):\n import numpy as np\n from itertools import product\n from random 
import shuffle\n\n if 'param_set' in params:\n parameter_sets = params['param_set']\n else:\n alphas = params['alphas']\n Vs = params['Vs']\n gammas = params['gammas']\n parameter_sets = [item for item in product(alphas, gammas, Vs)]\n shuffle(parameter_sets)\n\n ## discretize the parameter configurations and equitably distribute\n ## them for the next map instance to deal with.\n chunk_length = len(parameter_sets)/params['nprocs']\n leftover = len(parameter_sets) % params['nprocs']\n for n in xrange(params['nprocs']):\n if n < leftover:\n left = n*(1+chunk_length)\n to_yield = parameter_sets[left:left+1+chunk_length]\n else:\n left = leftover*(1+chunk_length) + (n-leftover)*chunk_length\n to_yield = parameter_sets[left:left+chunk_length]\n #print n, to_yield, len(to_yield)\n yield (n, to_yield)" ]
[ "0.82385904", "0.7981038", "0.71825755", "0.7117291", "0.7055745", "0.69547874", "0.6832161", "0.67801243", "0.67296636", "0.66869706", "0.6610378", "0.6579612", "0.64453834", "0.6407792", "0.6394474", "0.6297057", "0.62369", "0.6233893", "0.6227109", "0.6217013", "0.62052274", "0.61991006", "0.61382854", "0.612272", "0.61124295", "0.61124295", "0.61124295", "0.61124295", "0.61053663", "0.6101951", "0.6087233", "0.60774535", "0.60572034", "0.6054583", "0.6039808", "0.5995063", "0.59935105", "0.59883296", "0.5986138", "0.5981047", "0.5978028", "0.5964101", "0.5956373", "0.5954194", "0.59532136", "0.59089094", "0.5908023", "0.58949065", "0.58873653", "0.588427", "0.58738047", "0.5858942", "0.5848778", "0.5844865", "0.5840906", "0.5832469", "0.58322537", "0.5828436", "0.58281285", "0.5827477", "0.58257127", "0.581407", "0.5802668", "0.5797677", "0.5797261", "0.5791391", "0.5789703", "0.57853097", "0.57802993", "0.5777391", "0.5777357", "0.5764374", "0.5762417", "0.5727059", "0.5723443", "0.57232475", "0.57165074", "0.57165074", "0.5701774", "0.5700741", "0.5697619", "0.568923", "0.56823933", "0.56576645", "0.56548494", "0.5653751", "0.564703", "0.56292385", "0.56272674", "0.5624002", "0.56029963", "0.5600956", "0.5596183", "0.55934757", "0.5581819", "0.5574369", "0.5573563", "0.5568215", "0.5556577", "0.5551881" ]
0.7283732
2
Justified (name, value, units, doc) strings for active parameters.
def param_strs(self): name_len = max(len(p.name) for p in self) value_len = max(len(p.value_str) for p in self.params.values()) units_len = max(len(p.units) for p in self.params.values()) return [(p.name.ljust(name_len), p.value_str.ljust(value_len), p.units.ljust(units_len), p.__doc__) for p in self.params.values() if p]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n num_active = len([p for p in self if p])\n summary = \"%s has %d parameters with %d active (non-default)\" % \\\n (self.__class__.__name__, len(self), num_active)\n if num_active == 0:\n return summary\n return summary + ':\\n' + '\\n'.join(('%s = %s %s (%s)' % ps)\n for ps in self.param_strs())", "def __str__(self):\r\n res = [self.Name + ' parameters:']\r\n for t in self._tracked_properties:\r\n res.append(t + ':' + str(getattr(self, t)))\r\n for k, v in sorted(self.Params.items()):\r\n res.append(str(k) + ':' + str(v))\r\n return '\\n'.join(res)", "def display_parameters(self):\n l = []\n for param in self.parameters.all():\n if len(param.value) > 16:\n l.append(u\"{}={}...\".format(param.name, param.value[:16]))\n else:\n l.append(u\"{}={}\".format(param.name, param.value))\n return \"; \".join(l)", "def __parameters_string(self):\n if self._parameters == list():\n return ''\n\n docstring = \"\"\"\n\nParameters:\n\"\"\"\n \n # Compute maximum length of any parameter name\n maxlen = 0\n for param in self._parameters:\n maxlen = max(maxlen, len(param[0]))\n\n # Build documentation for parameters\n for (on_param, param) in enumerate(self._parameters):\n if on_param > 0:\n docstring += '\\n'\n\n docstring += ' ' + param[0].ljust(maxlen + 2)\n doc = wrap(param[1], columns - maxlen - 4)\n padding = str('')\n for line in doc.split('\\n'):\n docstring += padding + line + '\\n'\n padding = str('').ljust(maxlen + 4)\n \n # Pull off the final '\\n'\n return docstring[0:len(docstring)-1]", "def __str__(self):\n return \"{}: {} params, wires {}\".format(self.name, len(self.params), self.wires)", "def format_freeform_params(self):\n return self.format_param_pairs(self.get_freeform_reg_params())", "def params_desc(self):\n return \"{}/{}/{}/{}\".format(\n self.learning_rate, self.movement, self.milestones, self.gamma\n )", "def _parameter_summary(self, parameters, parameters_to_show=4):\n params = parameters\n if len(parameters) > parameters_to_show:\n params = parameters[:2] + [\"...\"] + parameters[-2:]\n return \", \".join(params)", "def _format_parameter_output(self, parameters: dict) -> str:\n \n output = ''\n for key, value in parameters.items():\n output = output + '\\t\\t' + str(key) + ': ' + str(value) + '\\n'\n \n return output", "def param_str(self, pnames=None):\n l = self.get_params(pnames)\n s = \"\"\n for p in l:\n s += \"%s : %s\\n\" % (p.public_name, p.tostr(self))\n return s", "def display_parameters(self):\n\n self.logging.debug(\"============\")\n for attr in self.parm_list:\n self.logging.debug(attr.label + \" (\" + attr.when + \")\" + \" = \" + str(attr.value))\n self.logging.debug(\"============\")", "def _format_parameterArray(self):\n return \"{%s; %s}\" % tuple(', '.join(str(x) for x in l)\n for l in self.parameterArray())", "def _params_formatter(field, description):\n heads = ['param']\n types = _or_types(field)\n if types:\n heads.append(types)\n heads.append(rst.escape(field['name']))\n tail = description\n return heads, tail", "def __repr__(self):\n s = self.name\n if self.param != \"None\":\n s += ' with parameter '+self.param\n s += '; '+self.applyTo\n if self.applyTo != \"global\":\n s += ': '+self.conditions\n return s", "def __str__(self):\n astr = '[\\n name: [ ' + self.name + ' ]\\n'\n astr += ' variables: [ '\n for var, init in self.variables:\n astr += '(' + var + ' := ' + init + '), '\n astr = astr[:-2] + ' ]\\n assumptions: [ '\n for assumption in self.assumptions:\n astr += assumption + ', '\n astr = astr[:-2] + ' ]\\n guarantees: [ 
'\n for guarantee in self.guarantees:\n astr += guarantee + ', '\n return astr[:-2] + ' ]\\n]'", "def show_parameters(self):\n with np.printoptions(precision=3, suppress=True):\n print('number of wind phase = {}'.format(self.ncomp))\n print('galactic parameter = {}'.format(self.scaling_field))\n print('reference height = {}'.format(self.z0))\n for p in ['cool_params','hot_params','params','ref_params','scaling_params']:\n params = getattr(self,p)\n print(p)\n for k,v in params.items():\n print(' {} = {}'.format(k,v))", "def _params_formatter(field):\n heads = ['param']\n types = _or_types(field)\n if types:\n heads.append(types)\n heads.append(field['name'])\n tail = field.get('description', '')\n return heads, tail", "def _write_params(self, size):\n msg = []\n if self.params:\n msg = ['$PARAMS\\n']\n for (key, param) in sorted(self.params.iteritems()):\n msg.append(param.print_card(size))\n return ''.join(msg)", "def __make_description(self, param_name):\n value = self._status.get_value(param_name)\n if round(value) != value:\n # Parameter is a float. Limit to three decimals.\n value = \"%.3f\" % (value)\n\n return \"%s (%s)\" % (param_name, str(value))", "def __str__(self):\n return 'Tensor product {}: {} params, wires {}'.format([i.name for i in self.obs], len(self.params), self.wires)", "def print_params(self):\n s = self._list_params()+\"\\n\"\n if 'scale_params' in self.__dict__.keys():\n s += self.scale_params._list_params()+\"\\n\"\n if 'atmospheric_params' in self.__dict__.keys():\n if self.atmospheric_params is not None:\n s += self.atmospheric_params._list_params()+\"\\n\"\n\n if 'atemperature_params' in self.__dict__.keys():\n if self.atemperature_params is not None:\n s += self.atemperature_params._list_params()+\"\\n\"\n\n if 'oceanic_params' in self.__dict__.keys():\n if self.oceanic_params is not None:\n s += self.oceanic_params._list_params()+\"\\n\"\n\n if 'ground_params' in self.__dict__.keys():\n if self.ground_params is not None:\n s += self.ground_params._list_params()+\"\\n\"\n\n if 'gotemperature_params' in self.__dict__.keys():\n if self.gotemperature_params is not None:\n s += self.gotemperature_params._list_params() + \"\\n\"\n\n print(\"Qgs v0.2.8 parameters summary\")\n print(\"=============================\\n\")\n print(s)", "def __make_description(self, param_name):\n value = self._params.get_value(param_name)\n return \"%s (Currently %s)\" % (param_name, str(value))", "def print_params():\n\n help_out = convert_phil_to_text(master_phil, att_level=1)\n txt_out = convert_phil_to_text(master_phil)\n\n return help_out, txt_out", "def __str__(self):\n \n res = ['>>> Model %(model_name)s <<<']\n res.append('')\n res.append('Independent parameters:')\n res.append('-----------------------')\n res.append('')", "def format_params(self, params):\n return params", "def pretty(self):\n def arg_to_str(name, value):\n if value is True:\n return '+' + name\n elif value is False:\n return '~' + name\n elif isinstance(value, Var):\n if value.name == name:\n return '?' 
+ value.name\n return name + \"=\" + value.name\n else:\n return name + \"=\" + repr(value)\n\n if len(self.args) == 0:\n return self.name\n return \"{}[{}]\".format(self.name,\n \", \".join(arg_to_str(name, value)\n for name, value in self.args))", "def __str__(self):\n return '\\tHandle: %(handle)d (0x%(handle).2x)\\n' \\\n '\\tLength: %(length)d (0x%(length).2x)\\n' \\\n '\\tParameters:\\n' \\\n '\\t\\t%(data)s' % {'handle': self.handle, 'length': self.length,\n 'data': self.format_raw_data(self.parameters)}", "def __str__(self):\n return '\\tHandle: %(handle)d (0x%(handle).2x)\\n' \\\n '\\tLength: %(length)d (0x%(length).2x)\\n' \\\n '\\tParameters:\\n' \\\n '\\t\\t%(data)s' % {'handle': self.handle, 'length': self.length,\n 'data': self.format_raw_data(self.parameters)}", "def __str__(self):\n return '\\tCode: %(code)d (0x%(code).2x)\\n' \\\n '\\tLength: %(length)d (0x%(length).2x)\\n' \\\n '\\tParameters:\\n' \\\n '\\t\\t%(data)s' % {'code': self.code, 'length': self.length,\n 'data': self.format_raw_data(self.parameters)}", "def summary_parameters(self):\n text = re.sub(r'\\r?\\n', ' - ', self.opt.text[:200])\n return {'adding': text}", "def __str__(self):\n\n return \"<ExoParameter>: {0}\".format(self.__dict__)", "def _disp_props(self):\n ret = list()\n if self.required:\n ret.append('required')\n if self.default:\n ret.append('default=%s' % self.default)\n return ret", "def description(self):\n active = np.nonzero([bool(p) for p in self])[0]\n last_active = active[-1] if len(active) else -1\n return ' '.join([p.value_str for p in self][:last_active + 1])", "def call_str(pvs):\n s = \"'{}', '{}'\".format(pvs.get('place'), pvs.get('stat_var'))\n if pvs.get('measurement_method'):\n s += \", measurement_method='{}'\".format(\n pvs.get('measurement_method'))\n if pvs.get('observation_period'):\n s += \", observation_period='{}'\".format(\n pvs.get('observation_period'))\n if pvs.get('unit'):\n s += \", unit='{}'\".format(pvs.get('unit'))\n if pvs.get('scaling_factor'):\n s += \", scaling_factor={}\".format(pvs.get('scaling_factor'))\n return s", "def get_string(self, check=True):\n\n if check:\n self.check()\n\n # generate the string\n s = []\n for qsection, qsec_parms in Q_PARAMETERS.iteritems():\n if not qsection in self.parameters:\n continue\n s.append(\"[%s]\" % qsection)\n if \"group_contribution\" in qsection or \"restraints\" in qsection:\n s.extend(self.parameters[qsection])\n elif \"lambda\" in qsection:\n s.append(self.parameters[qsection])\n else:\n for key,value in qsec_parms.iteritems():\n if key in self.parameters[qsection]:\n s.append(\"%-20s %30s\" % (key,self.parameters[qsection][key]))\n\n s.append(\"\")\n return \"\\n\".join(s)", "def print_attr(self):\n return \"name : {0}\\nprice : {1}\\ndescription : {2}\".format(\n self.name, self.price, self.description\n )", "def __str__(self):\n status = \"height = {}\\n\".format(self.height)\n status += \"width = {}\\n\".format(self.width)\n status += \"channels = {}\\n\".format(self.channels)\n status += \"input_dim = {}\\n\".format(self.input_dim)\n status += \"architecture = {}\\n\".format(self.architecture)\n status += \"activations = {}\\n\".format(self.activations)\n status += \"batch_size = {}\\n\".format(self.batch_size)\n status += \"epochs = {}\\n\".format(self.epochs)\n status += \"save_step = {}\\n\".format(self.save_step)\n status += \"learning_rate = {}\\n\".format(self.learning_rate)\n status += \"momentum = {}\\n\".format(self.momentum)\n return status", "def _format_parameterArray_ascii(self):\n art = 
ascii_art(*sum(zip([ascii_art(x)\n for l in self.parameterArray() for x in l],\n ([\", \"] * (self.classes()-1) + [\"; \"]) * 2),\n ())[:-1])\n return ascii_left_curly_brace.character_art(art.height()) + art \\\n + ascii_right_curly_brace.character_art(art.height())", "def __str__(self):\n return self.parameters.__str__()", "def __str__(self) -> str:\n str_ = self.method\n if self.basis is not None:\n str_ += f'/{self.basis}'\n if self.auxiliary_basis is not None:\n str_ += f', auxiliary_basis: {self.auxiliary_basis}'\n if self.dispersion is not None:\n str_ += f', dispersion: {self.dispersion}'\n if self.cabs is not None:\n str_ += f', cabs: {self.cabs}'\n if self.solvation_method is not None:\n str_ += f', solvation_method: {self.solvation_method}'\n if self.solvent is not None:\n str_ += f', solvent: {self.solvent}'\n if self.solvation_scheme_level is not None:\n str_ += f\", solvation_scheme_level: '{str(self.solvation_scheme_level)}'\"\n if self.software is not None:\n str_ += f', software: {self.software}'\n if self.software_version is not None:\n str_ += f', software_version: {self.software_version}'\n if self.args is not None and self.args and all([val for val in self.args.values()]):\n if any([key == 'keyword' for key in self.args.keys()]):\n str_ += ', keyword args:'\n for key, arg in self.args.items():\n if key == 'keyword':\n str_ += f' {arg}'\n if self.method_type is not None:\n str_ += f' ({self.method_type})'\n return str_", "def format_parameter(param, required):\n\n param_string = check_param(flatten_param(param))\n if not required:\n param_string += '=None'\n return param_string", "def paramDetails(cls):\n return {\n 'dim': (10, 20, 2, 20),\n 'nIter': (1, 10, 2, 5),\n 'lamb': (.1, 1., .1, .05),\n 'alph': (30, 50, 5, 40)\n }", "def _get_params_summary(self, alpha=0.1):\n\n # TODO: Acknowledge that this code was modified from the statsmodels package\n\n results = self._model.fit()\n\n def forg(x, prec=3):\n if prec == 3:\n # for 3 decimals\n if (abs(x) >= 1e4) or (abs(x) < 1e-4):\n return '%9.3g' % x\n else:\n return '%9.3f' % x\n elif prec == 4:\n if (abs(x) >= 1e4) or (abs(x) < 1e-4):\n return '%10.4g' % x\n else:\n return '%10.4f' % x\n else:\n raise NotImplementedError\n\n # Parameters part of the summary table\n conf_int = results.conf_int(alpha)\n\n # Dictionary to store the header names for the parameter part of the\n # summary table. look up by modeltype\n alp = str((1 - alpha) * 100) + '%'\n\n param_header = ['coef', 'std err', 't', 'P>|t|',\n '[' + alp + ' Conf. 
Int.]']\n\n xname = self._model.exog_names\n\n params_stubs = xname\n\n exog_idx = range(len(xname))\n\n # center confidence intervals if they are unequal lengths\n confint = [\"%s %s\" % tuple(map(forg, conf_int.ix[i])) for i in exog_idx]\n len_ci = list(map(len, confint))\n max_ci = max(len_ci)\n min_ci = min(len_ci)\n\n if min_ci < max_ci:\n confint = [ci.center(max_ci) for ci in confint]\n\n # explicit f/g formatting, now uses forg, f or g depending on values\n params_data = zip([forg(results.params[i], prec=4) for i in exog_idx],\n [forg(results.bse[i]) for i in exog_idx],\n [forg(results.tvalues[i]) for i in exog_idx],\n # [\"%#6.3f\" % (results.pvalues[i]) for i in exog_idx],\n [\"%#6.3g\" % (results.pvalues[i]) for i in exog_idx],\n confint\n )\n params_data = list(params_data)\n parameter_table = SimpleTable(params_data,\n param_header,\n params_stubs,\n txt_fmt=fmt_params\n )\n\n if results.params.shape[0] > 2:\n vif_table = self._get_vif_table()\n parameter_table.extend_right(vif_table)\n\n return parameter_table", "def _format_parameterArray_unicode(self):\n art = unicode_art(*sum(zip([unicode_art(x)\n for l in self.parameterArray()\n for x in l],\n ([\", \"] * (self.classes()-1)\n + [\"; \"]) * 2), ())[:-1])\n return unicode_left_curly_brace.character_art(art.height()) + art \\\n + unicode_right_curly_brace.character_art(art.height())", "def __str__(self):\n return '\\n'+'\\n'.join([\"%-15s: %s\" % (qq(w), str(v)) for w, v in sorted(self.value.items())]) + '\\0'", "def __str__(self):\n astr = ' variables:\\t[ '\n for var in self.variables:\n astr += str(var) + ', '\n astr = astr[:-2] + ' ]\\n assumptions :\\t[ '\n for assumption in self.assumptions.cnf:\n astr += assumption.formula + ', '\n astr = astr[:-2] + ' ]\\n guarantees :\\t[ '\n for guarantee in self.guarantees.cnf:\n astr += guarantee.formula + ', '\n # astr = astr[:-2] + ' ]\\n guarantees_unsat :\\t[ '\n # for guarantee in self.guarantees.cnf:\n # astr += guarantee.unsaturated + ', '\n return astr[:-2] + ' ]\\n'", "def __str__(self):\n return self.params", "def text(self) -> str:\n text = []\n\n if self.min is not None:\n text.append(str(self.min))\n if self.include_min:\n text.append(\"<=\")\n else:\n text.append(\"<\")\n\n if self.min is not None or self.max is not None:\n text.append(\"value\")\n\n if self.max is not None:\n if self.include_max:\n text.append(\"<=\")\n else:\n text.append(\"<\")\n text.append(str(self.max))\n\n if self.step is not None:\n if self.min is not None or self.max is not None:\n text.append(\"and\")\n text.extend([\"value %\", str(self.step), \"==\"])\n if self.min is None:\n text.append(\"0\")\n else:\n text.append(str(self.min % self.step))\n\n return \" \".join(text)", "def pretty_print(self, value, add_unit=False):\n s = \"%.1f\" % self.internal_to_friendly(value)\n if add_unit: s += \" \" + self.friendly_units\n return s", "def generateKwargsAsString(self):\n args = \"\"\n axisList = self.tabWidget.currentWidget()\n\n for axisWidget in axisList.getAxisWidgets():\n args += \"%s = %s, \" % (axisWidget.axis.id,\n axisWidget.getCurrentValuesAsStr())\n\n # Generate additional args\n args += 'squeeze = 0'\n args += \", order = '%s' \" % axisList.getAxesOrderString()\n return args", "def __str__(self):\n\n string = \"values:\\n\\t\"\n string += \" x \".join(map(str, self.shape))\n\n string += \" {} ({})\\n\".format(type(self.values).__name__, self.values.dtype)\n\n if self.print_values is True:\n string += str(self.values) + \"\\n\"\n\n string += \"dims:\\n\\t\"\n\n string += 
\"{}\\n\".format(self.dims)\n\n string += \"coords:\\n\\t\"\n string += \"\\n\\t\".join(map(repr, self.coords))\n\n string += \"\\n\"\n\n string += \"attrs:\\n\"\n\n for ix, key in enumerate(self.attrs.keys()):\n if ix == self.max_print_attrs:\n string += \"\\t+%i attrs\" % (len(self.attrs) - self.max_print_attrs)\n break\n string += \"\\t{!r}: {!r}\\n\".format(key, self.attrs[key])\n\n return string", "def __str__(self):\n if self.f_has_range():\n lenstr = \"len:%d\" % self.f_get_range_length()\n else:\n lenstr = \"\"\n\n if self.v_comment:\n commentstr = \"`%s`\" % self.v_comment\n else:\n commentstr = \"\"\n\n if commentstr or lenstr:\n if commentstr and lenstr:\n combined_str = \"%s, %s\" % (lenstr, commentstr)\n elif commentstr:\n combined_str = commentstr\n elif lenstr:\n combined_str = lenstr\n else:\n raise RuntimeError(\"You shall not pass!\")\n\n infostr = \" (%s)\" % combined_str\n\n else:\n infostr = \"\"\n\n return_string = \"%s %s%s\" % (self.f_get_class_name(), self.v_full_name, infostr)\n\n if not self.f_is_empty():\n return_string += \": \" + self.f_val_to_str()\n\n return return_string", "def __str__(self):\n s = ''\n for i, (k, v) in enumerate(self.meters.items()):\n if i > 0:\n s += ' '\n s += k + ' ' + str(v)\n return s", "def __str__(self):\n status = \"height = {}\\n\".format(self.height)\n status += \"width = {}\\n\".format(self.width)\n status += \"channels = {}\\n\".format(self.channels)\n status += \"architecture = {}\\n\".format(self.architecture)\n status += \"activations = {}\\n\".format(self.activations)\n status += \"conv_activations = {}\\n\".format(self.conv_activations)\n status += \"conv_architecture = {}\\n\".format(self.conv_architecture)\n status += \"kernel_sizes = {}\\n\".format(self.kernel_sizes)\n status += \"pool_kernel = {}\\n\".format(self.pool_kernel)\n status += \"batch_size = {}\\n\".format(self.batch_size)\n status += \"epochs = {}\\n\".format(self.epochs)\n status += \"save_step = {}\\n\".format(self.save_step)\n status += \"learning_rate = {}\\n\".format(self.learning_rate)\n status += \"momentum = {}\\n\".format(self.momentum)\n return status", "def __str__(self):\n description = \"-------- %s --------\" % (self.name)\n description += \"\\nnmax = %i\" % (self.nmax)\n description += \"\\nnslots = %i\" % (self.nslots)\n description += \"\\nbonus_power = %i\" % (self.bonus_power)\n description += \"\\nbonus_initiative = %.1f\" % (self.bonus_initiative)\n description += \"\\nneeds_drive = %i\" % (self.needs_drive)\n description += \"\\nis_mobile = %i\" % (self.is_mobile)\n description += \"\\n----- Default Parts -----\"\n for i in range(len(self.default_parts)):\n description += \"\\n%i) %s\" % (i + 1, self.default_parts[i].name)\n return description", "def __str__(self):\n model_parameters = filter(lambda p: p.requires_grad, self.parameters())\n params = sum([np.prod(p.size()) for p in model_parameters])\n return super(BaseModel, self).__str__() + '\\nTrainable parameters: {}'.format(params)\n # print(super(BaseModel, self))", "def format_parameter_value(self, param_config, precision):\n # type: (Dict[str, Any], int) -> str\n return \"\"", "def print_str(self):\n print('*StanGpMatern with params={}'.format(self.params))", "def __str__(self):\n info_nvps = [\n ('sid', self.sid)\n ] + self.__str_additional_info_nvps__()\n # Create a \"name=val\" string for each name-value pair, then concatenate\n # them all together, separated by commas.\n info_str = ', '.join([\n '%s=%s' % (name, str(val)) for (name, val) in info_nvps])\n return '%s(%s)' % 
(self.__class__.__name__, info_str)", "def __str__(self) -> str:\n return '\\n'.join([f'{hp}: {self.hyperparams[hp]}'\n for hp in self.hyperparams])", "def __repr__(self):\n return \"{0}({1})\".format(self.__class__.__name__,\n \", \".join(map(str, self.pars)))", "def __str__(self) -> str:\n if self.scalar_vector:\n return f\"({self.w:-.4f} {self.x:+.4f}i {self.y:+.4f}j {self.z:+.4f}k)\"\n return f\"({self.x:-.4f}i {self.y:+.4f}j {self.z:+.4f}k {self.w:+.4f})\"", "def __str__(self, extended=False):\n _pretty_str = 'Damping: %s | Seg-Count: %s | ncoeff: %s \\n'\n _pretty_str = _pretty_str % (self.damp, self.nsegments,\n self.ncoeffs)\n\n if isinstance(self.modes_in, Modes):\n _pretty_str += 'Mode(s):'\n for mod in self.modes_in:\n _pretty_str += ' %s' % (mod.name)\n\n if isinstance(self.modes_cc_in, Modes):\n _pretty_str += '\\nCC mode(s):'\n for mod in self.modes_cc_in:\n _pretty_str += ' %s' % (mod.name)\n _pretty_str += '\\nname: %s\\n' % self.name\n\n return _pretty_str", "def __str__(self):\n # defaults to the class name\n if self.p is None:\n return self.__class__.__name__\n\n # class name and parameter values\n temp = [str(i) for i in self.p]\n return self.__class__.__name__+'('+', '.join(temp)+')'", "def __str__(self):\n\n print(\"\")\n s = \"NAME : \"+self._name+\"\\n\\n\"\n s += \"PARAMS :\"\n print(s)\n\n for key, val in self.params.items():\n l = (21-len(key))//7\n print(\"{0}\".format(key)+\"\\t\"*l+\":\\t{0}\".format(val))\n\n s = \"\\nRuns stored in DEFAULT_RUNS = \"+str(len(self.default_runs))\n print(s)\n\n s = \"\\nRuns stored in MOD_RUNS = \"+str(len(self.mod_runs))\n print(s)\n\n return \"\"", "def __str__(self):\r\n s = ''\r\n for i, (k, v) in enumerate(self.meters.items()):\r\n if i > 0:\r\n s += ' '\r\n s += k + ' ' + str(v)\r\n return s", "def __str__(self):\n txt = \"%s:\\n\" % self.name\n txt += \" Charge: %.4f\\n\" % self.charge\n txt += \" Radius: %.4f\" % self.radius\n return txt", "def __str__(self):\n _str = \"Variables:\\n\"\n for variable in self.variables:\n _str += \" {}\\n\".format(str(variable))\n _str += \"\\nConstraints:\\n\"\n for constraint in self.constraints:\n _str += \" {}\\n\".format(str(constraint))\n return _str", "def __str__(self) -> str:\n if len(self.saliva_data) > 0:\n return \"\"\"{}\n Saliva Type(s): {}\n Saliva Sample Times: {}\n Structure: {}\n \"\"\".format(\n self.name, self.saliva_types, self.sample_times, self.structure\n )\n return \"\"\"{}\n Structure: {}\"\"\".format(\n self.name, self.structure\n )", "def __str__(self):\n if len(self.label) > 0:\n descr = [\"'%s', target='%s' [%s]\" % (self.label, self.target.name, self.target.body_type)]\n else:\n descr = [\"target='%s' [%s]\" % (self.target.name, self.target.body_type)]\n if self.baseline:\n descr[0] += ', initial baseline offset=%f' % (self.baseline.poly[-1],)\n if self.beam:\n descr[0] += ', beam height=%f' % (self.beam.height,)\n for scan_ind, scan in enumerate(self.scans):\n descr.append('%4d: %s' % (scan_ind, str(scan)))\n return '\\n'.join(descr)", "def __str__(self):\n s = \"\"\n for field in self.fields:\n if field.size not in VAR_PREFIXES:\n s += field.name + \": \" + str(field.size) + \" bits with value \" + str(field.value) + \".\\n\"\n else:\n s += field.name + \": variable size: \" + str(field.size) + \", with value \" + str(field.value) + \".\\n\"\n\n return s", "def format_param_pairs(self, params_pairs):\n out = \"\"\n for param in params_pairs:\n out += \"{} {} \".format(*param)\n return out", "def details(self) -> str:\n return f\"- **language**: 
[{self.language}]\\n\" \\\n f\"- **opengame**: [{self.opengame}]\\n\" \\\n f\"- **system**: [{self.system}]\\n\" \\\n f\"- **mode**: [{self.mode}]\\n\" \\\n f\"- **attributes**: [{self.attributes}]\\n \" \\\n f\"- **score_threshold**: [{self.score_threshold}]\\n \" \\\n f\"- **monsters**: [{self.monsters}]\\n\"", "def get_config_string(params, units=None):\n compact_str_items = []\n # first make a list of compact strings for each parameter\n for k, v in params.items():\n unit = \"\"\n if isinstance(units, dict): #check if not None not enough, units could be mocked which causes errors\n unit = units.get(k, \"\")\n compact_str_items.append(k + \"=\" + str(v) + unit)\n # and finally join them\n compact_str = \", \".join(compact_str_items)\n return compact_str", "def __str__(self):\n s = ''\n for i, (k, v) in enumerate(self.meters.iteritems()):\n if i > 0:\n s += ' '\n s += k + ' ' + str(v)\n return s", "def __repr__(self):\n return pformat(vars(self))", "def __str__( self ):\n assert isinstance( self.level, int )\n assert isinstance( self.prop, WFF )\n assert isinstance( self.justification, Inference )\n\n return \"Step( %d, %s, %s )\" % ( self.num, repr( self.prop ), repr( self.justification ) )", "def displayDataDescr(cls):\n return (\n \"Parameter\",\n \"Auto range\",\n \"Lower\",\n \"Upper\",\n \"Number of bins\",\n \"X-axis scaling\",\n \"Y-axis weighting\"\n )", "def __str__(self):\r\n\r\n retval = self.__class__.__name__ + ' ('\r\n for val in self.VALUES:\r\n value = getattr(self, val, None)\r\n if value is not None:\r\n retval += '%s:%.4f ' % (val, getattr(self, val))\r\n return retval.strip() + ')'", "def full_def(self):\r\n if self.pronunciation:\r\n return f\"{self.name}, {self.lat}, {self.lon}, {self.heading.lstrip('!') or 0}, {self.pronunciation}\".rstrip(\", \")\r\n elif self.heading.lstrip('!'):\r\n return f\"{self.name}, {self.lat}, {self.lon}, {self.heading.lstrip('!')}\"\r\n return f\"{self.name}, {self.lat}, {self.lon}\"", "def __str__(self):\n return '[{0}, {1}]'.format(self.timeValuePairs, self.defaultValue)", "def __str__(self):\r\n unit = {True: '', False: ' dB(A)'}[isinstance(self.Lbeta, str)]\r\n values = (self.label(), str(self.Lbeta), unit, self.E, self.minTauG, INSULATIONS[self.insulation])\r\n return '%s: $L_\\\\beta$ = %s%s, $E$ = %.1f dB(A), $\\\\tau_G$ = %.1fs, insulation %.1f dB(A)' % values", "def str (self, max_len_first, max_len_following=0) :\r\n\r\n\t\tresult = [\"PARAMETER (\"]\r\n\t\tindent = len(result[0])\r\n\t\tmax_len_first -= indent+1 ## we assume it fits on the line\r\n\t\tif not max_len_following == 0 :\r\n\t\t\tmax_len_following -= indent\r\n\r\n\t\tnames = []\r\n\t\tfor param in self.parameters :\r\n\t\t\tnames += [param[0],\"=\", param[1], \",\"]\r\n\t\tdel names[-1]\r\n\r\n\t\t## FIXME: maybe there is a really long right hand side in the parameter\r\n\t\t## statement. 
So catch the exeption and in nessacasy split the rh-sides\r\n\t\t\r\n\t\tparams = tokenizer.join_tokens(names, max_len_first, max_len_following)\r\n\r\n\t\tresult[0] += params[0]\r\n\t\tfor line in params[1:] :\r\n\t\t\tresult.append (indent*\" \" + line)\r\n\r\n\t\tresult[-1] += \")\"\r\n\t\t\r\n\t\treturn [result]", "def info(self):\n import tc\n ## enumerate all options\n opts = self.to_list()\n res = \"\"\n fmt = \"%20s = %5s ## %s\\n\"\n\n for k, v in opts:\n res += fmt % (k, str(self.__getattribute__(k)),\n str(v.doc()).split('\\n')[0])\n\n return res", "def __str__(self):\n return f\"#{self.number}| {self.active}| {self.name}: {self.desc}\"", "def __str__(self):\n result=\"curv %f d0 %f z0 %f ctheta %f phi %f barcode %d\"%(self.curv,self.d0,self.z0,self.ctheta,self.phi,self.barcode)\n return result", "def _format_parameterArray_latex(self):\n return r\"\\left\\{%s; %s\\right\\}\" % tuple(', '.join(latex(x)\n for x in l) for l\n in self.parameterArray())", "def __str__(self):\n self._validate()\n commandline = \"%s \" % self.program_name\n for parameter in self.parameters:\n if parameter.is_set:\n #This will include a trailing space:\n commandline += str(parameter)\n return commandline.strip() # remove trailing space", "def __repr_parameter__(self, name: str, value: Any) -> str:\n return f\"{name}={value!r}\"", "def __str__(self):\n\n out = \"QuakeMigrate parameters\"\n out += \"\\n\\tTime step\\t\\t:\\t{}\".format(self.time_step)\n out += \"\\n\\n\\tData sampling rate\\t:\\t{}\".format(self.sampling_rate)\n out += \"\\n\\n\\tDecimation\\t\\t:\\t[{}, {}, {}]\".format(\n self.decimate[0], self.decimate[1], self.decimate[2])\n out += \"\\n\\n\\tBandpass filter P\\t:\\t[{}, {}, {}]\".format(\n self.p_bp_filter[0], self.p_bp_filter[1], self.p_bp_filter[2])\n out += \"\\n\\tBandpass filter S\\t:\\t[{}, {}, {}]\".format(\n self.s_bp_filter[0], self.s_bp_filter[1], self.s_bp_filter[2])\n out += \"\\n\\n\\tOnset P [STA, LTA]\\t:\\t[{}, {}]\".format(\n self.p_onset_win[0], self.p_onset_win[1])\n out += \"\\n\\tOnset S [STA, LTA]\\t:\\t[{}, {}]\".format(\n self.s_onset_win[0], self.s_onset_win[1])\n out += \"\\n\\n\\tPre-pad\\t\\t\\t:\\t{}\".format(self.pre_pad)\n out += \"\\n\\tPost-pad\\t\\t:\\t{}\".format(self.post_pad)\n out += \"\\n\\n\\tMarginal window\\t\\t:\\t{}\".format(self.marginal_window)\n out += \"\\n\\tPick threshold\\t\\t:\\t{}\".format(self.pick_threshold)\n out += \"\\n\\tPicking mode\\t\\t:\\t{}\".format(self.picking_mode)\n out += \"\\n\\tFraction ttime\\t\\t:\\t{}\".format(self.fraction_tt)\n out += \"\\n\\n\\tCentred onset\\t\\t:\\t{}\".format(self.onset_centred)\n out += \"\\n\\n\\tNumber of CPUs\\t\\t:\\t{}\".format(self.n_cores)\n\n return out", "def __str__(self):\n status = \"height = {}\\n\".format(self.height)\n status += \"width = {}\\n\".format(self.width)\n status += \"channels = {}\\n\".format(self.channels)\n status += \"classes = {}\\n\".format(self.classes)\n status += \"batch_size = {}\\n\".format(self.batch_size)\n status += \"epochs = {}\\n\".format(self.epochs)\n status += \"save_step = {}\\n\".format(self.save_step)\n status += \"learning_rate = {}\\n\".format(self.learning_rate)\n status += \"momentum = {}\\n\".format(self.momentum)\n return status", "def format(self):\n groups = [g + \".\" for g in self.groups]\n params = [\";\" + p.format() for p in self.params]\n groups_name_params = \"\".join(groups) + self.name + \"\".join(params)\n return groups_name_params + \":\" + self.format_value() + CRLF", "def __str__(self):\n\t\n\t\tresult = 
\"\"\n\t\tresult += \"Torsional Spring Specs: \\n\"\n\t\tresult += \"Shape Eq. Slope: {0}\\n\".format(str(self.shape_slope))\n\t\tresult += \"Z Thickness: {0}\\n\".format(str(self.z_thick))\n\t\tresult += \"In-Plane Thickness: {0}\\n\".format(str(self.thick))\n\t\tresult += \"Spiral Length: {0}\\n\".format(str(self.length))\n\n\t\treturn result", "def __str__(self):\n out_tmplt = (\n \"Pole (lon/lat): {pollon}/{pollat}\\n\"\n \"lon_arr:\\n{lon_arr}\\n\"\n \"lat_arr:\\n{lat_arr}\\n\"\n )\n dic = {'pollon': self.pol_lon,\n 'pollat': self.pol_lat,\n 'lon_arr': self.lon_arr,\n 'lat_arr': self.lat_arr\n }\n return out_tmplt.format(**dic)", "def _pprint_params(self):\n return {'x_range': self.x_range, 'y_range': self.y_range,\n 'step': self.step, 'shape': self.shape,\n 'type': self.type}", "def __repr__(self):\n return \"<katpoint.Parameter %s = %s %s at 0x%x>\" % \\\n (self.name, self.value_str, self.units, id(self))", "def get_params(self, params, name_request):\n self.write('')\n for elem in params:\n request_type = elem['type'] if elem.get('type', None) else 'schema'\n name = elem['name']\n if elem.get('required', None):\n name += '(required)'\n schema = elem.get('schema', None)\n name = f':{name_request} {request_type} {name}:'\n if schema:\n definition = schema['$ref'].split('/')[-1]\n self.write(name + f' :ref:`{definition}`', 1)\n self.write('')\n else:\n desc = elem.get('description', '')\n self.write(name)\n self.write(f'{desc}', self.indent_depth + 1)\n self.write('')", "def __repr__(self):\n indent = len(self.type) + 2\n jstr = ',\\n' + ' ' * indent\n\n props = self._display_properties()\n\n params = jstr.join('{:}={:}'.format(p, summary(self[p],\n indent=indent))\n for (p, dp) in props)\n return '<{}({:})>'.format(self.type, params)", "def __str__(self):\n return (\">%s\\n\" % self.name) + \\\n wrap(self.sequence, self.COLUMNS)", "def __repr__(self):\n name = self.__class__.__name__\n # values = \", \".join(\"{}={}\".format(k, repr(v)) for k, v in sorted(self.__dict__.items())\n # if k[0] != \"_\" and not k.endswith('manager'))\n values = \", \".join(\"{}={}\".format(k, v) for k, v in self.parameters.items())\n return \"{}({})\".format(name, values)" ]
[ "0.7582478", "0.6667753", "0.65951604", "0.6493384", "0.64455795", "0.64114326", "0.63715106", "0.62896985", "0.6242153", "0.62383384", "0.62208116", "0.62207097", "0.61746264", "0.6109571", "0.61072844", "0.6098375", "0.60816747", "0.6051815", "0.6050969", "0.60330296", "0.6029222", "0.6026988", "0.59995276", "0.5982199", "0.5974766", "0.59525067", "0.5951336", "0.5951336", "0.5951253", "0.59383744", "0.5909703", "0.590557", "0.59048283", "0.59047353", "0.5904223", "0.5897322", "0.58906645", "0.58886105", "0.58882976", "0.58832175", "0.58737123", "0.5873627", "0.58657277", "0.58647496", "0.58558863", "0.5841271", "0.5825994", "0.5804215", "0.5803636", "0.5802975", "0.57926136", "0.5792148", "0.57844603", "0.5779163", "0.57750386", "0.5774455", "0.57700294", "0.57667154", "0.57598436", "0.5758945", "0.57531875", "0.5748131", "0.57423055", "0.573985", "0.5735701", "0.5729387", "0.5726019", "0.5715186", "0.5714819", "0.5705661", "0.5691674", "0.5682546", "0.5679039", "0.5673664", "0.56671494", "0.56644773", "0.5662941", "0.5661739", "0.5660383", "0.5656214", "0.56561154", "0.5655219", "0.5639238", "0.563299", "0.5632173", "0.5628855", "0.5627607", "0.56215256", "0.5612887", "0.5611699", "0.56099826", "0.56077886", "0.5601754", "0.5596545", "0.55863416", "0.55841035", "0.55829823", "0.55813915", "0.5570026", "0.5567845" ]
0.7280672
1
Short human-friendly string representation of model object.
def __repr__(self): num_active = len([p for p in self if p]) return "<katpoint.%s active_params=%d/%d at 0x%x>" % \ (self.__class__.__name__, num_active, len(self), id(self))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n model = self._meta.verbose_name.title()\n return f\"{model:s}: {self.name:s}\"", "def __str__(self):\n model = self._meta.verbose_name.title()\n return f\"{model:s}: {self.name:s}\"", "def __str__(self):\n return super().__str__() + self.model.__str__()", "def __str__(self):\n model = self._meta.verbose_name.title()\n title = self.extended_object.get_title()\n return f\"{model:s}: {title:s}\"", "def __str__(self):\n return '%s%s' % (self.name, ' - %s' % self.model if self.model else '')", "def __str__(self):\n model = self._meta.verbose_name.title()\n title = self.title or str(_(\"Empty title\"))\n\n return f\"{model:s}: {title:s}\"", "def __str__(self):\n return str(self.obj)", "def __str__(self):\n return str(self.__dict__['_obj'])", "def __str__(self) -> str:\n # noinspection PyUnresolvedReferences\n opts = self._meta\n if self.name_field:\n result = str(opts.get_field(self.name_field).value_from_object(self))\n else:\n model_fields = get_model_fields(\n opts.model,\n foreign=False,\n m2m=False,\n exclude=self.exclude_from_str\n )\n # TODO: replace the above with the below to remove the get_model_fields call:\n # model_fields = [\n # f for f in opts.get_fields()\n # if f.concrete\n # and not (f.primary_key or f.is_relation or f.name in self.exclude_from_str)\n # ]\n result = \" \".join(\n [\n str(fld.value_from_object(self))\n for fld in model_fields\n if fld.value_from_object(self)\n ]\n )\n return result.strip() or super().__str__()", "def __str__(self) -> str:\n if self.name_field:\n return str(getattr(self, self.name_field))\n # noinspection PyUnresolvedReferences\n data = [\n # Collect the string representations of related objects.\n # getattr(self, fk_field.attname) and\n # fk_field.value_from_object(self) would only return the primary\n # key of the related object.\n str(getattr(self, fk_field.name))\n for fk_field in get_model_fields(\n self._meta.model, base=False, foreign=True, m2m=False\n )\n if not fk_field.null\n ]\n if len(data) < 2:\n # Cannot build a more meaningful representation than the default.\n return super().__str__()\n else:\n template = \"{}\" + \" ({})\" * (len(data) - 1)\n return template.format(*data)", "def __str__(self):\n\n return '__str__ for Object'", "def __str__(self):\n return \"<%s: %s>\" % (self.__class__, self.describe())", "def __str__(self):\n return f\"{self._meta.verbose_name.title()}: {self.name}\"", "def to_short_string(self):\n return f'{self.name} - {self.resource_type}'", "def __str__(self):\n return \"DataModel(name={},attributes={},description={})\".format(\n self.name, {a.name: str(a) for a in self.attributes}, self.description\n )", "def __str__(self):\n return f\"{self._meta.verbose_name.title()} — {self.name}\"", "def __str__(self):\n return f\"{self._meta.verbose_name.title()} #{self.id}\"", "def __str__(self):\n return f\"{self.full_name} ({self.short_name})\"", "def __str__(self):\n return f\"{self.full_name} ({self.short_name})\"", "def __str__(self):\n \n res = ['>>> Model %(model_name)s <<<']\n res.append('')\n res.append('Independent parameters:')\n res.append('-----------------------')\n res.append('')", "def __str__(self):\n return str(self.__dict__)", "def __str__(self):\n return str(self.__dict__)", "def __str__(self):\n return 'str-human.%s' % self.name", "def __str__(self):\n model = self._meta.verbose_name.title()\n return f\"{model:s}: {self.licence.name:s}\"", "def __str__(self):\n tablename = self.tablename()\n attrs = {}\n if Registry.SCHEMAS.has_key(tablename):\n for key in 
Registry.SCHEMAS[tablename]:\n attrs[key] = getattr(self, key, None)\n return \"<%s object: %s>\" % (self.__class__.__name__, str(attrs))", "def __str__(self):\n return self.make_flat()", "def __str__(self):\n return self._name+self._description", "def __str__(self):\n return '{}({})'.format(type(self).__name__, self.__name)", "def __str__(self):\n msg = [\n f'{self.model=}',\n f'{self.field=}',\n f'{self.fxx=}',\n f'{self.date=}',\n f'{self.priority=}',\n ]\n return '\\n'.join(msg)", "def __str__(self):\n return self.s", "def __str__(self):\n return '%s' % (self.name)", "def __str__(self):\n return '%s' % (self.name)", "def __str__(self):\n return '%s' % (self.name)", "def __str__(self):\n return '%s' % (self.name)", "def __str__(self):\n return '%s' % (self.name)", "def __str__(self):\n s = \"[{}] ({}) {}\".format(str(\n type(self).__name__), self.id, self.__dict__)\n return s", "def __str__(self):\n return f\"<{full_class_name(self)} {self.name!r} @{'%x' % id(self)}>\"", "def __str__(self):\n return (\n f'{self.__class__.__name__}'\n f'\\n> defined by: {self._str_meta_()}'\n f'\\n> with columns: {self._str_colnames()}'\n f'\\n> {len(self)} objects'\n f'\\n{APtable.__str__(self)}'\n )", "def __str__(self):\n return f\"model {self._name}\"", "def __str__(self):\n if self.__description:\n return self.__description\n return repr(self)", "def __str__(self):\n return \"{0} : {1}\".format(self.name, self.description)", "def display_name(self, obj):\n return six.text_type(obj)", "def stringify_short(self):\n return self.stringify()", "def __str__(self):\n return \"[{}] ({}) {}\".format(self.__class__.__name__, self.id,\n self.__dict__)", "def __str__(self):\n return \"[{}] ({}) {}\".format(self.__class__.__name__, self.id,\n self.__dict__)", "def __str__(self):\n return type(self).__name__ + str(vars(self))", "def __str__(self):\n str = \"[{}] ({}) {}\"\n return (str.format(self.__class__.__name__, self.id, self.__dict__))", "def __str__(self):\n return \"{}\".format(self.name)", "def __str__(self):\n return \"{}\".format(self.name)", "def __str__(self):\n return \"{}\".format(self.name)", "def __str__(self):\n return \"{}\".format(self.name)", "def __str__(self):\n return \"{}\".format(self.name)", "def __str__(self):\n return \"{}\".format(self.name)", "def __str__(self):\n return \"{}\".format(self.name)", "def __str__(self):\n return self.format()", "def __str__(self):\r\n name = self.__class__.__name__\r\n return \"[{}] ({}) {}\".format(name, self.id, self.__dict__)", "def __str__(self):\n return f'{self.name}' # TODO", "def __str__(self):\n to_print = '{} : {}\\n'.format('Name'.ljust(34),self.name)\n to_print = to_print + '{} : {}\\n'.format('Name'.ljust(34),self.pathloss.name)\n to_print = to_print + '{} : {}\\n'.format('Number of samples'.ljust(34),self.nsamples)\n to_print = to_print + '{} : {}\\n'.format('Sensor model'.ljust(34),self.sensor_model.name)\n to_print = to_print + '{} : {}\\n'.format('Motion model'.ljust(34),self.motion_model.name)\n return to_print", "def __str__(self):\n return self.summary()", "def __str__(self):\n\n return self.toString()", "def __str__(self):\n return \"[{}] ({}) {}\".format(self.__class__.__name__,\n self.id, self.__dict__)", "def __str__(self):\n return self.toString()", "def __str__(self):\n return \"[{}] {} - {} - {}\".format(\n self.__class__.__name__, self.id, self.email, self.display_name())", "def __str__(self):\n return \"%-10s\" % self._id + \"%-20s\" % self._name", "def __str__(self):\n\n return self.raw_field", "def __str__(self):\n return 
f'{self.short_creation_date} - {self.customer.username} - {self.vehiculeModel} - {self.quotationPrice}'", "def __str__(self):\n if __debug__:\n description = ('CM' in debug.active)\n else:\n description = False\n return self.asstring(short=False, header=True, summary=True,\n description=description)", "def _short_info(self) -> str:\n nullable = \"Nullable \" if self._is_nullable else \"\"\n\n # Good candidate for python pattern matching once <3.10 support no longer required\n num_metadata_items = len(self.__metadata)\n if num_metadata_items == 0:\n metadata = \"\"\n elif num_metadata_items == 1:\n metadata = f\" [with {num_metadata_items} metadata item]\"\n else:\n metadata = f\" [with {num_metadata_items} metadata items]\"\n\n return f\"<{nullable}{self.__class__.__name__}{metadata}: {self._resolve_field_name()}>\"", "def __str__(self):\n return f'<{self._name}>'", "def __str__(self):\n return f\"{self.id}: {self.title}\"", "def __str__(self):\n if hasattr(self, 'name'):\n return str(self.name)\n\n return super().__str__()", "def __str__(self):\n return str(self.description)[:10]", "def __repr__(self):\n\n mod = f\"{self.__class__.__name__} Model\"\n try:\n mod += f': {self.filename}'\n except AttributeError:\n pass\n s = [mod]\n for name, v in self.metadata.items():\n s += [f\"{name:16} : {v}\"]\n return '\\n'.join(s)", "def __str__(self):\n return super().__str__()", "def __str__(self):\n return f'{self.name}'", "def __str__(self):\n return f'{self.name}'", "def __str__(self):\n return (\"[{}] ({}) {}\".format(self.__class__.__name__,\n self.id, self.to_dict()))", "def __str__(self):\n return self.get_str()", "def __str__(self):\n return f\"<{type(self).__name__} {self.id}: {self.value}>\"", "def __str__(self):\n return (\"[{}] ({}) {}\".format(\n self.__class__.__name__, self.id, str(self.__dict__)))", "def __str__(self):\n return str(self.name)", "def __str__(self):\n return str(self.name)", "def __str__(self):\n return str(self.name)", "def __str__(self):\n return str(self.name)", "def __str__(self):\n return str(self.name)", "def __str__(self):\n return str(self.name)", "def __str__(self):\n return str(self.name)", "def __str__(self):\n return self.fullname", "def __str__(self):\n return self.fullname", "def __str__(self):\n return \"{}\".format(super().__str__())", "def __str__(self):\n return f\"{self._desc:16s}\"", "def __str__(self):\n\t\treturn self.full_name", "def __unicode__(self):\n return unicode(self.obj)", "def model_info(self) -> str:\n return self._model_info(self.model).decode(\"utf-8\")", "def __str__(self):\n return self.get_string()", "def __str__(self):\n return f\"{self.name}\"", "def __str__(self) -> str:\n # stringifying a field as its field adds some convenience for cases where we need the field\n # name\n return cast(str, self._resolve_field_name(\"\"))", "def __str__(self):\n return \"ID {0:25} | Name: {1} \\n\".format(self.id, self.title)", "def short_desc(self):\n return str(self.id)", "def __str__ (self) :\n\n return self.as_string()", "def __str__(self):\n return str('%s (%s)' % (self.company, self.owner))" ]
[ "0.8028083", "0.8028083", "0.79938656", "0.7976418", "0.7948468", "0.77171636", "0.7662623", "0.76042145", "0.7593325", "0.7579472", "0.7536077", "0.7449067", "0.74469894", "0.74191", "0.7378081", "0.7339475", "0.73324215", "0.7328719", "0.7328719", "0.730302", "0.72758746", "0.72758746", "0.72638863", "0.72507024", "0.72367406", "0.71928126", "0.7173776", "0.71665823", "0.71254706", "0.7118914", "0.71105576", "0.71105576", "0.71105576", "0.71105576", "0.71105576", "0.7109261", "0.7105625", "0.71010274", "0.70994294", "0.7092507", "0.70863384", "0.70812285", "0.70774573", "0.7075403", "0.7075403", "0.70743865", "0.7071214", "0.7067822", "0.7067822", "0.7067822", "0.7067822", "0.7067822", "0.7067822", "0.7067822", "0.70588887", "0.70469123", "0.70371324", "0.70350784", "0.7031773", "0.70131266", "0.7003906", "0.69958323", "0.6981124", "0.6978567", "0.6973371", "0.69637704", "0.6952527", "0.69462645", "0.6933795", "0.6928071", "0.6926743", "0.69165856", "0.6916499", "0.69011635", "0.69007665", "0.69007665", "0.6898749", "0.6897467", "0.6886904", "0.68787336", "0.68759173", "0.68759173", "0.68759173", "0.68759173", "0.68759173", "0.68759173", "0.68759173", "0.68757355", "0.68757355", "0.6865975", "0.6864248", "0.6864245", "0.68584377", "0.6851851", "0.68421054", "0.6841433", "0.6838142", "0.68380636", "0.68339515", "0.6830534", "0.68234897" ]
0.0
-1
Verbose humanfriendly string representation of model object.
def __str__(self): num_active = len([p for p in self if p]) summary = "%s has %d parameters with %d active (non-default)" % \ (self.__class__.__name__, len(self), num_active) if num_active == 0: return summary return summary + ':\n' + '\n'.join(('%s = %s %s (%s)' % ps) for ps in self.param_strs())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return super().__str__() + self.model.__str__()", "def __str__(self):\n model = self._meta.verbose_name.title()\n title = self.extended_object.get_title()\n return f\"{model:s}: {title:s}\"", "def __str__(self):\n model = self._meta.verbose_name.title()\n return f\"{model:s}: {self.name:s}\"", "def __str__(self):\n model = self._meta.verbose_name.title()\n return f\"{model:s}: {self.name:s}\"", "def __str__(self):\n \n res = ['>>> Model %(model_name)s <<<']\n res.append('')\n res.append('Independent parameters:')\n res.append('-----------------------')\n res.append('')", "def __str__(self):\n return '%s%s' % (self.name, ' - %s' % self.model if self.model else '')", "def __str__(self):\n return str(self.__dict__['_obj'])", "def __str__(self):\n return str(self.obj)", "def __str__(self):\n return \"<%s: %s>\" % (self.__class__, self.describe())", "def __str__(self):\n model = self._meta.verbose_name.title()\n title = self.title or str(_(\"Empty title\"))\n\n return f\"{model:s}: {title:s}\"", "def __str__(self):\n\n return '__str__ for Object'", "def __str__(self):\n return str(self.__dict__)", "def __str__(self):\n return str(self.__dict__)", "def __str__(self):\n msg = [\n f'{self.model=}',\n f'{self.field=}',\n f'{self.fxx=}',\n f'{self.date=}',\n f'{self.priority=}',\n ]\n return '\\n'.join(msg)", "def __str__(self):\n return f\"{self._meta.verbose_name.title()}: {self.name}\"", "def __str__(self):\n return \"DataModel(name={},attributes={},description={})\".format(\n self.name, {a.name: str(a) for a in self.attributes}, self.description\n )", "def __str__(self):\n to_print = '{} : {}\\n'.format('Name'.ljust(34),self.name)\n to_print = to_print + '{} : {}\\n'.format('Name'.ljust(34),self.pathloss.name)\n to_print = to_print + '{} : {}\\n'.format('Number of samples'.ljust(34),self.nsamples)\n to_print = to_print + '{} : {}\\n'.format('Sensor model'.ljust(34),self.sensor_model.name)\n to_print = to_print + '{} : {}\\n'.format('Motion model'.ljust(34),self.motion_model.name)\n return to_print", "def __str__(self) -> str:\n # noinspection PyUnresolvedReferences\n opts = self._meta\n if self.name_field:\n result = str(opts.get_field(self.name_field).value_from_object(self))\n else:\n model_fields = get_model_fields(\n opts.model,\n foreign=False,\n m2m=False,\n exclude=self.exclude_from_str\n )\n # TODO: replace the above with the below to remove the get_model_fields call:\n # model_fields = [\n # f for f in opts.get_fields()\n # if f.concrete\n # and not (f.primary_key or f.is_relation or f.name in self.exclude_from_str)\n # ]\n result = \" \".join(\n [\n str(fld.value_from_object(self))\n for fld in model_fields\n if fld.value_from_object(self)\n ]\n )\n return result.strip() or super().__str__()", "def __str__(self) -> str:\n if self.name_field:\n return str(getattr(self, self.name_field))\n # noinspection PyUnresolvedReferences\n data = [\n # Collect the string representations of related objects.\n # getattr(self, fk_field.attname) and\n # fk_field.value_from_object(self) would only return the primary\n # key of the related object.\n str(getattr(self, fk_field.name))\n for fk_field in get_model_fields(\n self._meta.model, base=False, foreign=True, m2m=False\n )\n if not fk_field.null\n ]\n if len(data) < 2:\n # Cannot build a more meaningful representation than the default.\n return super().__str__()\n else:\n template = \"{}\" + \" ({})\" * (len(data) - 1)\n return template.format(*data)", "def __str__(self):\n return f\"{self._meta.verbose_name.title()} — 
{self.name}\"", "def __str__(self):\n if self.__description:\n return self.__description\n return repr(self)", "def __repr__(self):\n\n # info string\n info = self.model.__repr__()\n info += \"\\n=========================\\n\"\n info += f\"Train data length:\\t\\t{ len(self.train_dataset) }\\n\"\n info += f\"Eval sata length:\\t\\t{ len(self.eval_dataset) }\\n\"\n info += f\"Optimizer:\\t\\t\\t\\t{ str(self.optimizer).split('(')[0] }\\n\"\n info += f\"Criterion:\\t\\t\\t\\t{ str(self.criterion).split('(')[0] }\\n\"\n info += f\"Training Environment:\\t{ self.device.type }\\n\"\n info += f\"Show information:\\t\\t{ 'True' if self.info else 'False' }\\n\"\n info += \"=========================\\n\"\n\n return info", "def __str__(self):\n tablename = self.tablename()\n attrs = {}\n if Registry.SCHEMAS.has_key(tablename):\n for key in Registry.SCHEMAS[tablename]:\n attrs[key] = getattr(self, key, None)\n return \"<%s object: %s>\" % (self.__class__.__name__, str(attrs))", "def __str__(self):\n return self.summary()", "def __str__(self):\n return type(self).__name__ + str(vars(self))", "def __str__(self):\n s = \"[{}] ({}) {}\".format(str(\n type(self).__name__), self.id, self.__dict__)\n return s", "def __str__(self):\n return \"[{}] ({}) {}\".format(self.__class__.__name__, self.id,\n self.__dict__)", "def __str__(self):\n return \"[{}] ({}) {}\".format(self.__class__.__name__, self.id,\n self.__dict__)", "def __str__(self):\n return self.make_flat()", "def __str__(self):\n str = \"[{}] ({}) {}\"\n return (str.format(self.__class__.__name__, self.id, self.__dict__))", "def __str__(self):\n return f\"{self._meta.verbose_name.title()} #{self.id}\"", "def __repr__(self):\n\n mod = f\"{self.__class__.__name__} Model\"\n try:\n mod += f': {self.filename}'\n except AttributeError:\n pass\n s = [mod]\n for name, v in self.metadata.items():\n s += [f\"{name:16} : {v}\"]\n return '\\n'.join(s)", "def __str__(self):\n return (\n f'{self.__class__.__name__}'\n f'\\n> defined by: {self._str_meta_()}'\n f'\\n> with columns: {self._str_colnames()}'\n f'\\n> {len(self)} objects'\n f'\\n{APtable.__str__(self)}'\n )", "def print_model(self, model):\n return \"null\"", "def __str__(self):\n return self._name+self._description", "def __str__(self):\n return '{}({})'.format(type(self).__name__, self.__name)", "def __str__(self):\n return \"{0} : {1}\".format(self.name, self.description)", "def __str__(self):\n if __debug__:\n description = ('CM' in debug.active)\n else:\n description = False\n return self.asstring(short=False, header=True, summary=True,\n description=description)", "def __str__(self):\r\n \r\n for att in self.__dict__.keys():\r\n print '%s: %r' % (att, getattr(self, att))\r\n \r\n return 'Completeness class object attributes'", "def __str__(self):\n return \"[{}] ({}) {}\".format(self.__class__.__name__,\n self.id, self.__dict__)", "def __str__(self):\n return f\"model {self._name}\"", "def __str__(self):\r\n name = self.__class__.__name__\r\n return \"[{}] ({}) {}\".format(name, self.id, self.__dict__)", "def __str__(self):\n model = self._meta.verbose_name.title()\n return f\"{model:s}: {self.licence.name:s}\"", "def __str__(self):\n return self.format()", "def dumps(self, indent=0):\n outstr = \" \"*indent + \"MewloDbModel (\" + self.__class__.__name__ + \") instance reporting in.\"\n return outstr", "def __str__(self):\n s = \"\"\n for x in self.__members:\n v = getattr(self, x)\n if s: s+=\", \"\n s += \"%s: %s\" % (x, `v`)\n return s", "def __str__(self):\r\n to_print = (\"Name: \" + 
self.name + \", Age: \" +\r\n str(self.age) + \", Hobbys: \" + str(self.hobbys))\r\n return to_print", "def __str__(self):\n\n desc = self.description\n if desc is not None:\n return str(desc)\n\n desc = self.debugDescription\n if desc is not None:\n return str(desc)\n\n return repr(self)", "def __str__(self):\n return (\n 'Name:\\t{}\\nType:\\t{}\\nAge:\\t{}'.format(self.__name,\n self.__animal_type,\n self.__age))", "def __str__(self):\n return stringify(\n Inspect(\n self,\n help=True,\n methods=True,\n private=True,\n dunder=False,\n sort=True,\n all=False,\n ),\n maxlen=-1,\n )", "def __str__(self):\n return \"{}\".format(super().__str__())", "def __str__(self):\n return repr(self)", "def __str__(self):\n return f\"<{full_class_name(self)} {self.name!r} @{'%x' % id(self)}>\"", "def __str__(self):\n return self.toString()", "def __str__(self):\n return self.__class__.__name__ + '\\n' + self.__class__.__doc__", "def get_str(self, obj):\n if self.pretty:\n return pprint.pformat(obj)\n else:\n return str(obj)", "def __str__(self):\n return self.s", "def dumps(self, indent=0):\n outstr = \" \"*indent + \"MewloDbModel object '{0}' attribute values:\\n\".format(self.__class__.__name__)\n public_props = (name for name in dir(object) if not name.startswith('_'))\n for name in public_props:\n outstr += \" \"*indent + \"{0}: {1}\\n\".format(name, str(getattr(self,name)))\n return outstr", "def __str__(self):\n\n return self.toString()", "def printModel(self):\n print(self.model)", "def __str__(self):\r\n return repr(self)", "def __repr__(self):\n s = 'text model name: ' + self.name + '\\n'\n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of sentence lengths: ' + str(len(self.sentence_lengths)) + '\\n'\n s += ' most common words: ' + str(self.common_word) + '\\n'\n\n return s", "def __str__(self):\n return super().__str__()", "def __str__(self):\n datastr = self.f_val_to_str()\n return_string = \"%s %s\" % (self.f_get_class_name(), self.v_full_name)\n if self.v_comment:\n return_string += \" (`%s`)\" % self.v_comment\n if datastr:\n return_string += \": \" + datastr\n\n return return_string", "def __str__(self):\n return str(self.__s)", "def __str__(self):\n return (\"[{}] ({}) {}\".format(\n self.__class__.__name__, self.id, str(self.__dict__)))", "def __str__(self) -> str:\n model_str = [\"\\nModel info:\\n\", \" Unimodal encoder:\\n\"]\n\n for modality in range(self.num_modalities):\n model_str.append(f\" ({modality + 1}) {self.unimodal_encoder[modality]}\")\n\n model_str.append(\"\\n\\n Unimodal decoder:\\n\")\n for modality in range(self.num_modalities):\n model_str.append(f\" ({modality + 1}) {self.unimodal_decoder[modality]}\")\n\n if self.multimodal_decoder is not None:\n model_str.append(\"\\n\\n Multimodal decoder:\\n\")\n model_str.append(f\" {self.multimodal_decoder}\")\n\n return \"\".join(model_str)", "def __str__(self):\r\n\r\n for att in self.__dict__:\r\n print(\"%s: %r\" % (att, getattr(self, att)))\r\n\r\n return \"Planet Population class object attributes\"", "def __str__(self):\r\n\r\n return 'v{} ({} objects)'.format(self.get_version(), len(self.objects))", "def show_model_summary(self):\n\t\treturn self.model.summary()", "def describe(self):\n return str(self)", "def __str__(self):\n return \"{}\".format(self.name)", "def __str__(self):\n return \"{}\".format(self.name)", "def __str__(self):\n return 
\"{}\".format(self.name)", "def __str__(self):\n return \"{}\".format(self.name)", "def __str__(self):\n return \"{}\".format(self.name)", "def __str__(self):\n return \"{}\".format(self.name)", "def __str__(self):\n return \"{}\".format(self.name)", "def __str__(self):\n return (\"[{}] ({}) {}\".format(self.__class__.__name__,\n self.id, self.to_dict()))", "def __str__(self):\n return 'str-human.%s' % self.name", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\r\n return self.__repr__()", "def __str__(self):\r\n\r\n retval = self.__class__.__name__ + ' ('\r\n for val in self.VALUES:\r\n value = getattr(self, val, None)\r\n if value is not None:\r\n retval += '%s:%.4f ' % (val, getattr(self, val))\r\n return retval.strip() + ')'", "def __str__(self):\n print_info = f\"\\nStudent ID: {self._id}, Name: {self._name}, \" \\\n f\"Year: {self._year} \\nPhone: {str(self._phone)}, \" \\\n f\"Address: {str(self._address)} \" \\\n f\"\\nClasses: {str(self._classes)}\" \\\n f\"\\nBirth Date: {self._date}\"\n return print_info", "def __str__(self):\n return \"[{}] {} - {} - {}\".format(\n self.__class__.__name__, self.id, self.email, self.display_name())", "def __repr__(self):\n return str(self.__dict__)", "def __repr__(self):\n return str(self.__dict__)", "def __repr__(self):\n return str(self.__dict__)", "def __repr__(self):\n return str(self.__dict__)", "def __repr__(self):\n return str(self.__dict__)", "def __str__(self):\n return f\"<{type(self).__name__} {self.id}: {self.value}>\"", "def __str__(self):\n if hasattr(self, 'name'):\n return str(self.name)\n\n return super().__str__()", "def __str__(self):\n return self.get_str()", "def print_model_description(verbose: bool):\n\n desc = get_model_description()\n description = f\"\"\"\nModel ID: {desc['id']}\nRelease Date: {desc['releaseDate']}\nCavity Labels: {desc['cavityLabels']}\nFault Labels: {desc['faultLabels']}\nTraining Data: {desc['trainingData']}\nBrief: {desc['brief']}\n\"\"\"\n if verbose:\n description += os.linesep + f\"Details: {desc['details']}\"\n print(description)", "def __str__(self):\n description = \"Object class Critter.\"\n description += \"\\nName: \" + self.name + \"\\nMood: \" + self.mood + \"\\nHunger: \" + str(self.hunger) + \"\\nBoredom: \" + str(self.boredom) + \"\\n\"\n return description", "def __str__(self):\r\n return \"{}, {}, reflection={}, appeared in year {}\".format(self.name, self.type,\r\n self.reflection, self.year)", "def __str__(self):\n # First obtain a string describing the underlying data model.\n strg = super(MiriTelescopeEmissionModel, self).__str__()\n \n # Add the extras\n if self.meta.instrument.filter is not None:\n strg += \"Data valid for filter=\\'%s\\' \" % \\\n self.meta.instrument.filter\n else:\n strg += \"Data valid for UNKNOWN filter \"\n if self.meta.telescope_temperature is not None:\n strg += \"and telescope temperature=%.2fK\" % \\\n self.meta.telescope_temperature\n else:\n strg += \"and UNKNOWN telescope temperature\"\n return strg" ]
[ "0.78885835", "0.7783697", "0.77635914", "0.77635914", "0.7730236", "0.760641", "0.75836486", "0.7574152", "0.75515556", "0.7453203", "0.74457", "0.7392917", "0.7392917", "0.7386357", "0.73556995", "0.7335823", "0.731109", "0.725926", "0.72402775", "0.720554", "0.71744984", "0.71362185", "0.7125322", "0.7123233", "0.7106851", "0.7075635", "0.7065986", "0.7065986", "0.70658463", "0.7059668", "0.7056086", "0.70483047", "0.70466673", "0.7031855", "0.7030226", "0.70283663", "0.7023085", "0.7020563", "0.7002492", "0.7002215", "0.6998701", "0.697958", "0.69760585", "0.6973811", "0.6962072", "0.6956188", "0.695552", "0.6950945", "0.6946332", "0.6946151", "0.69427043", "0.69407815", "0.69340783", "0.69291407", "0.6927977", "0.6926507", "0.69260436", "0.69255203", "0.6921309", "0.6918615", "0.6906875", "0.68831146", "0.68799734", "0.68761533", "0.6873252", "0.6860491", "0.6851422", "0.68498653", "0.6849266", "0.68419033", "0.6841263", "0.6835215", "0.6835215", "0.6835215", "0.6835215", "0.6835215", "0.6835215", "0.6835215", "0.6827273", "0.6822182", "0.6811208", "0.6811208", "0.6811208", "0.6811208", "0.6811208", "0.67992043", "0.679476", "0.67942107", "0.67940915", "0.67912835", "0.67912835", "0.67912835", "0.67912835", "0.67912835", "0.67906564", "0.67861795", "0.67709523", "0.6768272", "0.6768027", "0.6767002", "0.6759895" ]
0.0
-1
Equality comparison operator (parameter values only).
def __eq__(self, other): return self.description == \ (other.description if isinstance(other, self.__class__) else other)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def values_eq(self, a, b):\r\n return a == b", "def testEquality(self):\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self, *args):\r\n pass", "def __eq__(self, *args):\r\n pass", "def __eq__(self, *args):\r\n pass", "def __eq__(self, *args):\r\n pass", "def __eq__(self, *args):\r\n pass", "def __eq__(self, *args):\r\n pass", "def __eq__(self, *args):\r\n pass", "def __eq__(self, *args):\r\n pass", "def __eq__(self, *args):\r\n pass", "def __eq__(self, *args):\r\n pass", "def __eq__(self, *args):\r\n pass", "def _equal_to_op(spec):", "def __eq__(self, rhs):\n return (\n (self.name == rhs.name)\n and (self.args == rhs.args)\n and (self.varargs == rhs.varargs)\n and (self.keywords == rhs.keywords)\n )", "def test_equality_method(self):\r\n wc1 = WhereClause('a', EqualsOperator(), 'c')\r\n wc2 = WhereClause('a', EqualsOperator(), 'c')\r\n assert wc1 == wc2", "def __eq__(self,*args):\n pass", "def __eq__(self,*args):\n pass", "def __eq__(self,*args):\n pass", "def __eq__(self,*args):\n pass", "def __eq__(self,*args):\n pass", "def __eq__(self,*args):\n pass", "def __eq__(self,*args):\n pass", "def __eq__(self,*args):\n pass", "def __eq__(self,*args):\n pass", "def __eq__(self,*args):\n pass", "def __eq__(self,*args):\n pass", "def __eq__(self, other):\n return other and self.values == other.values", "def aeq(*args):\r\n arguments = (arg for arg in args)\r\n first = next(arguments)\r\n assert all(arg == first for arg in arguments), \\\r\n \"Not all arguments have the same value: \" + str(args)", "def aeq(*args):\n arguments = (arg for arg in args)\n first = next(arguments)\n assert all(arg == first for arg in arguments), \\\n \"Not all arguments have the same value: \" + str(args)", "def aeq(*args):\n arguments = (arg for arg in args)\n first = next(arguments)\n assert all(arg == first for arg in arguments), \\\n \"Not all arguments have the same value: \" + str(args)", "def aeq(*args):\n arguments = (arg for arg in args)\n first = next(arguments)\n assert all(arg == first for arg in arguments), \\\n \"Not all arguments have the same value: \" + str(args)", "def aeq(*args):\n arguments = (arg for arg in args)\n first = next(arguments)\n assert all(arg == first for arg in arguments), \\\n \"Not all arguments have the same value: \" + str(args)", "def __eq__(self, other):\n return self.value == other or self.value == other.value", "def check_params_equality(self, *args, **kwargs):\n raise NotImplementedError(\"Class {} must implement method 'check_params_equality'\".format(type(self)))", "def __eq__(self, *args):\n return _ida_hexrays.carglist_t___eq__(self, *args)", "def __eq__(*args, **kwargs):\n return _uhd_swig.__eq__(*args, **kwargs)", "def __eq__(self, *args):\n return _ida_hexrays.carg_t___eq__(self, *args)", "def __eq__(self, other):\n return self.value == other.value", "def __eq__(self, other: 
'OperatorConfig'):\n operator_name = self.operator_name == other.operator_name\n return (self.params == other.params\n and operator_name)", "def __eq__(self, other):\n return (other is not None and\n ((not self.name and not other.name) or\n self.name == other.name) and\n ((not self.expressions and not other.expressions) or\n self.expressions == other.expressions) and\n self.fields == other.fields and\n dict.__eq__(self.attrs or {}, other.attrs or {}))", "def equals(a, b, **kwargs):\n return lib.equals(a, b, **kwargs)", "def __eq__(self, other):\n if not isinstance(other, UnchangeableParam):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\r\n if other is not None:\r\n return self.value() == other.value()\r\n else:\r\n return False", "def __eq__(self, *args):\n return _ida_hexrays.operand_locator_t___eq__(self, *args)", "def __eq__(self, other: t.Any) -> bool:\n return self._op_bool('__eq__', other)", "def __eq__(self, rhs):\n return self.x == rhs.x and self.y == rhs.y", "def equals(x, y):\n return x == y", "def __eq__(self, other: Any) -> ColumnOperators: # type: ignore[override]\n return self.operate(eq, other)", "def __eq__(self, other):\r\n\t\treturn (self.type == other.type and self.value == other.value)", "def values_equal(t : Type, v1, v2) -> bool:\n return compare_values(t, v1, v2) == EQ", "def __eq__(self, *args):\n return _ida_hexrays.var_ref_t___eq__(self, *args)", "def __eq__(self, other):\n if other is None:\n return False\n if self.value == other.value:\n return True\n return False", "def __eq__(self, other):\n if isinstance(other, self._get_class()):\n return self._.hash_parameters == other._.hash_parameters\n else:\n return not isinstance(other, ASParameters) \\\n and self._.hash_parameters == other", "def __eq__(self, other):\n return super(Column, self).__eq__(tuple(other))", "def __eq__(self, *args):\n return _ida_hexrays.cexpr_t___eq__(self, *args)", "def __eq__(self, other):\n pass", "def __eq__(self, other):\n pass", "def __eq__(self, other):\n if other is self:\n return True\n if not isinstance(other, PotentialExpression):\n return False\n if not self.is_parametric:\n return (\n self.expression == other.expression\n and self.independent_variables == other.independent_variables\n )\n else:\n return (\n self.expression == other.expression\n and self.independent_variables == other.independent_variables\n and _are_equal_parameters(self.parameters, other.parameters)\n )", "def __eq__(self, other):\n self.conds.append((self.name, '==', other))\n return self", "def test_equality(self):\n # Make explicitly sure we're using ==:\n self.assertTrue(Comparable(1) == Comparable(1))\n self.assertFalse(Comparable(2) == Comparable(1))", "def __eq__(self, *args):\n return _ida_hexrays.cdo_t___eq__(self, *args)", "def test_equal_basic(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"equal\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::eq\"},\n )", "def __eq__(self, oth):\n return int(self) != oth", "def __eq__(self, other):\n if not isinstance(other, Parameter):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self: _TT, other: object) -> bool:\n return self.eq(other) # type: ignore", "def __eq__(self, other):\n raise NotImplementedError", "def __eq__(self, other):\n raise NotImplementedError", "def __eq__(self, other):\n raise NotImplementedError", "def test_equals(self):\n parameters = [\n (1, 'a', False),\n (1, None, False),\n (1, 2, False),\n (1, 1, True)\n ]\n for pair in 
parameters:\n with self.subTest(pair=pair):\n self.getLogger().info('Next pair %s', pair)\n _obj1 = Node(pair[0])\n _obj2 = None if not pair[1] else Node(pair[1])\n self.assertEqual(_obj1._equals(_obj2), pair[2])\n _objSelf = Node(1)\n self.assertTrue(_objSelf._equals(_objSelf))", "def __eq__(self, other):\r\n\t\tif self.eqHash == other.eqHash:\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False", "def __eq__(self, *args):\n return _ida_hexrays.vdloc_t___eq__(self, *args)", "def exact(cls, lhs, rhs):\n return lhs == rhs", "def __eq__(self, other):\n return self.cvarsort == other.cvarsort \\\n and set(self.iter_specified()) == set(other.iter_specified())", "def __eq__(self, other):\n return (other is self) or (isinstance(other, Expr)\n and self.op == other.op and self.args == other.args)", "def are_equal(value1, value2):\n if value1 == None or value2 == None:\n return True\n if value1 == None or value2 == None:\n return False\n return value1 == value2", "def __eq__(self, *args):\n return _ida_hexrays.qvector_carg_t___eq__(self, *args)", "def __eq__(self, *args):\n return _ida_frame.stkpnts_t___eq__(self, *args)", "def __eq__(self, other):\n return (((not self.name and not other.name) or\n self.name == other.name) and\n self.fields == other.fields)", "def __eq__(self,other):\n\t\tif other != None:\n\t\t\treturn self.id==other.id and \\\n\t\t\t\t self.length == other.length and \\\n\t\t\t\t self.value==other.value\n\t\telse:\n\t\t\treturn False", "def test_equals(self):\n self.assertEqual(cmp(u\"a\", u\"a\"), 0)\n self.assertEqual(cmp(1, 1), 0)\n self.assertEqual(cmp([1], [1]), 0)" ]
[ "0.7495514", "0.7350638", "0.71365744", "0.71365744", "0.71365744", "0.71365744", "0.71365744", "0.71365744", "0.71365744", "0.71365744", "0.71365744", "0.71365744", "0.71365744", "0.71365744", "0.71365744", "0.71365744", "0.71365744", "0.71365744", "0.71365744", "0.71365744", "0.71365744", "0.71365744", "0.71065164", "0.71065164", "0.71065164", "0.71065164", "0.71065164", "0.71065164", "0.71065164", "0.71065164", "0.71065164", "0.71065164", "0.71065164", "0.7087689", "0.70505005", "0.7010036", "0.6916973", "0.6916973", "0.6916973", "0.6916973", "0.6916973", "0.6916973", "0.6916973", "0.6916973", "0.6916973", "0.6916973", "0.6916973", "0.6892992", "0.6793727", "0.67602277", "0.67602277", "0.67602277", "0.67602277", "0.67593324", "0.67574763", "0.67468715", "0.67453295", "0.67272305", "0.670542", "0.6667207", "0.6646019", "0.6636872", "0.66008407", "0.65843976", "0.6569169", "0.656153", "0.6552357", "0.654675", "0.6545051", "0.65417594", "0.6538582", "0.6536735", "0.652562", "0.6513282", "0.64907223", "0.6474734", "0.6434762", "0.6434762", "0.6425539", "0.6414178", "0.6413112", "0.64095867", "0.6403924", "0.64015967", "0.6385161", "0.6383099", "0.6377721", "0.6377721", "0.6377721", "0.63749117", "0.6368779", "0.63547224", "0.63466394", "0.63440645", "0.634064", "0.6335249", "0.6334275", "0.63293123", "0.6328296", "0.63227", "0.63147146" ]
0.0
-1
Inequality comparison operator (parameter values only).
def __ne__(self, other): return not (self == other)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ne__(self, values):\n self = self.__eq__(values)\n return self.__invert__()", "def __ne__(self, rhs):\n return not self.__eq__(rhs)", "def not_equal(lhs, rhs):\n return _make.not_equal(lhs, rhs)", "def __neq__(self, other): \n return not self == other", "def test_nonEquality(self):\n # Make explicitly sure we're using !=:\n self.assertFalse(Comparable(1) != Comparable(1))\n self.assertTrue(Comparable(2) != Comparable(1))", "def __ne__(self, other):\n return not (self == other) # opposite of __eq__", "def __ne__(self, other):\n return not (self == other) # opposite of __eq__", "def ne (self, other):\n return not (self == other) # opposite of eq", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not_equal(self, other)", "def __ne__(self, v):\n\t\treturn not (self == v)", "def __ne__(self, other):\r\n return not self.__eq__(other)", "def __ne__(self, other):\r\n return not self.__eq__(other)", "def __ne__(self, other):\r\n return not self.__eq__(other)", "def __ne__(self, other):\r\n return not self.__eq__(other)", "def __ne__(left, right):\n return (not (left == right))", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return int.__eq__(self, other)", "def __ne__(self, other: t.Any) -> bool:\n return self._op_bool('__ne__', other)", "def __ne__(self, other):\n\t\treturn not self.__eq__(other)", "def __ne__(self,other):\n return not (self == other)", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not(self == other)", "def __ne__(self,other):\n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\n return tf.math.not_equal(self._ordinals, other.ordinal())", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\r\n\t\treturn (self.type != other.type or self.value != other.value)", "def __ne__(self, other):\n return not (self == other)", "def __ne__(self, other):\n return not (self == other)", "def __ne__(self, other):\n return not (self == other)", "def __ne__(self, other):\n 
self.conds.append((self.name, '!=', other))\n return self", "def __ne__(self, other):\r\n return not (self == other)", "def __ne__(self, other):\r\n\t\treturn not(self.__eq__(other))", "def __ne__(self, oth):\n return int(self) == oth", "def __ne__(self, other):\n\t\treturn not self == other", "def __ne__(self, other):\n pass", "def __ne__(self, other):\n return other != self._cmpkey()", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other" ]
[ "0.72408104", "0.71416056", "0.69568586", "0.6941666", "0.68447745", "0.6843122", "0.6838853", "0.68197155", "0.6807232", "0.679406", "0.6777357", "0.673857", "0.673857", "0.673857", "0.673857", "0.6725665", "0.6725112", "0.6718812", "0.67053616", "0.6701271", "0.66915655", "0.6686087", "0.6677077", "0.6677077", "0.667271", "0.66724443", "0.6671919", "0.6671919", "0.66595256", "0.665525", "0.665525", "0.665525", "0.665525", "0.665525", "0.665525", "0.665525", "0.665525", "0.665525", "0.665525", "0.665525", "0.665525", "0.665525", "0.665525", "0.665525", "0.665525", "0.665525", "0.665525", "0.665525", "0.665525", "0.665525", "0.665525", "0.6648712", "0.66426355", "0.66426355", "0.66426355", "0.66371244", "0.66232157", "0.66158307", "0.6613771", "0.65986335", "0.6592824", "0.65808225", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389", "0.6578389" ]
0.0
-1
Base hash on description string, just like equality operator.
def __hash__(self): return hash(self.description)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hash(self) -> str:\r\n ...", "def __hash__(self):\n\n return hash((str(self.type) + str(self.value)))", "def __hash__(self):\n return hash(self.text)", "def hash(self) -> bytes:", "def hash(self, string):\n return self.__scaffydb.hash(string)", "def __hash__(self):\n return hash(str(self))", "def __hash__(self):\n return hash(str(self))", "def __hash__(self):\n return hash(str(self))", "def __hash__(self):\n return hash(str(self)) # use the __str__ method to obtain the hashcode", "def __hash__(self):\n return hash(str(self)) # use the __str__ method to obtain the hashcode", "def __hash__(self):\n return hash(str(self)) # use the __str__ method to obtain the hashcode", "def __hash__(self):\n return hash(str(self)) # use the __str__ method to obtain the hashcode", "def hash_string(self):\n return self._hash_string", "def __hash__(self):\n return hash(self.get_canonical_identifier())", "def hash(self):\n raise NotImplementedError() # To be subclassed", "def __hash__(self):\n return hash(repr(self))", "def __hash__(self):\n return hash(repr(self))", "def __hash__(self):\n return hash(repr(self))", "def __hash__(self):\n return hash((self.title, self.isbn))", "def __hash__(self):\n return hash(self.literals)", "def __hash__(self):\n\t\treturn hash(repr(self))", "def __hash__(self):\n hash_value = 0\n \n # required\n hash_value ^= self.required << 14\n \n # title\n hash_value ^= hash(self.title)\n \n # type\n hash_value ^= hash(self.type)\n \n # values\n values = self.values\n if (values is not None):\n hash_value ^= len(values)\n \n for value in values:\n hash_value ^= hash(value)\n \n return hash_value", "def __hash__(self):\n return hash((self.SYMBOL, self._.hash_parameters))", "def __hash__(self):\n return hash((self._start, self._end, self._name, self._value))", "def __hash__(self):\n return hash(self.hash)", "def hash_me(cls, p_str, p_len=64):\n v_hash = str()\n v_len = EC.SHA256 if p_len is None else EC.SHA256 if p_len not in EC.HASH_ALGO else p_len\n if v_len == EC.SHA512:\n v_hash = hashlib.sha512()\n elif v_len == EC.SHA256:\n v_hash = hashlib.sha256()\n elif v_len == EC.SHA224:\n v_hash = hashlib.sha224()\n elif v_len == EC.SHA1:\n v_hash = hashlib.sha1()\n\n v_hash.update(p_str.encode(\"utf-8\"))\n return v_hash.hexdigest()", "def __hash__(self):\n return hash(self.name)", "def __hash__(self):\n return hash(self.name)", "def test_hash(self):\r\n self.assertEqual(processor_hash('test'), 'GqNJWF7X7L07nEhqMAZ+OVyks1Y=')\r\n self.assertEqual(processor_hash('edx '), '/KowheysqM2PFYuxVKg0P8Flfk4=')", "def calc_statistics_hash(self) -> bytes:\n return b\"somehash\"", "def __hash__(self):\n return hash(self.value)", "def test_hash(self):\n self.assertEqual(hash(self.compound), hash((\"t1\", \"test compound\")))", "def __hash__(self):\n # These entities are not cached, so we wont use their `id` if applicable.\n hash_value = 0\n \n # bot\n hash_value ^= hash(self.bot)\n \n # description\n description = self.description\n if (description is not None):\n hash_value ^= hash(description)\n \n # icon\n hash_value ^= hash(self.icon)\n \n # id\n hash_value ^= self.id\n \n # name\n name = self.name\n if (description is None) or (description != name):\n hash_value ^= hash(name)\n \n return hash_value", "def __hash__(self):\r\n return hash((self.label, ))", "def __hash__(self) -> int:\n return hash(self._hashable_content())", "def __hash__(self):\n return hash(self.label())", "def HashValue(self) -> _n_0_t_3[_n_0_t_9]:", "def __hash__(self):\n hash_value = 0\n \n # approximate_online_count\n 
hash_value ^= self.approximate_online_count\n \n # approximate_user_count\n hash_value ^= self.approximate_user_count << 12\n \n # description\n description = self.description\n if (description is not None):\n hash_value ^= hash(description)\n \n # discovery_splash\n hash_value ^= hash(self.discovery_splash)\n \n # emojis\n emojis = self.emojis\n hash_value ^= len(emojis) << 1\n for emoji in emojis.values():\n hash_value ^= hash(emoji)\n \n # features\n features = self.features\n hash_value ^= len(features) << 5\n for feature in features:\n hash_value ^= hash(feature)\n \n # icon\n hash_value ^= hash(self.icon)\n \n # id\n hash_value ^= self.id\n \n # invite_splash\n hash_value ^= hash(self.invite_splash)\n \n # stickers\n stickers = self.stickers\n hash_value ^= len(stickers) << 9\n for sticker in stickers.values():\n hash_value ^= hash(sticker)\n \n # name\n name = self.name\n if (description is None) or (description != name):\n hash_value ^= hash(name)\n \n return hash_value", "def __hash__(self):\n return hash(self.base_location) ^ hash(self.fold_path) ^ hash(self.field)", "def hash_str(self):\n return '___'.join([self.key.kind(), self.key.string_id(),\n self._Hash()])", "def __hash__(self):\n return hash(self.joined())", "def __hash__(self):\n return hash(str(self.key))", "def __hash__(self):\n return hash(str(self.key))", "def __hash__( self ):\n return hash( self.data )", "def hash(self) -> str:\n return pulumi.get(self, \"hash\")", "def _Hash(self):\n out = [self.key.string_id()]\n properties = self._PropList()\n for prop in properties:\n out.append(unicode(getattr(self, prop, '')))\n to_hash = ''.join(out)\n return hashlib.md5(to_hash.encode('utf-8')).hexdigest()", "def hash_key(self):", "def __hash__(self):\n return self.to_hash()", "def __hash__(self) -> int:\n return hash(tuple(self.name,))", "def __hash__(self):\n hash_value = hash(self.tag)\n for attribute in self.attributes:\n hash_value += hash(attribute)\n return hash_value", "def __hash__(self):\n return hash((self.benchmark, self.name))", "def __hash__(self) -> int:\r\n\r\n if isinstance(self.value, RawData):\r\n value = hash(self.value)\r\n else:\r\n value = self.value\r\n return hash((self.label, value, self.timestamp, self.version))", "def __hash__(self) -> int:\n return hash(repr(self))", "def __hash__(self):\n return hash(str(self.__id__))", "def hashcode(o):", "def __hash__(self):\n return self.word.__hash__()", "def hash(self):\n return Hash.dhash(bytes(self))", "def __hash__(self):\n hash_content = []\n hash_content.extend(self.analyzer_options)\n hash_content.append(str(self.analyzer_type))\n hash_content.append(self.target[self.lang])\n hash_content.append(self.source)\n return hash(''.join(hash_content))", "def __hash__(self):\n return hash((self._nele, self._m_s))", "def _Hash(content: bytes) -> str:\n return hashlib.sha256(content).hexdigest()", "def __hash__(self):\n return hash((self.name, self.state))", "def hashing(word) :\r\n ans = hashlib.sha256(word.encode())\r\n return ans.hexdigest()", "def __hash__(self):\n if self.quality is None:\n return hash(md5(self.id.encode('UTF-8') + b'\\0' +\n self.sequence.encode('UTF-8')).digest())\n else:\n return hash(md5(self.id.encode('UTF-8') + b'\\0' +\n self.sequence.encode('UTF-8') + b'\\0' +\n self.quality.encode('UTF-8')).digest())", "def __hash__(self):\n return hash((self.type, self.data))", "def test_hash_params():\n assert (\n hash_params(\n {\n \"name\": \"my-name\",\n \"labels\": {\n \"label1\": \"label\",\n },\n }\n )\n == 
\"1c67a2e8dd405725a4cdf7b58fed3e948aed135ac25c494a3b336c83a72ac0c8\"\n )", "def HashAlgorithm(self) -> _n_7_t_0:", "def __hash__(self):\n raise NotImplementedError", "def __hash__(self):\n if self._hash is None:\n self._hash = hash(self._scheme) ^ hash(self._host) ^ hash(self._port) ^ hash(self._path) ^ hash(self._query) ^ hash(self._isRegularURI)\n return self._hash", "def compute_hash(self) -> str:\r\n #block_dict = self.__dict__.pop('hash', None) # Remove hash field value before calculating hash\r\n block_dict = self.__dict__.copy()\r\n block_dict.pop('hash', None) # Remove hash field value before calculating hash\r\n block_string = json.dumps(block_dict, sort_keys=True).encode('utf-8')\r\n return sha256(block_string).hexdigest()", "def unique_hash(self):\n raise NotImplementedError(\"unique_hash Method not implemented\")", "def hash(self, string):\n h = md5()\n h.update(string)\n return h.digest()", "def test_hash_string(self):\n self.assertEqual(hexlify(self._hashdigest(pubkey_sha)), sample_ripe)", "def str_to_hash(self, param):\n param = param.encode('utf-8')\n my_hash = hashlib.md5(param)\n return my_hash.hexdigest()", "def get_hash(self):\r\n return", "def __hash__(self):\n return hash(self.dec_value)", "def __hash__(self):\n return hash(self.query_path) ^ hash(self.field) ^ hash(self.visit_counter)", "def __hash__(self):\n return self.value.__hash__()", "def hashname(self):\n return hashlib.md5(self.name.encode('utf-8')).hexdigest()", "def __hash__(self):\n base = 1\n h = 0\n for l in self.data:\n for i in l:\n if i:\n h += base\n base *= 2\n return hash(h)", "def __get_hashstr(_config_object: dict):\n hashobj = hashlib.md5()\n json_str = json.dumps(_config_object, sort_keys=True).encode('utf-8')\n hashobj.update(json_str)\n dig = hashobj.hexdigest()\n return dig\n # return hashobj.update(json.dumps(_config_object, sort_keys=True).encode('utf-8')).hexdigest()", "def hash(self):\n return self.__hash__()", "def __hash__(self):\n return super().__hash__()", "def keyhash(string):\n return hashlib.sha1(string.encode('utf-8')).hexdigest()", "def hashing_info(string):#KEY HASHING FUNCTION\n nodeInfo = string.encode('utf-8')\n\n #md5 -> 2^7 = 128 bits\n hash_object = hashlib.md5()\n hash_object.update(nodeInfo)\n\n tmp = hash_object.hexdigest()\n tmp = int(tmp,16)\n\n result = tmp >> (128-16)\n return result", "def __hash__(self):\r\n return hash(self.__key())", "def default_hash():\n return \"!\"", "def __hash_md5__(self, text):\n key = hashlib.md5()\n key.update(text.encode('utf-8'))\n return key.digest()", "def __str_to_hash(string_to_hash: str, errors: str = 'ignore') -> str:\n string_hash = string_to_hash.encode(encoding=\"utf-8\", errors=errors)\n return hashlib.md5(string_hash).hexdigest()", "def __hash__(self) -> int:", "def _hash(self, string, hash_type):\n hash_types = {\n 'TABLE_OFFSET': 0,\n 'HASH_A': 1,\n 'HASH_B': 2,\n 'TABLE': 3\n }\n seed1 = 0x7FED7FED\n seed2 = 0xEEEEEEEE\n\n for ch in string.upper():\n if not isinstance(ch, int): ch = ord(ch)\n value = self.encryption_table[(hash_types[hash_type] << 8) + ch]\n seed1 = (value ^ (seed1 + seed2)) & 0xFFFFFFFF\n seed2 = ch + seed1 + seed2 + (seed2 << 5) + 3 & 0xFFFFFFFF\n\n return seed1", "def get_hash(s):\n hash_object = hashlib.md5(s.encode())\n return hash_object.hexdigest()", "def __hash__(self) -> int:\n return hash(self.value)", "def _calculate_hash(self) -> str:\n data_str = str(self.version) + str(self.index) + self.pre_hash + str(self.timestamp) + str(self.data)\n return 
sha256(data_str.encode('utf-8')).hexdigest()", "def internal_hash(self): \n return hash(tuple(sorted(self.hashtriples())))", "def hashWord(self, word):\n return self.func(word).hexdigest()", "def __hash__(self):\n\n return int(self._hash_value_)", "def __hash__(self):\n return hash(self.idl)", "def __hash__(self) -> int:\n return hash(self.identifier)", "def __hash__(self):\n return hash((self.begin, self.end))", "def __hash__(self):\n\t\treturn 1" ]
[ "0.72970223", "0.70238286", "0.6990711", "0.68921405", "0.6877547", "0.6868672", "0.6868672", "0.6868672", "0.6853107", "0.6853107", "0.6853107", "0.6853107", "0.6820892", "0.68152857", "0.67963797", "0.67854613", "0.67854613", "0.67854613", "0.6765069", "0.67579234", "0.6742125", "0.6736698", "0.670241", "0.6683789", "0.6656659", "0.6606796", "0.6572969", "0.6572969", "0.6533307", "0.6531844", "0.6531065", "0.65307814", "0.6524576", "0.651418", "0.65137756", "0.65063715", "0.65046847", "0.6491268", "0.64853555", "0.6484515", "0.6478664", "0.6463461", "0.6463461", "0.6451282", "0.6450119", "0.6436164", "0.64299536", "0.64275193", "0.6426153", "0.6404332", "0.63995", "0.63945657", "0.6393774", "0.63876116", "0.63616174", "0.6357527", "0.634348", "0.63394624", "0.6327957", "0.6323692", "0.63170034", "0.6316052", "0.63127005", "0.6304794", "0.62977505", "0.62972814", "0.6291896", "0.62904966", "0.62894535", "0.6289078", "0.6282609", "0.6281888", "0.6280591", "0.62725544", "0.6262689", "0.6261838", "0.6252167", "0.6251717", "0.6249006", "0.624719", "0.6230933", "0.62279147", "0.6216818", "0.6212497", "0.62119657", "0.62118244", "0.6210827", "0.6200333", "0.6187187", "0.6173829", "0.61613774", "0.6158322", "0.6158242", "0.61463845", "0.6145031", "0.61334276", "0.612963", "0.6115413", "0.61091983", "0.61059016" ]
0.8038765
0
Access parameter value by name.
def __getitem__(self, key): return self.params[key].value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getParam(self, params, name):\n return params.get(name)", "def getParameter(self, name):", "def GetValueByName(self, name):", "def get_param_with_name(self, param_name):\n return self.params[param_name]", "def get_parameter(self, name: str) -> any:\r\n if name in self.kwargs:\r\n return self.kwargs[name]\r\n for x in self.args:\r\n if isinstance(x, dict) and name in x:\r\n return x[name]\r\n else:\r\n return None", "def __getitem__(self, name: str) -> object:\n return super(Parameter, self).__getitem__(name)", "def getValue(self, name):\n values = self.__get('values')\n return values[name]", "def get_value(name):\n\n named_value = get_named_value_raw(name)\n if named_value is not None:\n return named_value.value", "def get_parameter(self, name):\n if name not in self._parameters.keys():\n raise ValueError(\"Component ({}) has no Parameter name ({})\".format(self.name, name))\n\n return self._parameters[name]", "def get_parameter_value(self, parameter_name):\n if parameter_name in self.description[\"config\"][\"values\"].keys():\n return self.description[\"config\"][\"values\"][parameter_name][\"value\"]\n else:\n return \"No such parameter\"", "def getValue(self, name):\n\n return getattr(self, name)", "def __getitem__(self, key):\n return self.parameters[key].value", "def gui_get_param(self,param_name):\n return self._tkvars[param_name].get()", "def _get_parameter(self, name):\n for parameter in self.parameters:\n if name in parameter.names:\n if isinstance(parameter, _Switch):\n return parameter.is_set\n else:\n return parameter.value\n raise ValueError(\"Option name %s was not found.\" % name)", "def _get_one_param(self, param_name):\n return getattr(self, '__' + param_name)", "def getParameter(self, session: Session, name: str) -> Parameter:\n\n try:\n dbParam = self._globalParametersDbHandler.getParameter(\n session, name)\n\n return Parameter.getFromDbDict(dbParam.__dict__)\n except TortugaException:\n raise\n except Exception as ex:\n self._logger.exception(str(ex))\n raise", "def get(self, name):\n parts = name.split('.', 1)\n return getattr(self, parts[0]).get(parts[1], self.input_params_default[parts[1]])", "def get_param(self, name):\n for param in self._parameters:\n if name == param._name:\n return param\n pass\n new_param = self._add_param(name)\n return new_param", "def getValue(self, name: unicode) -> object:\n ...", "def get_parameter(self, key):\n return self._params[key]", "def getSSMParam(name):\n return ssm_client.get_parameter(\n Name=name,\n WithDecryption=True\n )['Parameter']['Value']", "def get_param(self, step_id, name):\n step_params = self._params.get(step_id)\n return step_params.get(name) if step_params else None", "def get_arg(self, name):\n return getattr(self.args, f\"{self.key}_{self.alias}_{name}\")", "def getValue(name, default=None):", "def get(self, name):\n\n # Fast path: check for a non-conditional param or for a conditional param\n # that was defined in the current scope.\n full_cond_name = self._get_name(name)\n if full_cond_name in self.values:\n if self._conditions_are_active():\n return self.values[full_cond_name]\n else:\n raise ValueError(\n 'Conditional parameter {} is not currently active'.format(\n full_cond_name))\n\n # Check for any active conditional param.\n found_inactive = False\n full_name = self._get_name(name, include_cond=False)\n for name, val in self.values.items():\n hp_parts = self._get_name_parts(name)\n hp_scopes = hp_parts[:-1]\n hp_name = hp_parts[-1]\n hp_full_name = self._get_name(\n hp_name,\n 
scopes=hp_scopes,\n include_cond=False)\n if full_name == hp_full_name:\n if self._conditions_are_active(hp_scopes):\n return val\n else:\n found_inactive = True\n\n if found_inactive:\n raise ValueError(\n 'Conditional parameter {} is not currently active'.format(\n full_cond_name))\n else:\n raise ValueError(\n 'Unknown parameter: {}'.format(full_name))", "def get_param(self, param_name):\n if hasattr(self, param_name):\n return getattr(self, param_name)\n else:\n return None", "def get(self, name, **valuefilter):\n if not valuefilter:\n valuefilter = self.valuefilter\n varobj = Variable(name, **valuefilter)\n value = varobj.get(gid=self.gid)\n return value", "def get(self, name):\r\n if isinstance(name, (list,tuple)): # get many?\r\n for n in name:\r\n if n not in self.prm:\r\n self._illegal_parameter(name)\r\n return [self.prm[n] for n in name]\r\n else:\r\n if name not in self.prm:\r\n self._illegal_parameter(name)\r\n return self.prm[name]", "def __getitem__(self, name):\n return self.get(name)", "def get(self, name):", "def __getitem__(self, name):\r\n return self.get(name)", "def get_argument(self, name):\n val = self.arguments.get(name)\n if val:\n return val[0]\n return None", "def __getattr__ (self, name):\n\t\ttry:\n\t\t\treturn self.__dict__[name]\n\t\texcept KeyError:\n\t\t\treturn self.__dict__[\"value\"][name]", "def get(self, var_name):\n if var_name in self._var_names:\n iv = self._var_names.index(var_name)\n return self._vals[iv]\n elif var_name in self._params:\n return self._params[var_name]\n else:\n raise KeyError(\"Nothing found for %s in vars (%s) or params (%s)\" % (str(var_name),\n ', '.join(self._var_names),\n ', '.join(self._params.keys())))", "def get_param_values(self,obs_name,param):\n\n return self.datasets[obs_name][param]", "def __getitem__(self, name):\n return self.f_get(name)", "def getParam(self, name, enum=None):\n return Parameter(self, name, enum)", "def get(self,name, return_names=False):\n matches = self.grep_param_names(name)\n if len(matches):\n if return_names:\n return self._get_params()[matches], np.asarray(self._get_param_names())[matches].tolist()\n else:\n return self._get_params()[matches]\n else:\n raise AttributeError, \"no parameter matches %s\"%name", "def getParameter(self, name, defaultValue):\n try:\n if self.params.has(name):\n return self.params.get(name)\n else:\n return defaultValue\n except JSONException as je:\n return defaultValue", "def getvalue(self,num,name):\n return self.M.conf(num)[name]", "def _get_ssm_param(self, parameter_name):\n response = self.ssm_client.get_parameter(Name=parameter_name)\n res = response.get(\"Parameter\", {})\n cwa_parameter = res.get(\"Value\", {})\n return cwa_parameter", "def get_param(self, param_name, memo=None):\n # Cast param_name to str once, for convenience:\n # (This is needed because Parameter members are Enum objects,\n # which can't be used in place of string-valued indexes)\n param_name = str(param_name)\n explicit_attr = getattr(self, param_name)\n if explicit_attr is not None:\n return explicit_attr\n else:\n return self.build_param(param_name, memo=memo)", "def get(self, name):\n pass", "def getvalue(self, name, *default):\n try:\n return self.getattr(name).value\n except KeyError:\n if default:\n return default[0]\n raise", "def __getitem__(self, name):\n value = self.get(name)\n if value is not None:\n return value\n raise KeyError(name)", "def get(self, name: str) -> Value:\n if name in self.read_hooks:\n func = self.read_hooks[name]\n log.debug(\"Will use function {} to 
read input\".format(func))\n val = func(name)\n return val\n if name in self._map:\n return self._map[name]\n log.debug(\"Did not find a mapping for variable '{}' in {}\".format(name, self._map))\n return self.default_value", "def input_param(self, name):\n return self._input_params.get(name, None)", "def __getitem__(self, name):\n if name in self.data: return self.data[name]", "def get_value(obj, name):\n if isinstance(obj, dict):\n return obj.get(name)\n\n return getattr(obj, name, obj)", "def setParameter(self, name, value):", "def __getitem__(self, name):\n \n # Can you have a variable and a structure with the same name?\n if name in self.vars:\n return self.vars[name]\n \n name = name.upper()\n if name in self.structs:\n return self.struct[name]\n\n raise KeyError('%s not found as a variable or structure' % (name))", "def get_variable_value(self, name):\n return self._design.GetVariableValue(name)", "def lookup(self, name):\n return self.fieldDict[name]", "def paramValue(self, key):\n return self.options[key]", "def __getitem__(self, name):\n return self.__getattr__(name)", "def get_params(self, name=None):\n\n if name is None:\n return self.params\n assert name in self.params\n return self.params[name]", "def get_variable(self, name):\n return self._properties[name]", "def _name_to_variable(self, name: str) -> Parameter:\n return cast(Parameter, super()._name_to_variable(name))", "def _get(self, name):\n raise NotImplementedError", "def get_param(self, param):\n return self.params.get(param, None)", "def argument(self, name_argument):\n answer = self._call('argument', argument=name_argument)\n return answer.name, answer.value", "def argument_value(self, idx: int):\n return self._values[idx][0]", "def get(self, name):\n for func in (self.getarg, self.getflag, self.getcmd):\n try:\n return func(name)\n except KeyError:\n pass\n return None", "def __getitem__(self, name):\n return getattr(self, name)", "def value(self, key, index=0):\n values = []\n for line in self.lines:\n if areTheSame(line[\"NAME\"],key):\n values.append(line[\"PARAM\"][index])\n if len(values) == 1:\n return values[0]\n\n return values", "def get_parameter_value(self,name,parameterized_object=None):\n source = parameterized_object or self.get_source_po(name)\n return source.get_value_generator(name)", "def getParam(self,param):\n if param in self.params.keys():\n return self.params[param]\n else:\n return None", "def get_value (self, name):\n value, type = wrapped (win32api.RegQueryValueEx, self.pyobject (), name)\n return value", "def getValue(self, valueName):\n\t\treturn self.settings[valueName][0]", "def __getattr__(self, name):\n value = self.__dict__.get(name)\n if not value:\n raise AttributeError('No such attribute {0}'.format(name))\n return value", "def __getitem__(self, name):\n return self.entry[name]", "def getParameter(self, *args):\n return _libsbml.KineticLaw_getParameter(self, *args)", "def get(self, path):\n return self.param_tree.get(path)", "def get_parameter(self, param):\n try:\n result = self._data[\"queryResult\"][\"parameters\"][param]\n except KeyError:\n result = None\n\n return result", "def get_variable(self, param_name):\n\n return self.session.run([var for var in tf.global_variables(self.tf_scope)\n if var.op.name.split(\"/\")[-1] == param_name][0])", "def namedModelParameter(self, s):\n try:\n idx = self.parameterNames.index(s)\n except ValueError:\n return None\n \n if idx >= len(self.stateVector):\n idx -= len(self.stateVector)\n val = self.otherModelParameters[idx]\n else:\n val = 
self.stateVector[idx]\n \n return val", "def get_parameter(cur, par):\n cur.execute(\"SELECT value FROM parameters WHERE par='%s';\" % par)\n return cur.fetchone()[0]", "def get(self, name=None):\n raise NotImplementedError", "def __getitem__(self, index) -> torch.nn.Parameter:\n return self.parameters[index]", "def __getitem__(self, name):\n idx = self.lookup[name]\n return self.stack[idx][1]", "def get_from_table(self, path, name):\n df_table = self.get(path)\n keys = df_table[\"Parameter\"]\n if name in keys:\n job_id = keys.index(name)\n return df_table[\"Value\"][job_id]\n raise ValueError(\"Unknown name: {0}\".format(name))", "def __getattr__(self, name):\r\n\t\treturn self.properties[name]", "def __getattr__(self, name):\r\n\t\treturn self.properties[name]", "def get_parameter(key):\n if key in param_dict:\n # was a supplied parameter\n param = param_dict.get(key)\n elif module and hasattr(module, key):\n param = getattr(module, key)\n if hasattr(param, \"__call__\"):\n # we don't allow module methods\n raise Exception()\n elif attr_getter:\n # get value from attr_getter function\n try:\n param = attr_getter(key)\n except: # noqa e722\n raise Exception()\n else:\n raise Exception()\n if isinstance(param, Composite):\n if param.text():\n param = param.copy()\n else:\n param = \"\"\n return param", "def get_parameter(par_name):\r\n config_file = open('./config.txt', 'r')\r\n lines = config_file.readlines()\r\n for line in lines:\r\n line = line.rstrip('\\n\\r')\r\n if line.startswith(par_name):\r\n return line.split('=')[1]", "def get_parameter(name, parameter, path=None):\n _ensure_exists(name, path=path)\n cmd = \"lxc-cgroup\"\n if path:\n cmd += f\" -P {shlex.quote(path)}\"\n cmd += f\" -n {name} {parameter}\"\n ret = __salt__[\"cmd.run_all\"](cmd, python_shell=False)\n if ret[\"retcode\"] != 0:\n raise CommandExecutionError(f\"Unable to retrieve value for '{parameter}'\")\n return ret[\"stdout\"].strip()", "def getValue(self,value):\n if value in self.header.keys():\n return self.header[value]\n if value in self.subintinfo.keys():\n return self.subintinfo[value][-1]\n if self.params is None:\n return None\n return self.params.get(value) #will return None if non-existent", "def get_parameter(self, field_name, default_value=None):\n if field_name in request.args:\n return get_literal(request.args.get(field_name, default_value))\n\n if self.data is not None and field_name in self.data:\n return self.data.get(field_name, default_value)\n\n return default_value", "def __getitem__(self, name):\n if self.outputs is not None:\n try:\n return self.outputs[name]\n except KeyError:\n if name in self._auto_ivc_map:\n return self.inputs[self._auto_ivc_map[name]]\n if self.inputs is not None:\n return self.inputs[name]\n elif self.inputs is not None:\n return self.inputs[name]\n\n raise KeyError('Variable name \"%s\" not found.' % name)", "def _fetch_ssm_parameter(key_name, with_decryption=False):\n try:\n ssm = boto3.client('ssm')\n response = ssm.get_parameter(\n Name=key_name,\n WithDecryption=with_decryption\n )\n if response['ResponseMetadata']['HTTPStatusCode'] == 200:\n _print_debug(\n 'Found parameter with key name: \\'{}\\' in SSM.'.format(\n key_name))\n return response['Parameter']['Value']\n\n except ClientError as e:\n if e.response['ResponseMetadata']['HTTPStatusCode'] == 400:\n _print_error(\n 'Parameter with key: \\'{}\\' not found in SSM.'.format(key_name))\n else:\n _print_error(\n 'Unexpected error while trying to get parameter: \\'{}\\'. 
Exception: {}'.format(\n key_name, e))", "def get(name, default=None):", "def name(self, name):\n return self[self.name_cache[name]]", "def pget(self, name):\n getter = attrgetter(name)\n attr = getter(self.pobj)\n return attr", "def pget(self, name):\n getter = attrgetter(name)\n attr = getter(self.pobj)\n return attr", "def __getitem__(self, name):\n\n return self._settings[name]", "def __getitem__(self, item):\n if item == \"data\":\n return self.f_get()\n elif item == \"default\" or item == -1:\n return self.f_get_default()\n else:\n return super(Parameter, self).__getitem__(item)", "def get(self, pvname: str):\n if self.protocol == \"ca\":\n return caget(pvname)\n\n elif self.protocol == \"pva\":\n return self.context.get(pvname)", "def __getitem__(self, name):\n if name in self._variables:\n result = self._variables[name]\n\n if isinstance(result, Delayed):\n return result.get()\n return result\n\n raise VariableError(name)", "def get_trial_param(self, trial_id: int, param_name: str) -> float:\n raise NotImplementedError", "def get_attr(self, name: str):\n return self.call(name)" ]
[ "0.81560856", "0.8143507", "0.7823986", "0.7630813", "0.7557198", "0.7383771", "0.7367008", "0.7312119", "0.7287928", "0.7264385", "0.7236408", "0.7168084", "0.7144382", "0.7120741", "0.70506805", "0.7047843", "0.7025448", "0.70219696", "0.70115006", "0.69442713", "0.6915904", "0.6915825", "0.6872281", "0.68711305", "0.67981595", "0.6793245", "0.6772748", "0.6772261", "0.6768298", "0.6765007", "0.6748363", "0.67221075", "0.67207074", "0.6719711", "0.6714839", "0.6707496", "0.6695798", "0.6662277", "0.66439265", "0.6637341", "0.6636036", "0.66185313", "0.65875286", "0.65616894", "0.6557115", "0.65283704", "0.6518847", "0.64788973", "0.6462851", "0.6437803", "0.6433889", "0.642526", "0.63958395", "0.63830894", "0.6366012", "0.63303214", "0.63274145", "0.63237005", "0.630916", "0.6308142", "0.6299147", "0.62905824", "0.6288157", "0.62708604", "0.62694997", "0.6268176", "0.6259116", "0.624458", "0.62412435", "0.62292993", "0.6220185", "0.62177247", "0.6205254", "0.619409", "0.6192132", "0.61884737", "0.6182884", "0.6180518", "0.61798954", "0.6163776", "0.6161959", "0.6155548", "0.6155548", "0.6148343", "0.61399144", "0.61372584", "0.6137246", "0.6130305", "0.612415", "0.6113862", "0.61044717", "0.60945165", "0.60943854", "0.60943854", "0.60926425", "0.60906965", "0.60833406", "0.6075264", "0.6071111", "0.6068122" ]
0.6977442
19
Modify parameter value by name.
def __setitem__(self, key, value): self.params[key].value = value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setParameter(self, name, value):", "def set(self,name,val):\n matches = self.grep_param_names(name)\n if len(matches):\n x = self._get_params()\n x[matches] = val\n self._set_params(x)\n else:\n raise AttributeError, \"no parameter matches %s\"%name", "def __setitem__(self, name: str, value):\n super(Parameter, self).__setitem__(name, value)", "def set_parameter(self, params, name, val):\n raise NotImplementedError()", "def set(self, name, value=None):\n if isinstance(name, dict):\n for key, value in name.items():\n try:\n param, value = self.coerceParameter(key, value)\n self.params[param] = value\n except KeyError:\n pass\n elif isinstance(name, str):\n try:\n param, value = self.coerceParameter(name, value)\n self.params[param] = value\n except KeyError:\n pass", "def _set_param(self, name, value):\n self._frozenjson._data[name] = value", "def update_parameter(self, name, freq, value):\n if name not in self._parameters.keys():\n self.add_parameter(name, [freq], [value])\n else:\n param = self.get_parameter(name)\n param.update_value(freq, value)", "def param_name(self, value):\n self._param_name = value", "def set_param(self, name, value, *, distrib=None, ref=None):\n raise NotImplementedError", "def put_param(self, attr_name, val):\n self._params[attr_name] = val", "def setTemplateParameter(self,name,value):\n self.tplparam[name] = value", "def log_param(self, name: str, value):\n self.params[name] = value\n\n self._sync_log_event()", "def set(self, name, value):\n pass", "def set_param(self, name, value):\n name = str(name)\n value = str(value)\n cxnlib.CXNNetSetParam(self.handle,\n ctypes.c_char_p(name.encode('utf-8')),\n ctypes.c_char_p(value.encode('utf-8')))", "def __setitem__(self, name, val):\r\n matches = self.grep_param_names(name)\r\n if len(matches):\r\n val = np.array(val)\r\n assert (val.size == 1) or val.size == len(matches), \"Shape mismatch: {}:({},)\".format(val.size, len(matches))\r\n x = self._get_params()\r\n x[matches] = val\r\n self._set_params(x)\r\n else:\r\n raise AttributeError, \"no parameter matches %s\" % name", "def set_param(params, pname, value=None, bounds=None):\n if value is not None:\n for p in params.flattened():\n if p.name == pname:\n p.value = value\n break\n\n if bounds is not None:\n for p in params.flattened():\n if p.name == pname:\n p.bounds = bounds\n p.vary = True\n break", "def getParameter(self, name):", "def set_parameter_value(self, parameter_name, new_value):\n self.description[\"config\"][\"values\"][parameter_name][\"value\"] = new_value\n ## Update MongoDB\n #self.mongo_client.cps2_project.objects.update_one(\n #{\"_id\": self.mongo_id},\n #{\"$set\": {\"config.values.\" + parameter_name + \".value\": new_value,\n #\"last_modified.value\": str(datetime.utcnow())}\n #}\n #)\n print(\"Switched the parameter \" + parameter_name + \" to \" + new_value + \" and updated MongoDB.\")", "def set_option(self, name, value):\n self._params[name] = value", "def set_option(self, name, value):\n self._params[name] = value", "def __setattr__(self, name, value):\n if name in ['parameters', 'program_name']: # Allowed attributes\n self.__dict__[name] = value\n else:\n self.set_parameter(name, value) # treat as a parameter", "def put(self, name, val):\n pass", "def setParameter(self,arg,value):\n self._params[arg] = value\n return self._params", "def add_or_replace_parameter(url, name, new_value):\n return _add_or_replace_parameters(url, {name: new_value})", "def set_value(name, value):\n\n # Get existing named value\n named_value = 
get_named_value_raw(name)\n\n if named_value is None:\n # Create new named value\n named_value = NamedValue()\n named_value.name = name\n\n # Edit value\n named_value.value = value\n\n # Save\n named_value.put()\n\n # Finished\n return named_value", "def set_parameter_value(self,name,val,parameterized_object=None):\n source = parameterized_object or self.get_source_po(name)\n object.__setattr__(source,name,val)\n\n # update the tkvar\n if name in self._tkvars:\n self._tkvars[name]._original_set(self._object2string(name,val))", "def setParam(self,param,value):\n if param in self.params.keys():\n self.params[param] = value", "def _put_ssm_param(self, parameter, parameter_name):\n self.ssm_client.put_parameter(\n Name=parameter_name,\n Type=\"String\",\n Value=json.dumps(parameter),\n Overwrite=True,\n Tier=\"Intelligent-Tiering\",\n )", "def rename_param(self, param, name):\n old_name = param.name\n new_name = self._get_unique_param_name(name, param.mode)\n \n param._name = new_name\n \n if param.mode == NodeParam.INPUT:\n self._input_params.pop(old_name)\n self._input_params[new_name] = param\n else:\n self._output_params.pop(old_name)\n self._output_params[new_name] = param\n \n return new_name", "def __updateParameter(self, currentParam, newParam):\n for i in xrange(len(currentParam)):\n for np in newParam:\n if np['name'] == currentParam[i]['name']:\n currentParam[i] = np", "def setName(self, name: str, /) -> Any:\n ...", "def __setattr__(self, name, value):\n if hasattr(self, name):\n super(JobSubmission, self).__setattr__(name, value)\n\n else:\n self.params[str(name)] = str(value) #TODO: resolve parameter cases", "def putparam(self,parname_,parvalue_): # 3\n res = self.__obj.putparam(parname_,parvalue_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def update_parameter(self, param, val, force=False):\n self._update_dict[param] = val\n if force:\n self._cur_val[param] = None", "def set_parameter_value(self, parameter, value):\n pass", "def update(self, job_name, param_name, value, description=None):\n if job_name in self._jobs:\n getattr(self._jobs[job_name], param_name).update(value, description)\n else:\n self.log.error(\"Invalid job name: %s\", job_name)", "def setLocal(name, value):", "def setValue(self, name: unicode, value: object) -> None:\n ...", "def update(name):\n strRet = mapping(name)\n return strRet", "def __setJobParam( self, name, value ):\n if not self.jobID:\n return S_ERROR( 'JobID not defined' )\n\n self.log.verbose( 'setJobParameter(%s, %s, %s)' % ( self.jobID, name, value ) )\n return RPCClient( 'WorkloadManagement/JobStateUpdate', timeout = 120 ).setJobParameter( int( self.jobID ), str( name ), str( value ) )", "def setValue(self, name, value):\n values = self.__get('values')\n values[name] = value\n self.__set('values', values)", "def edit_parameter(request, parameter, **_kwargs):\n pass", "def change_general_param(self, param, val):\n assert param in self.params, '%s is not recognized as a valid parameter' % param\n self.params[param].change_value(val)", "def __setattr__(self, name, value):\n self.set(**{name: value})", "def set_parameter(self, params, name, val):\n if name == \"model\":\n params.model = val\n return params\n available_models = [\n entry_point.name\n for entry_point in pkg_resources.iter_entry_points(\n \"dxtbx.scaling_model_ext\"\n )\n ]\n phil_branches = [\n params.weighting.error_model,\n params.cut_data,\n params.scaling_options,\n params.reflection_selection,\n 
params.reflection_selection.random,\n params.reflection_selection.random.multi_dataset,\n ]\n if params.model:\n phil_branches.append(params.__getattribute__(str(params.model)))\n elif (\".\" in name) and (name.split(\".\")[0] in available_models):\n # if the user hasn't specified the model, but have done\n # e.g physical.parameter = *, then set model=physical\n params.model = name.split(\".\")[0]\n phil_branches.append(params.__getattribute__(str(params.model)))\n if \".\" in name: # handle e.g physical.absorption_correction\n name = name.split(\".\")[-1]\n for branch in phil_branches:\n try:\n branch.__setattr__(name, val)\n return params\n except AttributeError:\n pass\n # if get here, haven't found what we're trying to set\n raise ValueError(\"Unable to set chosen attribute \" + str(name) + \"=\" + str(val))", "def writeByName(self, name, value):\n pass", "def set_value(self, var_name, new_value, tf_session):\n\n if(var_name in self.assign_operator):\n\n tf_session.run(\n self.assign_operator[var_name], {\n self.l_param_input[var_name]: new_value})\n else:\n print(\"Thou shall only assign learning parameters!\")", "def modify_res_value(name, delta):\n pass", "def putparam(self,parname_,parvalue_):\n if isinstance(parname_,unicode):\n parname_ = parname_.encode(\"utf-8\",errors=\"replace\")\n if isinstance(parvalue_,unicode):\n parvalue_ = parvalue_.encode(\"utf-8\",errors=\"replace\")\n res = __library__.MSK_XX_putparam(self.__nativep,parname_,parvalue_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def TeamCityParamSetter(keyName, value):\n print(\"##teamcity[setParameter name='{}' value='{}']\".format(keyName, value))", "def updateName(self,name):\n self.name = name", "def SetExportParam(self, name, value):\n parent, subname = self.FindExport(name)\n subname = Handle._FixExportName(parent.obj, subname)\n if not hasattr(parent.obj, subname):\n raise KeyError(name)\n if not parent.obj.dirty:\n parent.obj.StartTransaction()\n parent.obj.dirty = True\n setattr(parent.obj, subname, value)\n return parent.obj", "def set(self, name, value):\n self.__getitem__(name).clear()\n self.add(name, value)", "def getParam(self, params, name):\n return params.get(name)", "def set_name(self, a_name):\n self.set_parameter('name', a_name)\n return self", "def increment(name):\n\n # Get existing named value\n named_value = get_named_value_raw(name)\n\n if named_value is None:\n # Create new named value\n named_value = NamedValue()\n named_value.name = name\n named_value.value = 0\n\n # Edit value\n named_value.value += 1\n\n # Save\n named_value.put()\n\n # Finished\n return named_value", "def addParameter(self, name, value):\r\n if not name:\r\n raise InvalidRequest('Parameter name is not a valid.')\r\n\r\n if name in self._parameters:\r\n raise InvalidRequest(\"Can not use the same parameter name '{0}' \"\r\n 'in the same container twice.'.format(name))\r\n\r\n parameter = self._obj.createParameter(name, value)\r\n self._parameters[name] = parameter\r\n parameter.notifyOnDeath(self._parameterDied)", "def set_param(self, name, val):\n # name will be 'colorR', 'colorG', 'colorB'\n rgb255 = int(val * 255)\n if name == 'colorR':\n self.color.r = rgb255\n elif name == 'colorG':\n self.color.g = rgb255\n elif name == 'colorB':\n self.color.b = rgb255", "def __adjust_param(self, option):\n # Get the name of the parameter.\n name = self.__option_params[option]\n\n # Ask the user for a new value.\n value = float(input(\"Enter value for {}: \".format(name)))\n 
self._params.update(name, value)\n\n # Update the description with the new value.\n desc = self.__make_description(name)\n self.update_description(option, desc)\n\n # Stay on the same menu.\n return self.get_name()", "def __setitem__(self, key: str, value: typing.Any):\n self._params[key].value = value", "def set_param(self, param_key, value):\n return self._params.set_param_value(param_key, value)", "def set(self, **parameters):\r\n for name in parameters:\r\n if name in self.prm:\r\n self.prm[name] = parameters[name]\r\n else:\r\n self._illegal_parameter(name)", "def name(self, value):\n self._name = value", "def name(self, value):\n self._name = value", "def set_param(\n self, param_name, *args,\n param_type=None, memo=None, **kwargs):\n # Cast param_name to str once, for convenience:\n # (This is needed because Parameter members are Enum objects,\n # which can't be used in place of string-valued indexes)\n param_name = str(param_name)\n param = self.build_param(\n param_name, *args, param_type=param_type, memo=memo, **kwargs)\n setattr(self, param_name, param)", "def process(self, name, val):\n pass", "def set_variable(self, name, value, notify=False, notify_tag=\"changed/*\"):\n split_name=tuple(dictionary.normalize_path(name))\n notify_list=[]\n with self._params_val_lock:\n if name in self._params_funcs:\n del self._params_funcs[name]\n self._params_val.add_entry(name,value,force=True)\n for exp_name in self._params_exp:\n if exp_name==split_name[:len(exp_name)] or split_name==exp_name[:len(split_name)]:\n notify_list.append((self._params_val[exp_name],self._params_exp[exp_name]))\n for val,lst in notify_list:\n for ctl in lst:\n ctl.send_message(self._variable_change_tag,val)\n if notify:\n notify_tag.replace(\"*\",name)\n self.send_signal(\"any\",notify_tag,value)", "def __setitem__(self, name, val):\n\n if name in self.vars:\n l[name].setVal(val)\n else:\n l[name] = YPFVal(name, val)", "def _set_valued_param(self, name, comp, spec, mode):\n vp = ValuedParam(name)\n signal = vp.read(comp, spec, mode)\n self._signals.update(signal)\n self._crossrefs[name] = vp\n setattr(self, name, vp)", "def setName(self, name):\n self.name = str(name)", "def set_param(self, label, val):\n assert type(label) is str, 'Parameter name \"%s\" is not string' % label\n assert type(val) is float or type(val) is int, 'Fixed parameter value is not numeric for %s' % label\n self.params[label] = val", "def update_parameter(cur, par, new_value):\n cur.execute(\"UPDATE parameters SET value=%f WHERE par='%s';\" % \n (new_value, par))", "def _update_param_from_tkvar(self,param_name):\n self.debug(\"TkPOb._update_param_from_tkvar(%s)\"%param_name)\n\n parameter,sourcePO=self.get_parameter_object(param_name,with_source=True)\n\n ### can only edit constant parameters for class objects\n if parameter.constant is True and not isinstance(sourcePO,type):\n return ### HIDDEN\n\n tkvar = self._tkvars[param_name]\n\n if self._tkvar_changed(param_name):\n # don't attempt to set if there was a string-to-object translation error\n if self.translators[param_name].last_string2object_failed:\n return ### HIDDEN\n\n # (use _original_get() because we don't want the tkvar to be reset to\n # the parameter's current value!)\n val = self._string2object(param_name,tkvar._original_get())\n\n try:\n self._set_parameter(param_name,val)\n except: # everything\n tkvar.set(tkvar._last_good_val)\n raise # whatever the parameter-setting error was\n\n self.debug(\"set %s to %s\"%(param_name,val))\n\n if hasattr(tkvar,'_on_modify'):\n 
tkvar._on_modify()\n\n ### call any function associated with GUI set()\n if hasattr(tkvar,'_on_set'):\n\n # CEBALERT: provide a way of allowing other gui components\n # to figure out where a callback error might have come\n # from. Callback instances (the Callback class is defined\n # in Tkinter.py) store a widget, but often it appears to\n # be the Tk instance - which is of no use in later\n # determining where an error might have originated.\n global _last_one_set\n if hasattr(self,'master'):\n _last_one_set = self.master\n\n tkvar._on_set()", "def addParameter(cTag, name, value): #@NoSelf", "def __setitem__(self, name: str, value):\n super(StdPrm, self).__setitem__(name, value)\n if not self.in_init:\n if name == \"value\":\n super(StdPrm, self).__setitem__(\"changed\", True)", "def GetValueByName(self, name):", "def set_parameter(self, sensor_name, parameter_name, parameter_value):\n if parameter_name == 'perspective_angle':\n parameter_value = parameter_value / (180 * 2) * math.pi\n if parameter_name in self.params_f:\n error = vrep.simxSetObjectFloatParameter(\n self.client_id,\n self.handles[sensor_name + self.postfix],\n self.params_f[parameter_name],\n parameter_value,\n ONE_SHOT_MODE\n )\n vrep.simxSetFloatSignal(\n self.client_id,\n 'change_params',\n parameter_value,\n ONE_SHOT_MODE\n )\n vrep.simxClearFloatSignal(\n self.client_id,\n 'change_params',\n ONE_SHOT_MODE\n )\n return error\n elif parameter_name in self.params_i:\n error = vrep.simxSetObjectFloatParameter(\n self.client_id,\n self.handles[sensor_name + self.postfix],\n self.params_i[parameter_name],\n parameter_value,\n ONE_SHOT_MODE\n )\n vrep.simxSetFloatSignal(\n self.client_id,\n 'change_params',\n parameter_value,\n ONE_SHOT_MODE\n )\n vrep.simxClearFloatSignal(\n self.client_id,\n 'change_params',\n ONE_SHOT_MODE\n )\n return error\n else:\n return 'Parameter not found'", "def __getitem__(self, name: str) -> object:\n return super(Parameter, self).__getitem__(name)", "def update_param_vals(pars, prefix, **kwargs):\n for key, val in kwargs.items():\n pname = \"%s%s\" % (prefix, key)\n if pname in pars:\n pars[pname].value = val\n return pars", "def __setitem__(self, name, value) -> None:\n self.__setattr__(name, value)", "def _update_param_from_tkvar(self,param_name,force=False):\n self.debug(\"TkPO._update_param_from_tkvar(%s)\"%param_name)\n\n param_obj = self.get_parameter_object(param_name)\n\n if not lookup_by_class(self.param_immediately_apply_change,\n type(param_obj)) and not force:\n return\n else:\n super(TkParameterized,self)._update_param_from_tkvar(param_name)", "def get_name(self, name):\n name.value = self._get_name(name.value.encode())", "def mutator(self, name):\r\n raise NotImplementedError", "def _valueChanged(self, instrument_name: str, parameter_name: str, value: Any, *args, **kwargs):\n instrument = self._instruments[self._instrumentnames.index(instrument_name)]\n logging.info('set %s.%s to %s' % (instrument_name, parameter_name, value))\n instrument.set(parameter_name, value)", "def tset(self, parametername, value_array):\n raise NotImplementedError", "def name(self, value):\n self._name = c(value)", "def __setattr__(self,name,val):\n # use dir() not hasattr() because hasattr uses __getattribute__\n if name in dir(self):\n\n if name in self.params():\n self.set_parameter_value(name,val,self)\n else:\n object.__setattr__(self,name,val)\n\n elif name in dir(self._extraPO):\n\n if name in self._extraPO.params():\n self.set_parameter_value(name,val,self._extraPO)\n else:\n 
object.__setattr__(self._extraPO,name,val)\n\n else:\n\n # name not found, so set on this object\n object.__setattr__(self,name,val)", "def get_param(self, name):\n for param in self._parameters:\n if name == param._name:\n return param\n pass\n new_param = self._add_param(name)\n return new_param", "def updateParameters(self, parameters):", "def set_parameter(self, name, value = None):\n set_option = False\n for parameter in self.parameters:\n if name in parameter.names:\n if isinstance(parameter, _Switch):\n if value is None:\n import warnings\n warnings.warn(\"For a switch type argument like %s, \"\n \"we expect a boolean. None is treated \"\n \"as FALSE!\" % parameter.names[-1])\n parameter.is_set = bool(value)\n set_option = True\n else:\n if value is not None:\n self._check_value(value, name, parameter.checker_function)\n parameter.value = value\n parameter.is_set = True\n set_option = True\n if not set_option:\n raise ValueError(\"Option name %s was not found.\" % name)", "def set_param(param, num, set_val):\n param[0][num] = set_val", "def setFeature(self, name, value=1):\n self.features[self.featureSet.getId(self.tag+name)] = value", "def __setitem__(self, name, value):\r\n return self.set(name=value)", "def set_info_value(self, name: str, value: Any) -> None:\n self._info_data[name] = value", "def write_parameter(self, parameter_name: str, parameter_value: Union[str, float, int]):\n self._parameters.append(Parameter(parameter_name, parameter_value))", "def set_parameter(name, parameter, value, path=None):\n if not exists(name, path=path):\n return None\n\n cmd = \"lxc-cgroup\"\n if path:\n cmd += f\" -P {shlex.quote(path)}\"\n cmd += f\" -n {name} {parameter} {value}\"\n ret = __salt__[\"cmd.run_all\"](cmd, python_shell=False)\n if ret[\"retcode\"] != 0:\n return False\n else:\n return True", "def setName(self, name):\n self.name = name", "def setName(self, name):\n self.name = name", "def setName(self, name):\n self.name = name", "def setName(self, name):\n self.name = name" ]
[ "0.80584186", "0.7877263", "0.7659529", "0.7543547", "0.747998", "0.737759", "0.7360991", "0.7008698", "0.6985977", "0.69003344", "0.6785371", "0.6783321", "0.6777866", "0.6752631", "0.6717762", "0.6686711", "0.66651803", "0.6661193", "0.66512424", "0.66512424", "0.66446614", "0.66249114", "0.6619139", "0.65787405", "0.65640694", "0.6531833", "0.65317553", "0.64573795", "0.6437589", "0.6424231", "0.63911176", "0.6378646", "0.63754964", "0.6348217", "0.6340751", "0.63195974", "0.63183135", "0.63036996", "0.62644905", "0.6243319", "0.62367874", "0.6220432", "0.62159777", "0.6211304", "0.62091154", "0.62081254", "0.6202713", "0.6201563", "0.61701727", "0.61636597", "0.61448157", "0.61189467", "0.6090947", "0.6065373", "0.60587513", "0.6043399", "0.6036486", "0.60354024", "0.6035214", "0.60305345", "0.6022407", "0.6015646", "0.60103273", "0.60103273", "0.5993855", "0.5993652", "0.5969385", "0.59665984", "0.5959885", "0.59592026", "0.5956483", "0.595376", "0.59482926", "0.5939567", "0.59336317", "0.5926142", "0.59017587", "0.58973944", "0.5896375", "0.58844185", "0.5882965", "0.58823603", "0.58801425", "0.5878929", "0.5877987", "0.58768106", "0.5850238", "0.5847361", "0.58456314", "0.5833873", "0.5828672", "0.5821376", "0.58197755", "0.5817394", "0.57989025", "0.57959557", "0.5792912", "0.5792912", "0.5792912", "0.5792912" ]
0.6204177
46
List of parameter names in the expected order.
def keys(self): return self.params.keys()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parameter_names(self) -> List[str]:", "def parameterNames(self, p_int): # real signature unknown; restored from __doc__\n return []", "def get_param_names(self):\n return list(self.params.keys())", "def parameter_names(self) -> list:\n parameters = []\n parameters.extend(self.properties.parameter_names)\n return parameters", "def _get_param_names(self):\r\n return sorted([p\r\n for p in self.__dict__\r\n if p != 'additional_args'])", "def get_paramnames_list(self):\n # TODO include syselem?\n\n query = \"SELECT NAME FROM %s\" % self.__schema\n with self.__connection.cursor() as cursor:\n cursor.execute(query)\n result = cursor.fetchall()\n return [val['NAME'] for val in result]", "def parameters_names(cls):\n return cls._Parameters._fields", "def get_param_names(hf):\n parameters = get_params(hf)\n return [p.name for p in parameters]", "def fixture_sorted_param_names(allparams):\n return sorted(list(allparams.keys()))", "def get_parameter_names(self):\n parNames = []\n # for par in self.variables: # TODO: LIKELY A BUG! DOES THE SAME AS get_variable_names()\n for par in self.parameters: # TRYING TO SOLVE THE ISSUE\n # EstimationVariable\n parNames.append(par.name)\n return parNames", "def get_str_param_names(self):\n # Exclude self.api and self.names from the command string\n return self.get_attribute_names(FormattedParameter)", "def parameter_names(self):\n return [x for x in self.transformations.values() if isinstance(x, str)]", "def parameters_list(self):\n return [getattr(self.parameters, p) for p in self.parameters_names()]", "def parameter_names(self):\n raise NotImplementedError(\"the parameter_names property should \"\n \"be defined in the Estimator sub-class\")", "def names(self):\n result = []\n result.extend(self.positional_arguments)\n if self.arbitary_positional_arguments is not None:\n result.append(self.arbitary_positional_arguments)\n if self.arbitary_keyword_arguments is not None:\n result.append(self.arbitary_keyword_arguments)\n result.extend(self.keyword_arguments)\n return result", "def get_param_names(self):\n # Sort the FIO parameter names to generate consistent fio commands\n all_param_names = super(FioCommand, self).get_param_names()\n # move the 'name' option as the first option of fio job\n all_param_names.remove(\"name\")\n all_param_names.insert(0, \"name\")\n\n return all_param_names", "def param_names(\n self, *, include_tp: bool = False, include_gq: bool = False\n ) -> List[str]:\n return (\n self._param_names(self.model, int(include_tp), int(include_gq))\n .decode(\"utf-8\")\n .split(\",\")\n )", "def get_param_names(obj: Union[Type[_BaseTpcpObject], _BaseTpcpObject]) -> List[str]:\n cls = obj if isinstance(obj, type) else type(obj)\n parameters = list(_get_init_defaults(cls).values())\n for p in parameters:\n if p.kind == p.VAR_POSITIONAL:\n raise RuntimeError(\n \"tpcp algorithms and pipelines should always specify their parameters in the signature of their \"\n f\"__init__ (no varargs). 
{cls} doesn't follow this convention.\"\n )\n # Extract and sort argument names excluding 'self'\n return sorted([p.name for p in parameters])", "def _get_fitted_param_names(self):\n return self._fitted_param_names", "def names(self) -> List[str]:\n return sorted(self.hyperparams)", "def return_parameter_names():\n return list(titles), list(labels)", "def get_mandatory_param_names(self):\n all_names = self.params.keys()\n return [name for name in all_names \n if not self.params[name].is_optional]", "def param_unc_names(self) -> List[str]:\n return self._param_unc_names(self.model).decode(\"utf-8\").split(\",\")", "def parameters(self):\n return [p for _, a in vars(self).items() for p in self._params(a)]", "def _get_param_names(cls):\n # fetch the constructor or the original constructor before\n # deprecation wrapping if any\n init = getattr(cls.__init__, 'deprecated_original', cls.__init__)\n if init is object.__init__:\n # No explicit constructor to introspect\n return []\n\n # introspect the constructor arguments to find the model parameters\n # to represent\n init_signature = signature(init)\n # Consider the constructor parameters excluding 'self'\n parameters = [\n p for p in init_signature.parameters.values()\n if p.name != 'self' and p.kind != p.VAR_KEYWORD\n ]\n for p in parameters:\n if p.kind == p.VAR_POSITIONAL:\n raise EstimatorParameterError(\n 'scikit-learn estimators should always '\n 'specify their parameters in the signature'\n ' of their __init__ (no varargs).'\n \" %s with constructor %s doesn't \"\n ' follow this convention.' % (cls, init_signature))\n # Extract and sort argument names excluding 'self'\n return sorted([p.name for p in parameters])", "def _get_param_names(self):\n temp_params = {'function': self.function, 'target': self.target}\n\n temp_params.update(self.kwargs)\n\n return temp_params", "def parameter_names(self, add_self=False, adjust_for_printing=False, recursive=True):\n if adjust_for_printing: adjust = lambda x: adjust_name_for_printing(x)\n else: adjust = lambda x: x\n if recursive: names = [xi for x in self.parameters for xi in x.parameter_names(add_self=True, adjust_for_printing=adjust_for_printing)]\n else: names = [adjust(x.name) for x in self.parameters]\n if add_self: names = map(lambda x: adjust(self.name) + \".\" + x, names)\n return names", "def getParameterList(self):\n inputList = []\n for name, n in zip(self._names, self._inputs):\n inputList += ['%s.x%d' % (name, i) for i in range(n)]\n return inputList", "def get_params(self, pnames=None):\n l = []\n if pnames is None:\n pnames = self._params.keys()\n for pname in pnames:\n p = self._params[pname]\n if isinstance(p, Parameter):\n l.append(p)\n return l", "def parameters(self):\n return []", "def argument_list(self):\n answer = self._call('argument_list')\n return answer.names", "def get_params_list():\n return common.QOL_PARAMS", "def _create_parameter_names(self):\n self._parameter_names = [key for key in self.parameter_schema.keys() if key != 'num_simulations']", "def get_required_parameters(self) -> list:\n results = []\n if self.no_params or self.params_optional:\n return []\n else:\n for parameter, parameter_details in self.parameters.items():\n # Fixing issue #92\n # if parameter == \"effect\":\n # continue\n if not parameter_details.default_value:\n results.append(parameter_details.name)\n return results", "def get_hyperparameter_names():\n params = ['mu', 'nu', 'r', 's']\n return params", "def param(self):\n return []", "def param(self):\n return []", "def 
_get_param_names(cls):\n # fetch the constructor\n init = getattr(cls.__init__, 'Database Class', cls.__init__)\n\n if init is object.__init__:\n # no constructor to inspect\n params = []\n else:\n # inspect constructor\n sig = inspect.signature(init)\n parameters = [p for p in sig.parameters.values()\n if p.name != 'self' and\n p.kind != p.VAR_KEYWORD]\n\n for p in parameters:\n if p.kind == p.VAR_POSITIONAL:\n raise RuntimeError(\n 'Database objects should always specify their '\n 'parameters in the signature of their __init__. '\n '{class_} with constructor {signature} does not follow '\n 'this convention.'.format(\n class_=cls,\n signature=sig\n )\n )\n\n # Extract and sort argument names excluding 'self'\n params = sorted([p.name for p in parameters])\n\n return params", "def return_all_parameter_names():\n a = list(titles)\n a.append(r\"$\\chi^{2}$ per degree of freedom\")\n b = list(labels)\n b.append(\"chi2_per_dof\")\n return a, b", "def arg_names(self):\n return self._arg_names", "def arg_names(self):\n return self._arg_names", "def _hack_get_named_params(self):\n named_params = OrderedDict()\n params = self.get_parameters()\n if params:\n # See if we can hack to gether what the param names are\n unused = OrderedDict(sorted(self.__dict__.items()))\n for p in params:\n found = False\n for key, value in list(unused.items()):\n if p is value:\n named_params[key] = p\n unused.pop(key)\n found = True\n if not found:\n key = '__UNKNOWN_PARAM_NAME_{}__'.format(len(named_params))\n named_params[key] = p\n return named_params", "def get_params(self):\n return []", "def get_ext_param_names(self):\n num_param = core.xc_func_info_get_n_ext_params(self.xc_func_info)\n\n ret = []\n for p in range(num_param):\n tmp = core.xc_func_info_get_ext_params_name(self.xc_func_info, p)\n ret.append(tmp.decode(\"UTF-8\"))\n\n return ret", "def _create_parameter_names(self):\n self._parameter_names = self.parameter_schema[\"problem\"][\"names\"]", "def list_params(ns):\n try:\n ns = make_global_ns(ns)\n names = get_param_server().getParamNames()\n names.sort()\n return [n for n in names if n.startswith(ns)]\n except socket.error:\n raise RosParamIOException(\"Unable to communicate with master!\")", "def param(self):\r\n\r\n return []", "def _f_in_parameters(self) -> List[Tuple[str, str]]:\n result = list() # type: List[Tuple[str, str]]\n for param in self.params:\n type_list = param.f_type()\n for type_name, postfix in type_list:\n result.append((type_name, param.name + postfix))\n return result", "def _get_parameters(self) -> list:\n return self.parameters", "def get_parameters(parameters):\n\n arg_list = []\n opt_list = []\n for param in parameters:\n param_name = param['name']\n param_required = param['required']\n if param_required:\n arg_list.append(format_parameter(param_name, param_required))\n else:\n opt_list.append(format_parameter(param_name, param_required))\n\n return arg_list + opt_list", "def param(self):\r\n return []", "def argnames(method):\n return [arg for arg in method.__code__.co_varnames if arg != \"self\"]", "def param_strs(self):\n name_len = max(len(p.name) for p in self)\n value_len = max(len(p.value_str) for p in self.params.values())\n units_len = max(len(p.units) for p in self.params.values())\n return [(p.name.ljust(name_len), p.value_str.ljust(value_len),\n p.units.ljust(units_len), p.__doc__)\n for p in self.params.values() if p]", "def getParams(options=None):\n result = []\n if options:\n members = options.__dict__\n for k, v in sorted(members.items()):\n 
result.append(\"# %-40s: %s\" % (k, str(v)))\n else:\n vars = inspect.currentframe().f_back.f_locals\n for var in filter(lambda x: re.match(\"param_\", x), vars.keys()):\n result.append(\"# %-40s: %s\" %\n (var, str(vars[var])))\n\n if result:\n return \"\\n\".join(result)\n else:\n return \"# no parameters.\"", "def get_parameterized_names():\n return [name.split('.')[0] for name in os.listdir(os.path.dirname(__file__) + '/../test_schemas')\n if 'mixins' not in name]", "def get_parameters_list(self):\n return self.description[\"config\"][\"values\"].keys()", "def params(self):\n params = []\n\n for item in self._definition.get('params', []):\n params.append(Parameter(**item))\n\n return params", "def get_params(self):\n return list(self.params.values())", "def argnames(self):\n if self.get_key is None:\n return set()\n return set(self.get_key.names)", "def get_model_parameter_names():\n params = ['mu', 'rho']\n return params", "def parameters(self):\n return [i for i in self.variables if has_roles(i, Parameter)]", "def _fc_in_parameters(self) -> List[str]:\n result = list() # type: List[str]\n\n for param in self.params:\n type_list = param.fc_type()\n for type_name, postfix in type_list:\n result.append('{} {}'.format(type_name, param.name + postfix))\n\n return result", "def _get_param_names_transformed(self):\r\n n = self._get_param_names()\r\n\r\n # remove/concatenate the tied parameter names\r\n if len(self.tied_indices):\r\n for t in self.tied_indices:\r\n n[t[0]] = \"<tie>\".join([n[tt] for tt in t])\r\n remove = np.hstack([t[1:] for t in self.tied_indices])\r\n else:\r\n remove = np.empty(shape=(0,), dtype=np.int)\r\n\r\n # also remove the fixed params\r\n if len(self.fixed_indices):\r\n remove = np.hstack((remove, np.hstack(self.fixed_indices)))\r\n\r\n # add markers to show that some variables are constrained\r\n for i, t in zip(self.constrained_indices, self.constraints):\r\n for ii in i:\r\n n[ii] = n[ii] + t.__str__()\r\n\r\n n = [nn for i, nn in enumerate(n) if not i in remove]\r\n return n", "def parameters(self):\n return [term.parameter for term in self.terms]", "def getListOfParameters(self):\n return self.model.getListOfParameters()", "def build_parameters(self) -> List[str]:\n param_bits = []\n for name in self.parameters:\n param_bits.extend(self.build_parameter_by_name(name) or [])\n return param_bits", "def params(self) -> List[ParamSpec]:\n return self._params", "def source_parameter_names(self):\n return [x for x, y in self.transformations.items() if isinstance(y, str)]", "def _get_param_names(self, direction: int, layer: int) -> List[str]:\n suffix = '_reverse' if direction == 1 else ''\n param_names = ['weight_ih_l{}{}', 'weight_hh_l{}{}']\n if self.bias:\n param_names += ['bias_ih_l{}{}', 'bias_hh_l{}{}']\n param_names = [x.format(layer, suffix) for x in param_names]\n return param_names", "def _formal_params(self, doclet):\n name, paren, params = self.arguments[0].partition('(')\n return ('(%s' % params) if params else '(%s)' % ', '.join(doclet['meta']['code']['paramnames'])", "def get_parameter_names(self, exclude_pop_model=False):\n if (self._population_model is None) or exclude_pop_model:\n names = self._mechanistic_model.parameters()\n for error_model in self._error_models:\n names += error_model.get_parameter_names()\n return names\n\n return self._population_model.get_parameter_names()", "def get_derived_paramnames(self):\n names = []\n for name, shape in zip(self.likelihood.child_derived, self.derived_shapes):\n if not isinstance(name, str):\n name = 
name.__name__\n\n if shape == 0:\n names.append((name, name))\n else:\n names.extend(\n [\n (\n name + self._index_to_string(*ind),\n name + self._index_to_latex(*ind),\n )\n for ind in np.ndindex(*shape)\n ]\n )\n return names", "def get_original_names_and_sharded_parameters(self):\n orig_named_parameters = []\n for module_name, m in self.named_modules(): # includes self\n if isinstance(m, XlaFullyShardedDataParallel):\n prefix = \"\" if module_name == \"\" else module_name + \".\"\n for p in self.sharded_params:\n n = prefix + p._orig_name\n n = n.replace(\"_fsdp_wrapped_module.\", \"\").replace(\"_fpw_module.\", \"\")\n orig_named_parameters.append((n, p))\n\n return orig_named_parameters", "def print_param_names(model):\n for (param_name, param) in model.get_parameters().items():\n print(param_name, param.shape)", "def get_params(node):\n if node.type == 'parameter':\n return [(self.descend(node.args[0]), types.translation[self.descend(node.args[1])])]\n else:\n l = []\n for p in node.args:\n l.extend(get_params(p))\n return l", "def _get_parameters(self):\n return (self.SYMBOL, self.parameterArray())", "def parameters(self) -> List[Parameter]:\n return self._parameters", "def parameter_values(self) -> List[Tuple[str, Any]]:\n pvs = [(param, getattr(self, variable))\n for variable, param in self.variable_name_to_query_param.items()]\n return [(p, v) for p, v in pvs if v is not None]", "def parameterTypes(self, p_int): # real signature unknown; restored from __doc__\n return []", "def getListOfParameters(self, *args):\n return _libsbml.KineticLaw_getListOfParameters(self, *args)", "def print_params(self):\n print(self._list_params())", "def required_names(self):\n return self.names", "def names(self) -> list[str]:", "def names(\n self\n ) -> Tuple[str, ...]:\n return self._names", "def get_params_iter(self):\n return []", "def parameter_names_from_model(model):\n variables = model.getVariables()\n itvar = variables.iterator()\n names = []\n for i in xrange(len(variables)):\n currentvar = itvar.Next()\n names.append(currentvar.GetName())\n return names", "def names(cls) -> List[str]:", "def get_optional_parameters(self) -> list:\n results = []\n if self.no_params or self.params_required:\n return []\n else:\n for parameter, parameter_details in self.parameters.items():\n # Fixing issue #92\n # if parameter == \"effect\":\n # continue\n if parameter_details.default_value:\n results.append(parameter_details.name)\n return results", "def identifiers(self):\n identifiers = []\n\n for item in self._definition.get('identifiers', []):\n identifiers.append(Parameter(**item))\n\n return identifiers", "def get_named_parameters(self):\n for name, _ in self.module_to_quantize.named_parameters():\n yield name, getattr(self, name)", "def _fi_in_parameters(self) -> List[Tuple[str, str]]:\n result = list() # type: List[Tuple[str, str]]\n for param in self.params:\n type_list = param.fi_type()\n for type_name, postfix in type_list:\n result.append((type_name, param.name + postfix))\n return result", "def step_extract_parameters(self) -> list:\n result = []\n if self.has_step_field(\"task.parameters\"):\n for param in self.step_field(\"task.parameters\"):\n for key in param:\n result += [\"-p\", key, param[key]]\n return result", "def named_trainable_parameters(self) -> List[str]:\n return [name for name, p in self._model.named_parameters() if p.requires_grad]", "def _get_param_names(class_x):\n\n # initialize the empty list of parameter names\n args = []\n\n try:\n # get signature of the original init 
method\n init = getattr(orig_init, 'deprecated_original', orig_init)\n init_signature = inspect.signature(init)\n\n # get all parameters excluding 'self'\n original_parameters = [p for p in init_signature.parameters.values()\n if p.name != 'self' and p.kind != p.VAR_KEYWORD]\n\n # there should be no varargs\n for parameter in original_parameters:\n if parameter.kind == parameter.VAR_POSITIONAL:\n raise RuntimeError(\"scikit-learn estimators should always \"\n \"specify their parameters in the signature\"\n \" of their __init__ (no varargs).\"\n \" %s with constructor %s doesn't \"\n \" follow this convention.\"\n % (cls, init_signature))\n else:\n args.append(parameter.name)\n\n except TypeError:\n pass\n\n # now get the additional rescaling arguments\n rescale_args = inspect.getargspec(class_x.__init__)[0]\n\n # Remove 'self'\n rescale_args.pop(0)\n\n # add the rescaling arguments to the original arguments and sort\n args += rescale_args\n args.sort()\n\n return args", "def get_layer_var_names(self):\n return(self.params)", "def getListOfParameters(self, *args):\n return _libsbml.Model_getListOfParameters(self, *args)", "def get_resource_params():\n return Parameter.list()", "def potential_parameters(cls):\n return [\"k\", \"angle\"]", "def _get_parameter_list(self, raw_command): # pylint: disable=no-self-use\n contents = raw_command.split(' ')\n return [item for item in contents if item.startswith('-')]", "def params(self):\n return self._pars" ]
[ "0.8899528", "0.8256276", "0.8223466", "0.81843793", "0.81773764", "0.76406306", "0.7631603", "0.75550413", "0.75500125", "0.74934304", "0.74912333", "0.74750197", "0.7323525", "0.7297529", "0.7277972", "0.7231873", "0.71911895", "0.71234643", "0.7113123", "0.71071076", "0.7076794", "0.7046652", "0.7033525", "0.70218796", "0.69966555", "0.6987485", "0.69672185", "0.6955856", "0.6948992", "0.68925995", "0.68495077", "0.6837998", "0.6826605", "0.6815433", "0.68027145", "0.6776981", "0.6776981", "0.677294", "0.67696214", "0.6764368", "0.6764368", "0.67497146", "0.67373943", "0.6728991", "0.6716955", "0.67016226", "0.66776943", "0.66352785", "0.6632901", "0.6629974", "0.66176534", "0.6613227", "0.65867376", "0.6574994", "0.65662706", "0.654472", "0.65223855", "0.64899594", "0.6484532", "0.64819497", "0.6455348", "0.6445259", "0.6440209", "0.6430902", "0.64290273", "0.63829625", "0.6369245", "0.6359385", "0.6344873", "0.63438886", "0.6310832", "0.6307841", "0.6305395", "0.63023823", "0.6292366", "0.628197", "0.6268248", "0.625611", "0.625493", "0.6234816", "0.6231013", "0.6193947", "0.6189122", "0.6188159", "0.6179801", "0.6172547", "0.6167724", "0.61650836", "0.61530775", "0.6148109", "0.6147196", "0.6145869", "0.61329806", "0.6131747", "0.6108845", "0.6108798", "0.6097372", "0.6079406", "0.60606295", "0.605385" ]
0.64429533
62
List of parameter values in the expected order ('tolist').
def values(self): return [p.value for p in self]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parameters_list(self):\n return [getattr(self.parameters, p) for p in self.parameters_names()]", "def parameter_values(self) -> List[Tuple[str, Any]]:\n pvs = [(param, getattr(self, variable))\n for variable, param in self.variable_name_to_query_param.items()]\n return [(p, v) for p, v in pvs if v is not None]", "def get_params(self):\n return list(self.params.values())", "def _get_parameters(self) -> list:\n return self.parameters", "def values(self) -> List:\n pass", "def getParameterList(self):\n inputList = []\n for name, n in zip(self._names, self._inputs):\n inputList += ['%s.x%d' % (name, i) for i in range(n)]\n return inputList", "def get_params_as_list(self):\n\n\t\tparams = [self.shape_slope, self.z_thick, self.thick, self.length]\n\t\treturn params", "def _convert_params_values(self, values_list):\n values = list()\n for v in values_list:\n values.append(v)\n return values", "def param_values(self):\n return self._param_values", "def param(self):\n return []", "def param(self):\n return []", "def getListOfParameters(self):\n return self.model.getListOfParameters()", "def param(self):\r\n\r\n return []", "def param(self):\r\n return []", "def _param_to_list(param: OptionalConfigUnitList) -> List[\"ConfigUnit\"]:\n if param is None:\n return []\n if isinstance(param, list):\n return param.copy()\n return [param]", "def get_parameters_list(self):\n return self.description[\"config\"][\"values\"].keys()", "def parameters(self):\n return []", "def values(self):\n return [i.value for i in self.items if isinstance(i, SQLParam)]", "def parameter_names(self) -> list:\n parameters = []\n parameters.extend(self.properties.parameter_names)\n return parameters", "def to_list(self):\n return copy.deepcopy(self._varvals)", "def parameter_names(self) -> List[str]:", "def get_params(self):\n return []", "def get_params_list():\n return common.QOL_PARAMS", "def _list_params(self, the_list: List):\n return [p for e in the_list for p in self._params(e)]", "def sensor_parameters_list(self):\n return list(self.params_f.keys()) + list(self.params_i.keys())", "def convert_params_to_list(self,params):\n result = {}\n for i in params:\n result_list = []\n result_list.append(params[i])\n result[i] = result_list\n return result", "def parameters(self):\n return [p for _, a in vars(self).items() for p in self._params(a)]", "def get_all_param_values(layer):\n params = get_all_params(layer)\n return [p.get_value() for p in params]", "def getListOfParameters(self, *args):\n return _libsbml.Model_getListOfParameters(self, *args)", "def params(self):\n params = []\n\n for item in self._definition.get('params', []):\n params.append(Parameter(**item))\n\n return params", "def get_values(self):\n \n return []", "def get_param_names(self):\n return list(self.params.keys())", "def _value(self) -> List[Any]:\n ret = [\n cast_to(val, self.subtype)\n for sublist in self._stack\n for val in sublist\n ]\n\n if self.required and not ret:\n raise PyParamValueError(ARGUMENT_REQUIRED)\n self._stack = []\n return ret", "def parameters(self) -> List[Parameter]:\n return self._parameters", "def as_list(self):\n return self._flattened_inputs", "def get_param_vals(self, parseq):\n\n vals = []\n for conf in self.sim_confs:\n val = conf[parseq]\n if val not in vals:\n vals.append(val)\n return vals", "def params(self) -> List[ParamSpec]:\n return self._params", "def get_params(node):\n if node.type == 'parameter':\n return [(self.descend(node.args[0]), types.translation[self.descend(node.args[1])])]\n else:\n l = []\n for p 
in node.args:\n l.extend(get_params(p))\n return l", "def argument_list(self):\n answer = self._call('argument_list')\n return answer.names", "def step_extract_parameters(self) -> list:\n result = []\n if self.has_step_field(\"task.parameters\"):\n for param in self.step_field(\"task.parameters\"):\n for key in param:\n result += [\"-p\", key, param[key]]\n return result", "def params(self):\n params = []\n\n for v in vars(self).values():\n params.extend(self.__computeParams(v))\n\n if isinstance(v, list):\n for p in v:\n params.extend(self.__computeParams(p))\n\n return params", "def build_parameters(self) -> List[str]:\n param_bits = []\n for name in self.parameters:\n param_bits.extend(self.build_parameter_by_name(name) or [])\n return param_bits", "def get_params_iter(self):\n return []", "def param_values(self, pnames=None):\n l = self.get_params(pnames)\n v = [p.__get__(self)() for p in l]\n return np.array(v)", "def get_required_parameters(self) -> list:\n results = []\n if self.no_params or self.params_optional:\n return []\n else:\n for parameter, parameter_details in self.parameters.items():\n # Fixing issue #92\n # if parameter == \"effect\":\n # continue\n if not parameter_details.default_value:\n results.append(parameter_details.name)\n return results", "def getParameterList(self, session: Session) -> TortugaObjectList:\n\n try:\n dbParameters = self._globalParametersDbHandler.getParameterList(\n session)\n\n return self.getTortugaObjectList(Parameter, dbParameters)\n except TortugaException:\n raise\n except Exception as ex:\n self._logger.exception(str(ex))\n raise", "def _f_in_parameters(self) -> List[Tuple[str, str]]:\n result = list() # type: List[Tuple[str, str]]\n for param in self.params:\n type_list = param.f_type()\n for type_name, postfix in type_list:\n result.append((type_name, param.name + postfix))\n return result", "def to_query_parameters_list(parameters):\n return [scalar_to_query_parameter(value) for value in parameters]", "def lfParams2paramsVec(params):\n paramsDict = params.valuesdict()\n paramsVec = [value for value in paramsDict.itervalues()]\n return paramsVec", "def parameters(self):\n return [term.parameter for term in self.terms]", "def get_params(self, pnames=None):\n l = []\n if pnames is None:\n pnames = self._params.keys()\n for pname in pnames:\n p = self._params[pname]\n if isinstance(p, Parameter):\n l.append(p)\n return l", "def values(self) -> list:\n return self.__values", "def params(self):\n return [p for sublist in [o.params for o in self.obs] for p in sublist]", "def values(self):\n return [i.value for i in self.value]", "def parameter_list(self):\n return [\n [encut, kpoint_mesh]\n for encut, kpoint_mesh in zip(\n self._job.iteration_frame.ENCUT, self._job.iteration_frame.KPOINT_MESH\n )\n ]", "def get_paramnames_list(self):\n # TODO include syselem?\n\n query = \"SELECT NAME FROM %s\" % self.__schema\n with self.__connection.cursor() as cursor:\n cursor.execute(query)\n result = cursor.fetchall()\n return [val['NAME'] for val in result]", "def parameters(self):\n return [i for i in self.variables if has_roles(i, Parameter)]", "def all_values(cls) -> List[str]:\n return list(member.value for member in cls.__members__.values())", "def return_parameter_names():\n return list(titles), list(labels)", "def getListOfParameters(self, *args):\n return _libsbml.KineticLaw_getListOfParameters(self, *args)", "def test_get_param_list(self):\n model = substitution_model.TimeReversibleNucleotide()\n self.assertEqual(model.get_param_list(), [])\n\n 
model = substitution_model.TimeReversibleNucleotide(\n predicates=[\"beta:transition\"]\n )\n self.assertEqual(model.get_param_list(), [\"beta\"])", "def list_params(ns):\n try:\n ns = make_global_ns(ns)\n names = get_param_server().getParamNames()\n names.sort()\n return [n for n in names if n.startswith(ns)]\n except socket.error:\n raise RosParamIOException(\"Unable to communicate with master!\")", "def __arg_list(self):\n arg = self.__arg()\n args = [arg]\n try:\n while not self.eol():\n self.match_value(Punctuator, \",\")\n arg = self.__arg()\n args.append(arg)\n except ParseError:\n pass\n return args", "def get_values(self) -> list:\r\n values = []\r\n for key, value in self._items:\r\n values.append(value)\r\n return values", "def get_value_list():\n return [some_random_number() for _ in range(some_random_number())]", "def params(self):\n return tuple(self._params)", "def _fc_in_parameters(self) -> List[str]:\n result = list() # type: List[str]\n\n for param in self.params:\n type_list = param.fc_type()\n for type_name, postfix in type_list:\n result.append('{} {}'.format(type_name, param.name + postfix))\n\n return result", "def GetFunctionParametersAndValues():\n frame = inspect.currentframe().f_back\n args, _, _, values = inspect.getargvalues(frame)\n return ([(i, values[i]) for i in args])", "def arg_to_str_list(self) -> list:\n arg_list = []\n for arg in [*self.args]:\n if hasattr(arg, \"_ref\"):\n arg_list.append(arg.ref)\n else:\n arg_list.append(arg)\n return arg_list", "def to_list(self):\n return [value for value in self.program.values()]", "def get_params(self):\n params = []\n params.append(('from', self._from))\n params.append(('to', self._to))\n\n return params", "def return_values(self):\r\n\r\n values = list(self.piDD.values())\r\n return values", "def values():", "def _get_values(self) -> ty.List[float]:\r\n ...", "def parameters(self):\n return self._params", "def get_unsorted_args_list(self):\n return self.__unsorted_args", "def parameters(self):\n return [o.parameters for o in self.obs]", "def list(self) -> List:\n return list(self.values())", "def toStrList(values, precision=None):\n\treturn list(map(lambda va: toStr(va, precision), values))", "def parameters(self):\n return [i.parameter for i in self.joints.values()]", "def get_params(self):\n return deepcopy(np.hstack([to_numpy(v).flatten() for v in\n self.parameters()]))", "def _to_list(value: Union[Dict[str, Any], List, Tuple, int], name=None, list_length=None):\n if not isinstance(value, (list, tuple)):\n if list_length is not None:\n value = [value] * list_length\n else:\n value = [value]\n if list_length is not None and len(value) != list_length:\n name = '' if name is None else name\n raise ValueError(\"hparams '%s' must be a list of length %d\" % (name, list_length))\n return value", "def listify_values(params):\n return dict((k, listify(v)) for (k, v) in params.iteritems())", "def get_optional_parameters(self) -> list:\n results = []\n if self.no_params or self.params_required:\n return []\n else:\n for parameter, parameter_details in self.parameters.items():\n # Fixing issue #92\n # if parameter == \"effect\":\n # continue\n if parameter_details.default_value:\n results.append(parameter_details.name)\n return results", "def to_list(self):\n import tc\n opts_list = []\n for k, v in self.__class__.__dict__.iteritems():\n if isinstance(v, tc.TC):\n opts_list.append((k, v))\n opts_list = sorted(opts_list)\n return opts_list", "def get_forward_parameter_list(self):\n parameterlist = []\n 
parameterlist.append(self.weights)\n if self.bias is not None:\n parameterlist.append(self.bias)\n return parameterlist", "def _get_parameter_list(self, raw_command): # pylint: disable=no-self-use\n contents = raw_command.split(' ')\n return [item for item in contents if item.startswith('-')]", "def get_params(hf):\n plist = []\n for p in hf['/input/params']:\n val = hf['/input/params'][p].value\n if type(val) != str:\n val = val.decode('UTF-8')\n plist.append(unpickle(val))\n return plist", "def param_names(\n self, *, include_tp: bool = False, include_gq: bool = False\n ) -> List[str]:\n return (\n self._param_names(self.model, int(include_tp), int(include_gq))\n .decode(\"utf-8\")\n .split(\",\")\n )", "def tolists(self):\n return self._times, self._values", "def _sorted_args(self):\n return self.args", "def fixture_sorted_param_names(allparams):\n return sorted(list(allparams.keys()))", "def values(self):\r\n return [self[k] for k in self]", "def get_params(self) -> torch.Tensor:\n params = []\n for pp in list(self.net.parameters()):\n params.append(pp.view(-1))\n return torch.cat(params)", "def _get_param_names(self):\r\n return sorted([p\r\n for p in self.__dict__\r\n if p != 'additional_args'])", "def tolist(self, flat=0):\n pass", "def __allowed_values_incorrect_list(self):\n strTestName = 'Values of a list (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'tuple')\n RxCSObject.paramAddMan('parameter2', 'list')\n\n RxCSObject.paramAllowed('parameter2', ('Allowed string #1', 'Allowed string #2', 3, 4, 11))\n RxCSObject.parameter1 = (1, 3, 4)\n RxCSObject.parameter2 = [11, 3, 'Allowed string #1', 'Allowed string #11']\n\n self.__parametersCheck_error(RxCSObject, AllowedValuesError, strTestName)", "def values(self) -> List[BaseValue]:\n raise NotImplementedError", "def get_parameters(parameters):\n\n arg_list = []\n opt_list = []\n for param in parameters:\n param_name = param['name']\n param_required = param['required']\n if param_required:\n arg_list.append(format_parameter(param_name, param_required))\n else:\n opt_list.append(format_parameter(param_name, param_required))\n\n return arg_list + opt_list", "def _split_parameters(self, parameters):\n if not parameters:\n return []\n return [parameter.strip() for parameter in parameters.split(', ')]" ]
[ "0.73683417", "0.7149095", "0.7077", "0.7076585", "0.7047869", "0.7043692", "0.7038208", "0.70156145", "0.7003395", "0.6993167", "0.6993167", "0.6947928", "0.69282645", "0.68996793", "0.68764114", "0.6848381", "0.6839425", "0.6823277", "0.6816432", "0.68062204", "0.6788033", "0.6782414", "0.6721201", "0.66666305", "0.6618802", "0.66168207", "0.661376", "0.65622723", "0.65390635", "0.649138", "0.6468455", "0.6464107", "0.64182967", "0.6401091", "0.63648343", "0.63602495", "0.63544995", "0.63376784", "0.6330309", "0.6330169", "0.6321826", "0.63111097", "0.6311044", "0.63062066", "0.630219", "0.62843674", "0.62798065", "0.6274918", "0.6272521", "0.62557256", "0.6234593", "0.62050396", "0.6195378", "0.6193846", "0.61860335", "0.6181652", "0.61517304", "0.6148104", "0.6128251", "0.61126965", "0.610623", "0.6099481", "0.6074993", "0.60705787", "0.60599333", "0.6058731", "0.60553694", "0.6051873", "0.60409945", "0.60275084", "0.6026624", "0.60123295", "0.5997733", "0.5992259", "0.59858036", "0.5985629", "0.5984335", "0.598147", "0.5957558", "0.5949212", "0.594833", "0.59448403", "0.5943605", "0.5942723", "0.59405595", "0.59398705", "0.59361213", "0.5935532", "0.5921178", "0.5917689", "0.5908643", "0.5907874", "0.5904974", "0.59020954", "0.58948916", "0.5894627", "0.5894045", "0.588626", "0.58859456", "0.58845764" ]
0.6143704
58
Load model from sequence of floats.
def fromlist(self, floats):
    self.header = {}
    params = [p for p in self]
    min_len = min(len(params), len(floats))
    for param, value in zip(params[:min_len], floats[:min_len]):
        param.value = value
    for param in params[min_len:]:
        param.value = param.default_value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load(self):\n for k,v in self.parameters.items():\n if isinstance(v,list):\n setattr(self,k,np.array(v,dtype=np.float32))\n else:\n setattr(self,k,v)", "def parse_model(f_name):\n if os.path.isfile(f_name):\n with open(f_name) as f:\n w = [[], [], [], [], []]\n for i, line in enumerate(f):\n for v in line.strip().split(\" \"):\n w[i].append(float(v))\n return np.matrix(w)\n else:\n error(\"parse model - not a file: %s\" % f_name)", "def load_data(self):\n return numpy.fromfile(self.data_fname, dtype=numpy.float32)", "def _synth_input(self, path, files):\n features = np.empty((0, 15))\n for i in range(len(files)):\n train_set = np.load(f'{path}coords/{files[i]}.npy')\n train_set = train_set.reshape((train_set.shape[0], -1))\n features = np.concatenate((features, train_set), axis=0)\n self.input_ = F.normalize(torch.tensor(np.array(features), dtype=torch.float32))", "def __init__(self, values: list[float]):\n self.values = values", "def from_list(cls, l):\n\n x, y, z = map(float, l)\n return cls(x, y, z)", "def put_float_list_to_feature(seq_example: tf.train.SequenceExample,\n value: Sequence[Sequence[float]], key: str):\n for s in value:\n seq_example.feature_lists.feature_list.get_or_create(\n key).feature.add().float_list.value[:] = s", "def read_model_list(self,filename):\n\n self.grid_params = config.grid_params\n\n # set the correct dimension:\n self.ndim = len(self.grid_params)\n\n # set prefix and postfix:\n listfile = open(filename,\"r\")\n line = listfile.readline().strip();\n columns = line.split()\n if (len(columns) < 1): sys.exit(\"Erroneous first line in %s.\"%(filename))\n self.prefix = columns[0]\n if (len(columns) > 1): self.postfix = columns[1]\n\n # read models and put them into evolutionary tracks:\n nmodels = 0\n nmodes = 0\n models_small_spectra = []\n for line in listfile:\n line = line.strip()\n columns = line.split()\n glb = np.empty((nglb,),dtype = gtype)\n glb[imass] = utilities.to_float(columns[1])\n glb[iradius] = utilities.to_float(columns[2])\n glb[iluminosity] = utilities.to_float(columns[3])\n glb[iz0] = utilities.to_float(columns[4])\n glb[ix0] = utilities.to_float(columns[5])\n glb[iage] = utilities.to_float(columns[6])\n glb[itemperature] = utilities.to_float(columns[7])\n\n i = 8\n for (name, name_latex) in config.user_params:\n glb[user_params_index[name]] = utilities.to_float(columns[i])\n i += 1\n\n # print glb[0]\n aModel = Model(glb, _name = columns[0])\n exceed_freqlim = aModel.read_file(self.prefix + columns[0] + self.postfix)\n aModel.multiply_modes(1.0/aModel.glb[ifreq_ref]) # make frequencies non-dimensional\n aModel.sort_modes()\n aModel.remove_duplicate_modes()\n for track in self.tracks:\n if (track.matches(aModel)):\n track.append(aModel)\n break\n else:\n aTrack = Track(aModel,self.grid_params)\n self.tracks.append(aTrack)\n nmodels += 1\n nmodes += len(aModel.modes)\n if (not exceed_freqlim):\n models_small_spectra.append(aModel.name)\n print(nmodels, nmodes)\n listfile.close()\n\n # right list of models with spectra which are too small in a file:\n output = open(\"models_small_spectra\",\"w\")\n for name in models_small_spectra: output.write(name+\"\\n\")\n output.close()\n\n # sort tracks:\n for track in self.tracks: track.sort()\n\n # sanity check:\n for track in self.tracks:\n duplicate = track.duplicate_ages()\n if duplicate[0]:\n print(\"ERROR: the track \",track.grid_params,\" = \",track.params)\n print(\" has models with the same age. 
Please remove\")\n print(\" duplicate models.\")\n print(\" Check models:\", duplicate[1], duplicate[2])\n sys.exit(1)\n\n # update list of indices:\n self.ndx = range(len(self.tracks))\n\n # need to create grid from scratch since tracks have been sorted.\n self.grid = np.asarray([track.params for track in self.tracks])", "def read_floats(filepointer):\n\tdata = read_strings(filepointer)\n\tif not data:\n\t\treturn None\n\ttry:\n\t\tdata = [float(x) for x in data]\n\t\treturn data\n\texcept:\n\t\t# try the next line\n\t\treturn read_floats(filepointer)", "def load_single_lstm_model(device, path):\n saved_model_data = torch.load(path, map_location=device)\n train_args = saved_model_data['args']\n model = build_eval_model_from_args(train_args, saved_model_data, device)\n return [model, train_args]", "def load_train_x(train_x_path):\n \n text = open(train_x_path, 'r')\n row = csv.reader(text , delimiter=\",\")\n x = []\n n_row = 0\n for r in row:\n if n_row != 0:\n for j in range(23):\n x.append(float(r[j]))\n n_row += 1\n text.close()\n x = np.array(x)\n x = np.reshape(x, (20000,23))\n \n return x", "def test_source_with_float_value():\n source = festim.Source(2.0, volume=1, field=\"solute\")\n assert isinstance(source.value, f.Constant)", "def floats(self) -> List[NumericType]:\n return [float(v) for v in self._record]", "def __load_raw_data(path: str,\n filename: str):\n filepath = os.path.join(path, filename)\n f = open(filepath)\n data = f.read()\n f.close()\n\n lines = data.split('\\n')\n header = lines[0].split(',')\n lines = lines[1:]\n\n float_data = np.zeros((len(lines), len(header) - 1))\n for i, line in enumerate(lines):\n values = [float(x) for x in line.split(',')[1:]]\n float_data[i, :] = values\n\n return float_data", "def load_model(self, filename):\r\n pass", "def __init__(self, input_stream, load_all_models=False, extraParticleIdentifier='EP'):\n # initialize models\n self.load_all_models = load_all_models\n self.extraParticleIdentifier = extraParticleIdentifier\n self.models = []\n self._current_model = None\n self.default_model = None\n self.models_by_number = {}\n self._periodic_box_vectors = None\n self.sequences = []\n self.modified_residues = []\n # read file\n self._load(input_stream)", "def load_training(file_name, target_val, training_data, training_targets, \n elements):\n\n file = open(file_name, \"r\")\n\n # Iterate over file until empty line recieved\n while True:\n chunk = file.readline()\n\n if(chunk == ''):\n break\n\n ret = load_chunk(chunk, elements)\n\n training_targets.append(target_val)\n\n # Convert data to frequency domain using fft()\n training_data.append([i.real for i in fft(ret)])", "def load(cls, model_path: str, sample_shape: tuple = None,\n checkpoint: str = None, **kwargs):", "def load(path_to_model):\n pass", "def load_NMF_model():\n model = pickle.load(open(\"models/nmf_model.sav\", 'rb'))\n Q = model.components_ \n return model, Q", "def load_model(self) -> Any:", "def load_model(self, file_name):\n with open(file_name, 'rb') as file:\n self.lin_reg = pickle.load(file)", "def loadModel(self):\n for feature in self.features:\n featureName = feature[\"name\"]\n probabilities = repository.readProbabilities(self.modelName, featureName, self.modelClass)\n probabilities = probabilities.set_index(self.modelClass)\n\n modelForFeature = {\n \"probabilities\": probabilities\n }\n self.model[featureName] = modelForFeature", "def set(self, model=None):\n if isinstance(model, Model):\n if not isinstance(model, type(self)):\n raise BadModelFile('Cannot 
construct a %r from a %r' %\n (self.__class__.__name__,\n model.__class__.__name__))\n self.fromlist(model.values())\n self.header = dict(model.header)\n elif isinstance(model, basestring):\n self.fromstring(model)\n else:\n array = np.atleast_1d(model)\n if array.dtype.kind in 'iuf' and array.ndim == 1:\n self.fromlist(model)\n elif model is not None:\n self.fromfile(model)\n else:\n self.fromlist([])", "def _load_model(self):\n self._load_scaler('scaler.save')\n self._load_encoder('encoder0.save', 0)\n self._load_encoder('encoder1.save', 1)\n self._load_neural_network('model.json', 'model.h5')\n return", "def load_rf_data(filename):\n A = np.loadtxt(filename, dtype=\"float32\", delimiter=\",\")\n\n X = A[:, :10]\n y = A[:, -1]\n\n return X, y", "def read_float(filename):\n\tf = open(filename, \"r\")\n\tarr = np.fromfile(f, dtype='>f4')\n\treturn arr", "def load_csv(fn):\n def iter_func():\n with open(fn, 'r') as infile:\n for line in infile:\n line = line.rstrip().split(',')\n for item in line:\n yield float(item)\n load_csv.rowlength = len(line)\n data = np.fromiter(iter_func(), dtype=float)\n data = data.reshape((-1, load_csv.rowlength))\n return data", "def _float_feature(value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))", "def _float_feature(value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))", "def float_feature(value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))", "def float_feature(value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))", "def float_feature(value):\n if not isinstance(value, list) and not isinstance(value, np.ndarray):\n value = [value]\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))", "def load_model(self, model_path: str):", "def load(self, uri):\r\n self._encoder = load_model(uri+\"_lstm_encoder.hdf5\")\r\n self._autoencoder = load_model(uri+\"_lstm_autoencoder.hdf5\")\r\n\r\n pf = PyFolder(os.path.dirname(os.path.realpath(uri)))\r\n dict_options = pf[os.path.basename(uri)+\"_options.json\"]\r\n\r\n self._latent_space = dict_options['latent_space']\r\n self._input_cells = dict_options['input_cells']", "def _float_feature(value):\n value = _ensure_list(value)\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))", "def load_seq_model():\n model = joblib.load(os.path.join(os.path.dirname(__file__), 'RuleSet3.pkl'))\n return model", "def load(self):\n\n x = [] # input documents (n_docs, max_seq_len)\n labels = [] # targets we are predicting for each input\n\n for file_path in glob.glob(self.train_dir + '*.txt'):\n tokens = read_tokens(file_path)\n unique = list(set(tokens))\n x_count = round(len(unique) * 0.85)\n\n for _ in range(self.samples_per_doc):\n random.shuffle(unique)\n x.append(' '.join(unique[:x_count]))\n labels.append(' '.join(unique[x_count:]))\n\n # make x and y\n pkl = open('Model/tokenizer.p', 'rb')\n self.tokenizer = pickle.load(pkl)\n x = self.tokenizer.texts_to_matrix(x, mode='binary')\n y = self.tokenizer.texts_to_matrix(labels, mode='binary')\n\n # column zero is empty\n return x, y[:,1:]", "def read_model(input_file):\n with open(input_file) as inp:\n labels = inp.readline().strip().split(\" \")\n init_conc = np.array(list(map(float, inp.readline().strip().split(\" \"))))\n\n stoich = []\n for i in 
range(len(labels)):\n stoich.append(list(map(float, inp.readline().strip().split(\" \"))))\n S_matrix = np.array(stoich)\n\n educt = []\n for i in range(len(labels)):\n educt.append(list(map(float, inp.readline().strip().split(\" \"))))\n educt_matrix = np.array(educt)\n\n kin_par = np.array(list(map(float, inp.readline().strip().split(\" \"))))\n t_T, t_eval_step = list(map(float, inp.readline().strip().split(\" \")))\n\n return labels, init_conc, S_matrix, educt_matrix, kin_par, t_T, t_eval_step", "def load_file(file_name) -> np.ndarray:\r\n reader = csv.reader(open(file_name, \"r\"), delimiter=',')\r\n x_rdr = list(reader)\r\n return np.array(x_rdr).astype('float')", "def load_examples():\n X = []\n Y = []\n with open('examples.txt') as fin:\n for i, line in enumerate(fin):\n if line[0].isdigit():\n bias, pos, neg, label = map(float, line.strip().split(','))\n X.append([bias, pos, neg])\n Y.append(label)\n X = np.array(X)\n Y = np.array(Y).reshape(i, 1)\n return X, Y", "def import_data(fndata):\n with open(fndata, 'rb') as f:\n # split lines\n lsdata = [line.split(',') for line in f.read().splitlines()]\n # map to float\n lsdata = [map(float, row) for row in lsdata]\n\n # use numpy array\n arrdata = np.array(lsdata)\n\n return arrdata", "def load_model():\n global columns\n global data\n \n model = pickle.load(open('MedCostModel.pkl', 'rb'))\n data = pd.read_csv('MedCosts.csv')\n data = data.drop(columns=['charges'])\n columns = data.columns\n return(model)", "def load_data(filename):\n data = []\n with open('data/' + filename) as raw_data:\n for line in raw_data.readlines():\n data.append(float(line.strip('\\n')))\n return data\n # data = np.mat(np.genfromtxt('data/' + filename)).T\n # return data", "def load_model(seriesname):\n LOG.debug(\"Calling load_model() with the following arguments:\")\n LOG.debug(\"seriesname = %s\"%seriesname)\n \n result = []\n return result", "def test_regularization_is_float(self):\n reg = modelgen.get_regularization(0, 5)\n assert type(reg) == np.float", "def load_model(self, **params):\n \t# file_name = params['name']\n # return pickle.load(gzip.open(file_name, 'rb'))", "def convert_stream(self, stream):\n return np.fromstring(stream, \"Float32\")", "def load(cls, f, model, ext_unit_dict=None):\n msg = (\n \"Model object must be of type flopy.mfusg.MfUsg\\n\"\n f\"but received type: {type(model)}.\"\n )\n assert isinstance(model, MfUsg), msg\n\n if model.verbose:\n print(\"loading bcf package file...\")\n\n f_obj = get_open_file_object(f, \"r\")\n\n # dataset 0 -- header\n while True:\n line = f_obj.readline()\n if line[0] != \"#\":\n break\n\n # determine problem dimensions\n nlay = model.nlay\n dis = model.get_package(\"DIS\")\n if dis is None:\n dis = model.get_package(\"DISU\")\n njag = dis.njag\n\n # Item 1: ipakcb, HDRY, IWDFLG, WETFCT, IWETIT, IHDWET - line already read above\n if model.verbose:\n print(\" loading ipakcb, HDRY, IWDFLG, WETFCT, IWETIT, IHDWET...\")\n text_list = line_parse(line)\n ipakcb, hdry, iwdflg, wetfct, iwetit, ihdwet = (\n int(text_list[0]),\n float(text_list[1]),\n int(text_list[2]),\n float(text_list[3]),\n int(text_list[4]),\n int(text_list[5]),\n )\n\n ikvflag = type_from_iterable(\n text_list, index=6, _type=int, default_val=0\n )\n ikcflag = type_from_iterable(\n text_list, index=7, _type=int, default_val=0\n )\n\n # LAYCON array\n laycon, intercellt = cls._load_laycon(f_obj, model)\n\n # TRPY array\n if model.verbose:\n print(\" loading TRPY...\")\n trpy = Util2d.load(\n f_obj, model, (nlay,), np.float32, \"trpy\", 
ext_unit_dict\n )\n\n # property data for each layer based on options\n transient = not dis.steady.all()\n anis = any(t != 1 for t in trpy)\n anglex = 0\n if (not model.structured) and anis:\n if model.verbose:\n print(\"loading ANGLEX...\")\n anglex = Util2d.load(\n f_obj, model, (njag,), np.float32, \"anglex\", ext_unit_dict\n )\n\n # hy, kv, storage\n (sf1, tran, hy, vcont, sf2, wetdry, kv) = cls._load_layer_arrays(\n f_obj,\n model,\n nlay,\n ext_unit_dict,\n transient,\n laycon,\n ikvflag,\n ikcflag,\n iwdflg,\n )\n\n # Ksat mfusg\n ksat = 0\n if (not model.structured) and abs(ikcflag == 1):\n if model.verbose:\n print(\" loading ksat (njag)...\")\n ksat = Util2d.load(\n f_obj, model, (njag,), np.float32, \"ksat\", ext_unit_dict\n )\n\n f_obj.close()\n\n # set package unit number\n unitnumber, filenames = get_unitnumber_from_ext_unit_dict(\n model, cls, ext_unit_dict, ipakcb\n )\n\n # create instance of bcf object\n bcf = cls(\n model,\n ipakcb=ipakcb,\n intercellt=intercellt,\n laycon=laycon,\n trpy=trpy,\n hdry=hdry,\n iwdflg=iwdflg,\n wetfct=wetfct,\n iwetit=iwetit,\n ihdwet=ihdwet,\n ikvflag=ikvflag,\n ikcflag=ikcflag,\n tran=tran,\n hy=hy,\n vcont=vcont,\n kv=kv,\n anglex=anglex,\n ksat=ksat,\n sf1=sf1,\n sf2=sf2,\n wetdry=wetdry,\n unitnumber=unitnumber,\n filenames=filenames,\n )\n\n # return bcf object\n return bcf", "def load_weights_model(self, list_path):\n [path_encoder, path_decoder, path_discriminator, path_scaler] = list_path\n self.encode_.load_weights(path_encoder)\n self.decode_.load_weights(path_decoder)\n self.b.load_weights(path_discriminator)\n self.scaler = joblib.load(path_scaler)", "def load(fh, model):\n graphs = penman.load(fh, cls=XMRSCodec)\n xs = [model.from_triples(g.triples()) for g in graphs]\n return xs", "def _load_model_from_trained_params(self):\n self.ent_emb = tf.constant(self.trained_model_params[0])\n self.rel_emb = tf.constant(self.trained_model_params[1])", "def read_input_float_feature(feature_map, key, shape):\n if shape is None:\n (dim_z, dim_y, dim_x) = feature_map.feature[key + '/dim'].int64_list.value\n else:\n (dim_z, dim_y, dim_x) = shape\n tensor = np.array(feature_map.feature[key].float_list.value[:]).reshape(\n dim_z, dim_y, dim_x)\n return tensor", "def test_op_fillfrom_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n offl_r = stream.empty_like(a)\n offl_r.fillfrom(a)\n r = offl_r.update_host().array\n stream.sync()\n self.assertTrue((a == r).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(a, r))", "def load_model(self, model_as_bytes: bytes) -> None:\n\n self.model = deserialize_from_zippy(model_as_bytes)", "def load_model(self, fname: str) -> None:\n checkpoint_data = torch.load(fname)\n\n # Load the models\n # P-Net\n model_import_path = checkpoint_data['p_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['p_net']['model_name'])\n self.p_net = mod()\n self.p_net.set_params(checkpoint_data['p_net'])\n # Q-Net\n model_import_path = checkpoint_data['q_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['q_net']['model_name'])\n self.q_net = mod()\n self.q_net.set_params(checkpoint_data['q_net'])", "def read_models(path):\n columns = ['model index', 'rmse', 'mae', '_', 'feature id']\n\n def clean_func(string):\n return int(string.replace(')', ''))\n model_df = pd.read_csv(path,\n 
delim_whitespace=True, \n index_col=0,\n header=0,\n names=columns)\n del model_df['_'] # TODO: better handling of this SISSO formatting\n model_df = model_df.dropna()\n model_df['feature id'] = model_df['feature id'].apply(clean_func)\n model_df = model_df.apply(pd.to_numeric)\n return model_df", "def _float_feature(value):\n if isinstance(value, list):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))", "def is_sequence_of_float(items):\n return all(isinstance(item, float) for item in items)", "def _float_feature(value):\n if isinstance(value, list):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n else:\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))", "def loadmodel(fname):\n if not fname.endswith('.pickle.gz'):\n fname = fname + '.pickle.gz'\n with gzip.open(fname, 'r') as fin:\n D = load(fin)\n print 'Load model from file: {}'.format(fname)\n return D", "def load_data(self, features=None, labels=None):\n if features is None or labels is None:\n self._features = None\n self._labels = None\n return\n if len(features) != len(labels):\n raise DataMismatchError('Features and labels lists are different lengths')\n try:\n self._features = np.array(features, dtype=float)\n self._labels = np.array(labels, dtype=float)\n except ValueError:\n self._features = None\n self._labels = None\n raise ValueError('Label and feature lists must be homogeneous (same data type)'\n 'and numeric (i.e integers and floats) list of lists')", "def deserialize_numpy(self, str, numpy):\n try:\n if self.model is None:\n self.model = articulation_msgs.msg.ModelMsg()\n if self.data is None:\n self.data = articulation_msgs.msg.ModelMsg()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.model.header.seq, _x.model.header.stamp.secs, _x.model.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.model.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.model.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model.name = str[start:end].decode('utf-8')\n else:\n self.model.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.params = []\n for i in range(0, length):\n val1 = articulation_msgs.msg.ParamMsg()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _x = val1\n start = end\n end += 9\n (_x.value, _x.type,) = _struct_dB.unpack(str[start:end])\n self.model.params.append(val1)\n _x = self\n start = end\n end += 12\n (_x.model.track.header.seq, _x.model.track.header.stamp.secs, _x.model.track.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model.track.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.model.track.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.model.track.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n 
self.model.track.pose = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v43 = val1.position\n _x = _v43\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v44 = val1.orientation\n _x = _v44\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model.track.pose.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose_headers = []\n for i in range(0, length):\n val1 = std_msgs.msg.Header()\n start = end\n end += 4\n (val1.seq,) = _struct_I.unpack(str[start:end])\n _v45 = val1.stamp\n _x = _v45\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.frame_id = str[start:end].decode('utf-8')\n else:\n val1.frame_id = str[start:end]\n self.model.track.pose_headers.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose_projected = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v46 = val1.position\n _x = _v46\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v47 = val1.orientation\n _x = _v47\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model.track.pose_projected.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose_resampled = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v48 = val1.position\n _x = _v48\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v49 = val1.orientation\n _x = _v49\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model.track.pose_resampled.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sI'%length\n start = end\n end += struct.calcsize(pattern)\n self.model.track.pose_flags = numpy.frombuffer(str[start:end], dtype=numpy.uint32, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.channels = []\n for i in range(0, length):\n val1 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.values = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)\n self.model.track.channels.append(val1)\n _x = self\n start = end\n end += 12\n (_x.data.header.seq, _x.data.header.stamp.secs, _x.data.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.data.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.data.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data.name = str[start:end].decode('utf-8')\n else:\n self.data.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n 
self.data.params = []\n for i in range(0, length):\n val1 = articulation_msgs.msg.ParamMsg()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _x = val1\n start = end\n end += 9\n (_x.value, _x.type,) = _struct_dB.unpack(str[start:end])\n self.data.params.append(val1)\n _x = self\n start = end\n end += 12\n (_x.data.track.header.seq, _x.data.track.header.stamp.secs, _x.data.track.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data.track.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.data.track.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.data.track.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v50 = val1.position\n _x = _v50\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v51 = val1.orientation\n _x = _v51\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data.track.pose.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose_headers = []\n for i in range(0, length):\n val1 = std_msgs.msg.Header()\n start = end\n end += 4\n (val1.seq,) = _struct_I.unpack(str[start:end])\n _v52 = val1.stamp\n _x = _v52\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.frame_id = str[start:end].decode('utf-8')\n else:\n val1.frame_id = str[start:end]\n self.data.track.pose_headers.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose_projected = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v53 = val1.position\n _x = _v53\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v54 = val1.orientation\n _x = _v54\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data.track.pose_projected.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose_resampled = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v55 = val1.position\n _x = _v55\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v56 = val1.orientation\n _x = _v56\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data.track.pose_resampled.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sI'%length\n start = end\n end += struct.calcsize(pattern)\n self.data.track.pose_flags = numpy.frombuffer(str[start:end], dtype=numpy.uint32, count=length)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.channels = []\n for i in range(0, length):\n val1 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n 
(length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.values = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)\n self.data.track.channels.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def __init__(self, height = None, width = None, ratio=None, type=None):\n \n self.dF = []\n self.feature = []\n self.Class = []\n self.featureNumpy = []\n self.ClassNumpy = []\n \n self.model = []\n \n self.fTrain = []\n self.fTest = []\n self.cTrain = []\n self.cTest = []", "def load_data(self, filename='', limit=0.75, delim=','):\n with open(filename) as data:\n reader = csv.reader(data, delimiter=delim)\n f = list(reader)\n for x in range(len(f)):\n for y in range(len(f[0]) - 1):\n f[x][y] = float(f[x][y]) # convert elements of each array to type float except the last one\n\n lim = limit * (len(f)) # calculate where the training data and test data are divided\n lim = int(lim) # convert limit for indexing purposes\n results = (f[:lim], f[lim:]) # append training data and test data to tuple\n\n # for x in range(len(f)):\n # print(f[x])\n\n del f # delete f array which was temporary\n\n return results # return value", "def read_model_data(model, filename):\n filename = os.path.join('./', '%s.%s' % (filename, PARAM_EXTENSION))\n with open(filename, 'r') as f:\n data = pickle.load(f)\n lasagne.layers.set_all_param_values(model, data)", "def load_model_params(self, full_path):\n \n print(\"Loading model parameters from %s\"%full_path)\n with open (full_path, 'rb') as f:\n \n self.theta = cPickle.load(f)\n \n if self.num_hidden == True or (self.num_hidden > 0):\n \n self.W, self.b, self.bhid = self.theta\n \n else:\n \n self.W, self.b = self.theta", "def __test_float(self, bk):\n for arg in self.args['float']:\n print(\"\\nTesting:\", arg)\n ds = ArgoDataFetcher(backend=bk).float(arg).to_xarray()\n assert isinstance(ds, xr.Dataset) == True", "def load_norm_file(fname):\n try:\n with open(fname, 'r') as fh:\n lines = fh.readlines()\n norms = [float(ll.strip().split()[0]) for ll in lines]\n return norms\n except:\n return []", "def _load_data(self, ti, tf):\n # return pre loaded\n try:\n if ti == self.ti_prev and tf == self.tf_prev:\n return self.fM, self.ys\n except AttributeError:\n pass\n\n # read from CSV file\n try:\n t = pd.to_datetime(pd.read_csv(self.featfile, index_col=0, parse_dates=['time'], usecols=['time'], infer_datetime_format=True).index.values)\n if (t[0] <= ti) and (t[-1] >= tf):\n self.ti_prev = ti\n self.tf_prev = tf\n fM,ys = self._extract_features(ti,tf)\n self.fM = fM\n self.ys = ys\n return fM,ys\n except FileNotFoundError:\n pass\n\n # range checking\n if tf > self.data.tf:\n raise ValueError(\"Model end date '{:s}' beyond data range '{:s}'\".format(tf, self.data.tf))\n if ti < self.data.ti:\n raise ValueError(\"Model start date '{:s}' predates data range '{:s}'\".format(ti, self.data.ti))\n \n # divide training period into years\n ts = [datetime(*[yr, 1, 1, 0, 0, 0]) for yr in list(range(ti.year+1, tf.year+1))]\n if ti - self.dtw < self.data.ti:\n ti = self.data.ti + self.dtw\n ts.insert(0,ti)\n ts.append(tf)\n\n for t0,t1 in zip(ts[:-1], ts[1:]):\n print('feature extraction {:s} to {:s}'.format(t0.strftime('%Y-%m-%d'), t1.strftime('%Y-%m-%d')))\n fM,ys = self._extract_features(ti,t1)\n\n self.ti_prev = ti\n self.tf_prev = tf\n self.fM = fM\n self.ys = ys\n return fM, ys", "def read_basel(fname):\n f = open(fname)\n 
elems = f.read().split()\n lamb = np.zeros(1221, dtype=FTYPE)\n for i in xrange(0, 1221):\n lamb[i] = float(elems[i])\n\n flux, modelno, teff, logg, mh, vturb, xh = [], [], [], [], [], [], []\n ite = iter(elems[1221:])\n try:\n while True:\n modelno.append( float(ite.next()) )\n teff.append( float(ite.next()) )\n logg.append( float(ite.next()) )\n mh.append( float(ite.next()) )\n vturb.append( float(ite.next()) )\n xh.append( float(ite.next()) )\n tmpflux = np.zeros(1221, dtype=FTYPE)\n for i in xrange(1221):\n tmpflux[i] = float(ite.next())\n flux.append(tmpflux)\n except StopIteration:\n pass\n return (np.asarray(lamb, dtype=FTYPE), np.asarray(flux, dtype=FTYPE))", "def load_dataset(sequence_length=10):\n train_x = []\n train_y = []\n notes_to_emotion = []\n song_index_to_notes = get_notes()\n song_index_to_emotion = get_emotions()\n\n for index, notes in song_index_to_notes.items():\n if index in song_index_to_emotion:\n notes_to_emotion.append((notes, song_index_to_emotion[index]))\n\n for notes, emotion in notes_to_emotion:\n # get all pitch names\n pitchnames = sorted(set(item for item in notes))\n\n # create a dictionary to map pitches to integers\n note_to_int = dict((note, number) for number, note in enumerate(pitchnames))\n for i in range(0, int(len(notes)) - sequence_length):\n music_in = notes[i: i + sequence_length]\n train_x.append([note_to_int[char] for char in music_in])\n train_y.append(emotion)\n\n print(\"train_x has shape: \", len(train_x))\n print(\"train_y has shape: \", len(train_y))\n\n return (np.asarray(train_x), np.asarray(train_y))", "def read_floats(self, count=1, location=None):\n return_vals = []\n byteorder = {'little':'<f', 'big':'>f'}[self._byteorder]\n if self._tiff is not None:\n off = self._offset\n if location is not None:\n off = location\n for c in range(count):\n return_vals.append(unpack_from(byteorder, self._tiff[off:off+4])[0])\n off += 4# size\n if location is None:\n self._offset += (count * 4) #size)\n return return_vals", "def read_model_data(model, filename):\n filename = os.path.join('./', '%s.%s' % (filename, 'params'))\n with open(filename, 'r') as f:\n data = pickle.load(f)\n lasagne.layers.set_all_param_values(model, data)", "def mapper(line): \n feats = line.strip().split(\",\") \n # labels must be at the beginning for LRSGD\n label = feats[len(feats) - 1] \n feats = feats[: len(feats) - 1]\n feats.insert(0,label)\n features = [ float(feature) for feature in feats ] # need floats\n return np.array(features)", "def read_data(self, path):\n if self.data_format == 'twenty': \n length = 20\n else: raise ValueError(\"self.data_format = '%s' unknown.\" % \n self.data_format)\n data = []\n with open(path,'r') as f:\n for line in f:\n data.append([float(line[k:(k + length)]) for k in range(\n 0, len(line.strip('\\n')),length)])\n return np.array(data)", "def loadModel(self, modelFilepath):\n \n fl = open(modelFilepath, 'rb')\n saveDict = pickle.load(fl)\n fl.close()\n\n self.memorySize = saveDict['memorySize']\n self.windowSize = saveDict['windowSize']\n self.optimizer = saveDict['optimizer']\n self.inputDimension = saveDict['inputDimension']\n self.encoderStateSize = saveDict['encoderStateSize']\n self.lstmStateSize = saveDict['lstmStateSize']\n self.memory = saveDict['memory']\n self.q = saveDict['q']\n\n self.gruEncoder = tf.keras.layers.GRUCell(units = self.encoderStateSize)\n self.gruEncoder.build(input_shape = (self.inputDimension,))\n self.gruEncoder.set_weights(saveDict['gruEncoder'])\n\n self.lstm = tf.keras.layers.LSTMCell(units = 
self.encoderStateSize)\n self.lstm.build(input_shape = (self.inputDimension,))\n self.lstm.set_weights(saveDict['lstm'])\n\n self.W = tf.Variable(saveDict['W'])\n self.A = tf.Variable(saveDict['A'])\n self.b = tf.Variable(saveDict['b'])", "def load(self, model_dir, use_text=True, use_history=True, use_network=True, delimiter=\",\"):\n self._clear_cache()\n # TODO: load parameters from filename!!!\n train_parts = []\n test_parts = []\n #load text feature matrix\n if use_text:\n tr_text = load_npz(os.path.join(model_dir, \"train_text.npz\"))\n te_text = load_npz(os.path.join(model_dir, \"test_text.npz\"))\n train_parts.append(tr_text.toarray())\n test_parts.append(te_text.toarray())\n print(\"text\", tr_text.shape, te_text.shape)\n #load history feature matrix\n if use_history:\n tr_history = np.loadtxt(os.path.join(model_dir, \"train_history.csv\"), delimiter=delimiter)\n te_history = np.loadtxt(os.path.join(model_dir, \"test_history.csv\"), delimiter=delimiter)\n train_parts.append(tr_history)\n test_parts.append(te_history)\n print(\"history\", tr_history.shape, te_history.shape)\n #load node embeddings\n if use_network:\n tr_network = np.loadtxt(os.path.join(model_dir, \"train_network.csv\"), delimiter=delimiter)\n te_network = np.loadtxt(os.path.join(model_dir, \"test_network.csv\"), delimiter=delimiter)\n train_parts.append(tr_network)\n test_parts.append(te_network)\n print(\"network\", tr_network.shape, te_network.shape)\n #concatenation\n X_tr = np.concatenate(train_parts, axis=1)\n X_te = np.concatenate(test_parts, axis=1)\n print(\"After concatenation:\", X_tr.shape, X_te.shape)\n #load labels\n self.tr_label = np.loadtxt(os.path.join(model_dir, \"train_label.csv\"), delimiter=delimiter)\n self.te_label = np.loadtxt(os.path.join(model_dir, \"test_label.csv\"), delimiter=delimiter)\n assert len(self.tr_label) == len(X_tr)\n assert len(self.te_label) == len(X_te)\n #load meta\n self.tr_meta = pd.read_csv(os.path.join(model_dir, \"train_meta.csv\"), delimiter=delimiter)\n self.te_meta = pd.read_csv(os.path.join(model_dir, \"test_meta.csv\"), delimiter=delimiter)\n assert len(self.tr_meta) == len(X_tr)\n assert len(self.tr_meta) == len(X_tr)\n return X_tr, X_te", "def load_data(model_path):\n x_arrays = []\n y_arrays = []\n for partition in iter_embeddings(model_path):\n h5f = h5py.File(partition, 'r')\n X = h5f[\"embeddings\"][:]\n x_arrays.append(X)\n try:\n Y = h5f[\"labels\"][:]\n y_arrays.append(Y)\n except KeyError:\n print(\"Labels not defined\")\n if len(y_arrays) > 0:\n X = np.vstack(x_arrays)\n Y = np.hstack(y_arrays)\n return X, Y\n else:\n X = np.vstack(x_arrays)\n Y = np.zeros(len(X))\n return X, Y", "def load(fname):\n with open(fname, 'rb') as inp:\n model = pickle.load(inp)\n\n if type(model) != NeuralNetwork:\n raise ImportError('Given file is not a neural network')\n\n return model", "def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))", "def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))", "def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))", "def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))", "def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))", "def loadAll(self, path):\n self.model = keras.models.load_model(path+\"/model\")\n with open(path + \"/modelConfig.json\") as f:\n config = json.load(f)\n firstLayerConfig = 
config['config']['layers'][0]['config']\n lastLayerConfig = config['config']['layers'][-1]['config']\n self.lookBack = firstLayerConfig['batch_input_shape'][-1]\n self.forecast = lastLayerConfig['units']", "def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))", "def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))", "def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))", "def load():\n filepath = dirname(abspath(__file__))\n data = recfromtxt(filepath + '/scotvote.csv', delimiter=\",\",\n names=True, dtype=float, usecols=(1,2,3,4,5,6,7,8))\n names = list(data.dtype.names)\n endog = array(data[names[0]], dtype=float)\n endog_name = names[0]\n exog = column_stack(data[i] for i in names[1:]).astype(float)\n exog_name = names[1:]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "def _load_model(self):\n with open(self.filepath, 'rb') as file:\n self.cmodel = pickle.load(file)", "def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))", "def load(self) -> None:\n self._load_data()\n self._load_poses()\n self._load_timestamps()", "def load_data(self, f): \n self.sampling = True\n self.reads = np.load(f)\n self.total = self.reads.shape[0]", "def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))", "def _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))", "def load_X_scaler(self, out_tag='lstm_scaler'): \n\n print ('loading X scaler: models/{}_X_scaler.pkl'.format(out_tag))\n self.X_scaler = load(open('models/{}_X_scaler.pkl'.format(out_tag),'rb'))", "def _float_feature(value):\n\treturn tf.train.Feature(float_list=tf.train.FloatList(value=[value]))", "def load_regain_values(filename):\n gain_lines = open(filename,\"r\").readlines()\n gain_lines = [l.split() for l in gain_lines if len(l)>0 and l[0]!='#'] #remove comments and blanks\n tubes,gain_vals = zip(*[(int(l[0]),float(l[1])) for l in gain_lines])\n return Array(gain_vals)", "def read_line(l):\n return [read_float(l[s]) for s in slices['data']]" ]
[ "0.59494925", "0.5694197", "0.56045514", "0.5498191", "0.5488356", "0.5477136", "0.5466593", "0.54611206", "0.54511726", "0.54432213", "0.54343504", "0.5339388", "0.5335935", "0.5305303", "0.52712786", "0.5249649", "0.5237072", "0.5233914", "0.52248335", "0.5221053", "0.52194697", "0.52110696", "0.5199739", "0.5192997", "0.51913345", "0.5178447", "0.51667887", "0.51628536", "0.5158788", "0.5158788", "0.515743", "0.515743", "0.51573116", "0.5154916", "0.5153573", "0.51475024", "0.51401746", "0.51372313", "0.51349944", "0.51342905", "0.5133692", "0.5133256", "0.5131354", "0.51293916", "0.5126232", "0.51243705", "0.51211536", "0.51187474", "0.51141906", "0.5110785", "0.50968206", "0.5079057", "0.5078247", "0.5077829", "0.5075916", "0.50749177", "0.50721246", "0.50699747", "0.50668645", "0.50641394", "0.505902", "0.50570875", "0.50311786", "0.5030595", "0.5024053", "0.50224304", "0.50215226", "0.5019389", "0.50182045", "0.5010462", "0.5005751", "0.5001182", "0.500065", "0.49973312", "0.499232", "0.4991248", "0.49830234", "0.49810567", "0.49800995", "0.49793494", "0.49771595", "0.49771595", "0.49771595", "0.49771595", "0.49771595", "0.4973781", "0.4967978", "0.4967978", "0.4967978", "0.49655467", "0.49637777", "0.49617282", "0.49584007", "0.49554062", "0.49528974", "0.49528974", "0.49527958", "0.49526694", "0.49519414", "0.49481574" ]
0.5443327
9
Compact but complete string representation ('tostring').
def description(self):
    active = np.nonzero([bool(p) for p in self])[0]
    last_active = active[-1] if len(active) else -1
    return ' '.join([p.value_str for p in self][:last_active + 1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_str(self) -> str:", "def toString():", "def safeToString():", "def __str__(self):\n\n if not self:\n return '\\0'\n\n parts = []\n for name, value in self:\n if value is None:\n item = name\n else:\n item = '%s=%s' % (name, value)\n if (not self.strict) and (len(item) > 255):\n item = item[:255]\n parts.append(chr(len(item)))\n parts.append(item)\n\n return ''.join(parts)", "def to_string(self):\r\n return self.__str__()", "def toString(self) -> str:\n raise NotImplementedError", "def str(self) -> str:\n return \"\".join(self)", "def ToString(self):\r\n pass", "def ToString(self):\r\n pass", "def __str__(self):\n buf = StringIO()\n self.write_to(buf)\n return buf.getvalue()", "def toString(self) -> unicode:\n ...", "def toString(self) -> unicode:\n ...", "def _to_string(self):\r\n parts = []\r\n if self.offering:\r\n parts.extend([self.org, self.offering])\r\n if self.branch:\r\n parts.append(u\"{prefix}+{branch}\".format(prefix=self.BRANCH_PREFIX, branch=self.branch))\r\n if self.version_guid:\r\n parts.append(u\"{prefix}+{guid}\".format(prefix=self.VERSION_PREFIX, guid=self.version_guid))\r\n return u\"+\".join(parts)", "def __str__(self):\n if self.is_empty():\n return \"\"\n return \" \".join(list(iter(self)))", "def c_str(self):\n return _libsbml.string_c_str(self)", "def __str__ (self) :\r\n a = []\r\n next_get = self.nextGet_\r\n buffer = self.buff_\r\n length = self.capacity()\r\n for x in xrange(len(self)) :\r\n a.append(str(buffer[next_get]))\r\n a.append(\" \")\r\n next_get = (next_get+1) % length\r\n \r\n return \"\".join(a)", "def __str__(self):\n return ''.join(self)", "def stringify_short(self):\n return self.stringify()", "def __str__(self):\n return bytes_to_str(bytes(self))", "def __str__(self):\n from nodepy.utils import array2strings\n\n c = array2strings(self.c,printzeros=True)\n A = array2strings(self.A)\n b = array2strings(self.b,printzeros=True)\n lenmax, colmax = _get_column_widths([A,b,c])\n\n s=self.name+'\\n'+self.info+'\\n'\n for i in range(len(self)):\n s+=c[i].ljust(colmax+1)+'|'\n for j in range(len(self)):\n s+=A[i,j].ljust(colmax+1)\n s=s.rstrip()+'\\n'\n s+='_'*(colmax+1)+'|'+('_'*(colmax+1)*len(self))+'\\n'\n s+= ' '*(colmax+1)+'|'\n for j in range(len(self)):\n s+=b[j].ljust(colmax+1)\n return s.rstrip()", "def __str__(self):\n pieces = [] # sequence of piecewise strings to compose\n self._parenthesize_recur(self.root(), pieces)\n return ''.join(pieces)", "def __str__(self):\n pieces = [] # sequence of piecewise strings to compose\n self._parenthesize_recur(self.root(), pieces)\n return ''.join(pieces)", "def serialize_str(self, obj):\n if len(obj) < 0x100:\n return 'U' + struct.pack('<B', len(obj)) + obj\n return 'T' + struct.pack('<I', len(obj)) + obj", "def __str__(self):\n slist = self.buildstrings()\n local_s = ''\n for slistsub in range(0, len(slist)):\n local_s += slist[slistsub]\n if slistsub != len(slist)-1:\n local_s += '\\n'\n return local_s", "def __str__(self):\n\n if compat.PY3:\n return self.__unicode__()\n return self.__bytes__()", "def toString(self):\r\n str = \"\"\r\n for i in range(len(self.Data)):\r\n str += (self.__hexLookup[int(self.Data[i] / 16)]).decode()\r\n str += (self.__hexLookup[int(self.Data[i] % 16)]).decode()\r\n \r\n return str", "def __str__(self):\r\n\r\n if self._size > 0:\r\n\r\n lst = [str(self._data[item]) for item in range(self._size)]\r\n str1 = str(lst) + \" Capacity: \" + str(self._capacity)\r\n\r\n return str1\r\n\r\n else:\r\n return \"Empty Stack\"", "def __str__(self):\n return 
str(self.serialize())", "def as_str(self):\n return self.as_type(str)", "def ToString():\n @pass_failures\n def to_string(data):\n value = data.value\n if isinstance(value, Mapping):\n value = {k: str(v) for k, v in value.items()}\n else:\n value = str(value)\n data.value = value\n return data\n return to_string", "def asString(self):\n\n res = []\n for v in list(self.vars.values()):\n res.append(v.asString())\n res.append('')\n for e in list(self.enums.values()):\n res.append(e.asString())\n res.append('')\n for s in list(self.structs.values()):\n res.append(s.defAsString())\n res.append('')\n for s in list(self.structs.values()):\n res.append(s.dataAsString())\n\n return '\\n'.join(res)", "def __str__(self):\n s = \"--\\n\"\n for element in self:\n s += element.__str__() + \"\\n\"\n s += \"--\"\n \"\"\"\n # Uncomment if you want to see the internal structure\n s = \"\\n--\\n\"\n for i in xrange(self.size):\n s += \"%d [%s, %s]\\n\" % ( i, self.slot[i], self.data[i] )\n s += \"--\"\n \"\"\"\n return s", "def __str__(self):\n return bytes_to_string(self._bytes)", "def __str__(self):\n out = \"\"\n for i, m in enumerate(self):\n out += \" \" + str(m) if i > 0 else str(m)\n out += \" \"\n return out", "def __str__(self) -> str:\n return self.encode()", "def str(self):\n return struct.pack(\n '!IIIIIIIIIII',\n self.magic,\n self.totalsize,\n self.off_dt_struct,\n self.off_dt_strings,\n self.off_mem_rsvmap,\n self.version,\n self.last_comp_version,\n self.size_dt_strings,\n self.size_dt_struct\n )", "def __str__(self):\n\n if self._b == b'':\n return ''\n\n if len(self.quote) == 1:\n s = self.to_short()\n else:\n s = self.to_long()\n\n assert eval('b' + self.quote + s + self.quote) == self._b\n\n return s", "def str(self, *args):\n return _libsbml.ostringstream_str(self, *args)", "def __str__(self):\n if __debug__:\n description = ('CM' in debug.active)\n else:\n description = False\n return self.asstring(short=False, header=True, summary=True,\n description=description)", "def dumps(self):\n return ''.join(self.out)", "def astr(obj):\n\treturn unicode(obj).encode(\"ascii\", \"replace\")", "def sstr(obj):\n if IS_PY2:\n # For lists and tuples in python2, remove unicode string representation characters.\n # i.e. 
ensure lists are printed as ['a', 'b'] and not [u'a', u'b']\n if type(obj) in [list]:\n return [sstr(item) for item in obj] # pragma: no cover # noqa\n elif type(obj) in [tuple]:\n return tuple(sstr(item) for item in obj) # pragma: no cover # noqa\n\n return unicode(obj).encode(DEFAULT_ENCODING) # pragma: no cover # noqa\n else:\n return obj # pragma: no cover", "def _tostr(t):\n\treturn t.__unicode__()", "def to_str(self):\n return pprint.pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())" ]
[ "0.755594", "0.74858665", "0.73231876", "0.72323847", "0.72257227", "0.7008717", "0.69522905", "0.69489115", "0.69489115", "0.6871342", "0.68474585", "0.68474585", "0.6772113", "0.6723712", "0.66784674", "0.6602459", "0.6586125", "0.658413", "0.65728617", "0.6562585", "0.6560252", "0.65574545", "0.65515614", "0.65463763", "0.65341693", "0.65248996", "0.6522734", "0.6516308", "0.65153813", "0.6501217", "0.6493496", "0.6423398", "0.6421834", "0.6414712", "0.6408935", "0.6404484", "0.6396319", "0.6394913", "0.6391458", "0.6365184", "0.63574314", "0.6332579", "0.63323236", "0.6330826", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855", "0.63213855" ]
0.0
-1
Load model from description string (parameters only).
def fromstring(self, description):

    self.header = {}

    # Split string either on commas or whitespace, for good measure
    param_vals = [p.strip() for p in description.split(',')] \
        if ',' in description else description.split()

    params = [p for p in self]
    min_len = min(len(params), len(param_vals))
    for param, param_val in zip(params[:min_len], param_vals[:min_len]):
        param.value_str = param_val
    for param in params[min_len:]:
        param.value = param.default_value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_model(self, model_path: str):", "def load_model(self, filename):\r\n pass", "def load(path_to_model):\n pass", "def load_model(self, path):\n pass", "def load_model(self) -> Any:", "def load_model(language_id, model_type):\n\n # getting the language code from it's id\n language_code = get_language_code(language_id)\n\n # getting the model name from it's type\n model_name = get_model_name(model_type)\n\n # building the model's full path\n model_full_path = \"%s/%s/%s.txt\" % (models_base_path, language_code, model_name)\n\n # returning the model loaded directly from file\n return load_model_from_file(model_full_path)", "def load_model(self):\n pass", "def parse(cls, model_path: str, **kwargs):", "def load(self, name=\"\"):\n\n self.constructed = True\n if name == \"\":\n name = \"/home/unai/Escritorio/MultiNetwork/model/model\"\n\n network_descriptors = {\"Generic\": GenericDescriptor, \"Decoder\": DecoderDescriptor, \"Discrete\": DiscreteDescriptor, \"Convolution\": ConvolutionDescriptor}\n\n if not os.path.isfile(name):\n print(\"Error at loading the model\")\n return None\n\n f = open(name, \"r+\")\n\n lines = f.readlines()\n\n i = 0\n while lines[i] != \"\\n\": # Each component is stored in a line\n ident, n_inp, kind, n_hidden, layers, init, act, cond_rand, taking, producing, depth, reachable, belows = lines[i][:-1].split(\"_\")\n kwargs = {}\n if int(ident[1:]) > self.last_net:\n self.last_net = int(ident[1:])\n\n self.reachable[ident] = reachable.split(\",\")\n self.comps_below[ident] = belows.split(\",\")\n\n if \"onv\" in kind: # Not working right now\n filters, sizes, layers, strides = layers.split(\"*\")\n sizes = sizes.split(\",\")\n s = np.array([[int(sz) for sz in szs.split(\"/\")] for szs in sizes])\n desc = network_descriptors[kind](int(inp), int(outp), int(n_inp), layers.split(\",\"), filters.split(\",\"), [int(x) for x in strides.split(\",\")], s, [int(x) for x in act.split(\",\")], [int(x) for x in init.split(\",\")], kwargs)\n else:\n if len(kwargs) > 0: # Not working right now\n kwargs = kwargs.split(\"-\")\n kwargs[0] = [int(x) for x in kwargs[0].split(\".\") if len(x) > 0]\n kwargs[1] = [int(x) for x in kwargs[1].split(\".\") if len(x) > 0]\n if len(cond_rand) > 0:\n cond_rand = cond_rand.split(\"-\")\n cond_rand[0] = [int(x) for x in cond_rand[0].split(\",\") if len(x) > 0]\n cond_rand[1] = [int(x) for x in cond_rand[1].split(\",\") if len(x) > 0]\n kwargs[\"conds\"] = cond_rand\n desc = network_descriptors[kind](int(taking.split(\",\")[0]), int(producing.split(\",\")[0]), int(n_inp), int(n_hidden), [int(x) for x in layers.split(\",\") if x != \"-1\"], init_functions[[int(x) for x in init.split(\",\") if x != \"-1\"]],\n act_functions[[int(x) for x in act.split(\",\") if x != \"-1\"]], **kwargs)\n\n # print(\"ident\", ident, \"n_inp\", n_inp, \"kind\", kind, \"inp\", inp, \"outp\", outp, \"layers\", layers, \"init\", init, \"act\", act, \"taking\", taking, \"producing\", producing, \"depth\", depth, \"kwargs\", kwargs)\n net = NetworkComp(desc, InOut(size=int(taking.split(\",\")[0]), data_type=taking.split(\",\")[1]), InOut(data_type=producing.split(\",\")[1], size=int(producing.split(\",\")[0])), int(depth))\n\n self.add_net(net, ident)\n i += 1\n\n i += 1\n\n while lines[i] != \"\\n\": # Inputs\n\n ident, size, kind, depth = lines[i].split(\"_\")\n\n self.inputs[ident] = ModelComponent(None, InOut(size=int(size), data_type=kind), int(depth))\n i += 1\n\n i += 1\n\n while lines[i] != \"\\n\": # Outputs\n\n ident, size, kind, depth, belows = 
lines[i].split(\"_\")\n\n self.outputs[ident] = ModelComponent(InOut(size=int(size), data_type=kind), None, int(depth))\n self.comps_below[ident] = belows.split(\",\")\n i += 1\n\n i += 1\n\n while i < len(lines): # Connections\n name, inp, outp, kind, size = lines[i].split(\"_\")\n\n if int(name[1:]) > self.last_con:\n self.last_con = int(name[1:])\n\n self.connections[name] = Connection(inp, outp, InOut(kind, int(size)), name)\n i += 1\n self.update_below()", "def __load_model(self):\n loaded = load(self.__file_name)\n self.__model = loaded['model']\n self.__meta_data = loaded['metadata']\n self.__is_ready = True", "def load_model(self, **params):\n \t# file_name = params['name']\n # return pickle.load(gzip.open(file_name, 'rb'))", "def load(\n self,\n modelLoadPath\n ):\n pass", "def load(cls, filename, model_format):\n handle = ctypes.c_void_p()\n if not _isascii(model_format):\n raise ValueError('model_format parameter must be an ASCII string')\n model_format = model_format.lower()\n if model_format == 'lightgbm':\n _check_call(_LIB.TreeliteLoadLightGBMModel(c_str(filename),\n ctypes.byref(handle)))\n elif model_format == 'xgboost':\n _check_call(_LIB.TreeliteLoadXGBoostModel(c_str(filename),\n ctypes.byref(handle)))\n elif model_format == 'protobuf':\n _check_call(_LIB.TreeliteLoadProtobufModel(c_str(filename),\n ctypes.byref(handle)))\n else:\n raise ValueError('Unknown model_format: must be one of ' \\\n + '{lightgbm, xgboost, protobuf}')\n return Model(handle)", "def load_model(self,\n model: Union[str, io.IOBase, DM],\n name: Optional[str] = None):\n super().load_model(model, name=name)\n content = self.model[self.modelroot]\n\n self.key = content['key']\n self.id = content['id']\n self.family = content['system-family']\n self.__parameters = []\n for cp in content.aslist('calculation-parameter'):\n self.__parameters.append(dict(cp))", "def load_model(self, path_model: Optional[PathLike]) -> None:\n raise NotImplementedError", "def load_model(model_name):\r\n model = joblib.load(model_name)\r\n return model", "def load_model():\n return \"None\"", "async def load_model(\n self,\n model_name: str,\n headers: dict[str, t.Any] = ...,\n config: str = ...,\n files: dict[str, str] = ...,\n ) -> None:", "def parse_model_description(model_description: str) -> ModelDescription:\n root = ET.fromstring(model_description)\n\n defaults = _get_attribute_default_values()\n\n # mandatory p.32\n fmi_version = root.get(\"fmiVersion\")\n model_name = root.get(\"modelName\")\n guid = root.get(\"guid\")\n # optional\n description = root.get(\"description\", default=\"\")\n author = root.get(\"author\", default=\"\")\n copyright = root.get(\"copyright\", default=\"\")\n version = root.get(\"version\", default=\"\")\n license = root.get(\"license\", default=\"\")\n generation_tool = root.get(\"generationTool\", default=\"\")\n generation_date_and_time = root.get(\"generationDateAndTime\", default=\"\")\n variable_naming_convention = root.get(\"variableNamingConvention\", default=\"flat\")\n numberOfEventIndicators = root.get(\"numberOfEventIndicators\", default=0)\n\n model_variables = []\n\n \"\"\" Iterate over model variables:\n <ScalarVariable name=\"real_a\" valueReference=\"0\" variability=\"continuous\" causality=\"input\">\n <Real start=\"0.0\" />\n </ScalarVariable>\n \"\"\"\n for scalarVariable in root.iter(\"ScalarVariable\"):\n\n causality = scalarVariable.get(\"causality\", default=\"local\")\n variability = scalarVariable.get(\"variability\", default=\"continuous\")\n\n initial = 
scalarVariable.get(\"initial\", default=None)\n # defaults of initial depend on causality and variablilty\n # the combinations lead to 5 different cases denoted A-E on p.50\n if initial is None:\n initial, _ = get_intitial_choices_and_default(causality, variability)\n\n var = list(scalarVariable)[0]\n start = var.get(\"start\", default=None)\n dataType = var.tag\n\n model_variables.append(\n ScalarVariable(\n name=scalarVariable.get(\"name\"),\n valueReference=scalarVariable.get(\"valueReference\"),\n variability=variability,\n causality=causality,\n description=scalarVariable.get(\"description\", default=\"\"),\n initial=initial,\n start=start,\n dataType=dataType,\n )\n )\n\n log_categories = []\n for category in root.iter(\"Category\"):\n log_categories.append(category.get(\"name\"))\n\n model_structure = []\n\n # cosimulation\n cosim_element = root.find(\"CoSimulation\")\n\n modelIdentifier = cosim_element.get(\"modelIdentifier\")\n needsExecutionTool = cosim_element.get(\n \"needsExecutionTool\", default=defaults[\"needsExecutionTool\"]\n )\n canHandleVariableCommunicationStepSize = cosim_element.get(\n \"canHandleVariableCommunicationStepSize\",\n default=defaults[\"canHandleVariableCommunicationStepSize\"],\n )\n canInterpolateInputs = cosim_element.get(\n \"canInterpolateInputs\", default=defaults[\"canInterpolateInputs\"]\n )\n maxOutputDerivativeOrder = cosim_element.get(\n \"maxOutputDerivativeOrder\", default=defaults[\"maxOutputDerivativeOrder\"]\n )\n canRunAsynchronuously = cosim_element.get(\n \"canRunAsynchronuously\", default=defaults[\"canRunAsynchronuously\"]\n )\n canBeInstantiatedOnlyOncePerProcess = cosim_element.get(\n \"canBeInstantiatedOnlyOncePerProcess\",\n default=defaults[\"canBeInstantiatedOnlyOncePerProcess\"],\n )\n canNotUseMemoryManagementFunctions = cosim_element.get(\n \"canNotUseMemoryManagementFunctions\",\n default=defaults[\"canNotUseMemoryManagementFunctions\"],\n )\n canGetAndSetFMUstate = cosim_element.get(\n \"canGetAndSetFMUstate\", default=defaults[\"canGetAndSetFMUstate\"]\n )\n canSerializeFMUstate = cosim_element.get(\n \"canSerializeFMUstate\", default=defaults[\"canSerializeFMUstate\"]\n )\n providesDirectionalDerivative = cosim_element.get(\n \"providesDirectionalDerivative\",\n default=defaults[\"providesDirectionalDerivative\"],\n )\n\n def xs_boolean(s):\n if s is None:\n return None\n if s in {\"false\", \"0\"}:\n return False\n elif s in {\"true\", \"1\"}:\n return True\n else:\n raise ValueError(f\"Unable to convert {s} to xsd boolean\")\n\n def xs_normalized_string(s: str):\n if s is None:\n return None\n if not s.isprintable():\n raise ValueError(r\"normalized string can not contain: \\n, \\t or \\r\")\n return s\n\n def xs_unsigned_int(s: str):\n if s is None:\n return None\n value = int(s)\n if value > 4294967295:\n raise ValueError(\"xs:unsingedInt cannot exceed the value 4294967295\")\n return value\n\n cosimulation = CoSimulation(\n modelIdentifier=modelIdentifier,\n needsExecutionTool=xs_boolean(needsExecutionTool),\n canHandleVariableCommunicationStepSize=xs_boolean(\n canHandleVariableCommunicationStepSize\n ),\n canInterpolateInputs=xs_boolean(canInterpolateInputs),\n maxOutputDerivativeOrder=xs_unsigned_int(maxOutputDerivativeOrder),\n canRunAsynchronuously=xs_boolean(canRunAsynchronuously),\n canBeInstantiatedOnlyOncePerProcess=xs_boolean(\n canBeInstantiatedOnlyOncePerProcess\n ),\n canNotUseMemoryManagementFunctions=xs_boolean(\n canNotUseMemoryManagementFunctions\n ),\n 
canGetAndSetFMUstate=xs_boolean(canGetAndSetFMUstate),\n canSerializeFMUstate=xs_boolean(canSerializeFMUstate),\n providesDirectionalDerivative=xs_boolean(providesDirectionalDerivative),\n )\n\n return ModelDescription(\n fmiVersion=fmi_version,\n modelName=model_name,\n guid=guid,\n author=author,\n description=description,\n version=version,\n copyright=copyright,\n logCategories=log_categories,\n license=license,\n generationTool=generation_tool,\n generationDateAndTime=generation_date_and_time,\n variableNamingConvention=variable_naming_convention,\n numberOfEventIndicators=numberOfEventIndicators,\n CoSimulation=cosimulation,\n modelVariables=model_variables,\n modelStructure=model_structure,\n )", "def load_model(filename):\n return Model.load_savefile(filename)", "def read_model(self):\n \n # words dictionary\n f = open(self.name + \"_words\", 'r') \n d_str = f.read()\n f.close()\n \n d = dict(eval(d_str))\n self.words = d\n\n # word_lengths dictionary\n f = open(self.name + \"_word_lengths\", 'r') \n d_str = f.read()\n f.close()\n \n d = dict(eval(d_str))\n self.word_lengths = d\n\n # stems dictionary\n f = open(self.name + \"_stems\", 'r') \n d_str = f.read()\n f.close()\n \n d = dict(eval(d_str))\n self.stems = d\n\n # sentence_lengths dictionary\n f = open(self.name + \"_sentence_lengths\", 'r') \n d_str = f.read()\n f.close()\n \n d = dict(eval(d_str))\n self.sentence_lengths = d\n\n # ten most common words\n f = open(self.name + \"_common_word\", 'r') \n d_str = f.read()\n f.close()\n \n d = list(eval(d_str))\n self.common_word = d", "def load_model(\n model_dir, model_file=None, model_name=None, serialize_model=True, as_builder=False\n):\n if model_file and model_name:\n raise ValueError(\"only one of model_file and model_name should be set\")\n model_description_path = os.path.join(model_dir, MODEL_DESCRIPTION_FILENAME)\n\n if model_file:\n model = load_model_from_file(model_file, as_builder=as_builder)\n if serialize_model:\n tf.io.gfile.copy(model_file, model_description_path, overwrite=True)\n elif model_name:\n model = load_model_from_catalog(model_name, as_builder=as_builder)\n if serialize_model:\n with tf.io.gfile.GFile(\n model_description_path, mode=\"w\"\n ) as model_description_file:\n model_description_file.write(\n \"from opennmt import models\\n\"\n 'model = lambda: models.get_model_from_catalog(\"%s\")\\n' % model_name\n )\n elif tf.io.gfile.exists(model_description_path):\n tf.get_logger().info(\n \"Loading model description from %s\", model_description_path\n )\n model = load_model_from_file(model_description_path, as_builder=as_builder)\n else:\n raise RuntimeError(\n \"A model configuration is required: you probably need to \"\n \"set --model or --model_type on the command line.\"\n )\n\n return model", "def from_path(cls, path: str) -> Union[None, Type[AbstractModel]]:\n\n if not (path and isinstance(path, str)):\n msg = f\"Need a valid path to load a text/tagger model in AutoModel. 
\" \\\n f\"Found path={path} of type({type(path)})\"\n raise ValueError(msg)\n\n if not path.endswith(\".pkl\"):\n msg = \"Model Path must end with .pkl for AutoModel to be able to identify the model\"\n raise ValueError(msg)\n\n try:\n # if loading from path, determine the ABCModel type & return after doing xxxModel.load()\n model_config = AbstractModel.load_model_config(path)\n\n # get model type upon validation\n model_config = cls._resolve_model_config(model_config)\n model_type = cls._get_model_type(model_config)\n\n # load metadata and return\n if model_type == \"text\":\n model_class = AutoTextModel.get_model_class(model_config)\n elif model_type == \"tagger\":\n model_class = AutoTaggerModel.get_model_class(model_config)\n\n return model_class.load(path)\n\n except FileNotFoundError:\n # sometimes a model (and its config file) might not be dumped, eg. in role classifiers\n # or even if dumped, can be of NoneType enclosed in a dictionary\n return None", "def load_model():\n with open(MODEL_SAVE_JSON, 'r') as fp:\n json_string = fp.read()\n model = model_from_json(json_string)\n return model", "def load_model_by_name(model, global_step, device=None, path=\"/scratch/users/zucks626/ADNI/IPMI/checkpoints/\"):\r\n # path = \"/scratch/users/zucks626/ADNI/ae_cls/checkpoints/\"\r\n file_path = path + model.name + \"/\" + 'model-{:05d}.pt'.format(global_step)\r\n state = torch.load(file_path, map_location=device)\r\n model.load_state_dict(state)\r\n print(\"Loaded from {}\".format(file_path))", "def load_model(self, filename):\n model_object = self.s3_resource.Object(self.bucket_name, self.models_path + str(filename)).get()['Body'].read()\n model = pickle.loads(model_object)\n return model", "def load_model(fname: os.PathLike) -> Model:\n return Model.load(fname)", "def load_model(name, input_node):\n # Find the model class from its name\n all_models = models.get_models()\n net_class = [model for model in all_models if model.__name__ == name][0]\n\n # Construct and return the model\n return net_class({'data': input_node})", "def _load_model(self, loc):\n\n # If not a string, return input\n if not (isinstance(loc, str) or isinstance(loc, unicode)):\n return loc\n\n # If location is in S3, copy to local, then unpickle \n to_delete = False\n if \"s3\" in loc:\n tmp_loc = \"{0}/tmp_file_{1}.obj\".format(tmpdir, random.randint(1,1000))\n s3 = boto3.client('s3')\n bucket = loc.split(\"/\")[2]\n key = \"/\".join(loc.split(\"/\")[3:])\n with open(tmp_loc, \"wb\") as data:\n s3.download_fileobj(bucket, key, data)\n loc = tmp_loc\n to_delete = True\n with open(loc, \"rb\") as f:\n model = pickle.load(f)\n if to_delete:\n os.remove(tmp_loc)\n return model", "def load_model(self, filename):\n filename = path.join(self.root_path, f'models/{filename}.pkl')\n self.model = pickle.load(open(filename, \"rb\"))\n print('Successfully loaded model from '+filename)", "def load_model(self):\n if os.stat('code/lr-model.pt').st_size == 0:\n return\n params = torch.load('code/lr-model.pt')\n self.set_params(params)", "def _load_from(cls, model_state: dict) -> 'AbstractModel':\n raise NotImplementedError", "def load_model(self):\n try:\n self.model = Word2Vec.load(self.config[\"model_path\"])\n self.model.init_sims(replace=True)\n except Exception as e:\n print(e)\n print(\"error in model loading!\")", "def load_model(path_model, model_type, device):\n if model_type == 'torch':\n model = torch.load(path_model).to(device)\n if hasattr(model, 'linblocks'):\n for linblock in model.linblocks:\n linblock.to(device)\n 
model.eval()\n return model\n elif model_type == 'sklearn':\n raise NotImplementedError\n else:\n raise Exception('Model type not known.')", "def load_model_from_file(model_full_path):\n\n # trying to load the model from file\n try:\n # opening the file that has the model data\n with codecs.open(model_full_path, 'r') as f:\n # reading the model data\n model_data = u\"%s\" % f.read()\n\n # escaping unicode characters (\\u00fb, etc.)\n # model_data = model_data.decode('unicode_escape')\n\n # building the model features\n model_features = eval(model_data)\n\n # returning the model features\n return model_features\n\n # in case of an exception\n except Exception as e:\n # printing exception message\n print(str(e))\n\n # retuning None\n return None", "def load_model(self, file=None):\n return None", "def load_model():\n logging.info(\"Load language model...\")\n ngram_arpa_t = pkg_resources.resource_filename(\"hwrt\", \"misc/ngram.arpa.tar.bz2\")\n with tarfile.open(ngram_arpa_t, \"r:bz2\") as tar:\n tarfolder = tempfile.mkdtemp()\n tar.extractall(path=tarfolder)\n ngram_arpa_f = os.path.join(tarfolder, \"ngram.arpa\")\n with open(ngram_arpa_f) as f:\n content = f.read()\n ngram_model = NgramLanguageModel()\n ngram_model.load_from_arpa_str(content)\n return ngram_model", "def load_model():\n with open(MODEL_FILENAME, \"rb\") as file:\n model = pickle.load(file)\n return model", "def load_model(self) -> None:\n\n try:\n model_class = MODEL_TYPES[self.model_type]\n except KeyError:\n raise KeyError(f\"model type: {self.model_type} not supported\")\n\n if (\n os.path.exists(self.resources_path)\n and len(os.listdir(self.resources_path)) > 0\n ):\n model_name_or_path = self.resources_path\n else:\n model_name_or_path = self.model_name\n\n if self.model_type == \"stable_diffusion\":\n self.model = model_class.from_pretrained(\n model_name_or_path,\n use_auth_token=self.auth_token,\n )\n else:\n self.model = model_class.from_pretrained(model_name_or_path)\n\n self.model.to(self.device)", "def from_string(\n string: str, *, formatter: Optional[ModelFormatter] = None\n ) -> \"Model\":\n formatter = formatter if formatter is not None else ModelJSONFormatter()\n return formatter.parse(string)", "def load_model(PATH):\n model = torch.load(PATH)\n model.eval()\n return model", "def load_model(\n model_path=filepath + \"/trained_models/hi2en/\", model_file_name=\"model.h5\"\n):\n model_path = (\n filepath + \"/trained_models/{}/\".format(model_path)\n if model_path in [\"en2hi\", \"hi2en\"]\n else model_path\n )\n config = SConfig(configuration_file=model_path + \"config.pkl\")\n s2s = Seq2Seq(config)\n s2s.load_model(path_to_model=model_path, model_file_name=model_file_name)\n return s2s", "def load_model_from_catalog(name, as_builder=False):\n return catalog.get_model_from_catalog(name, as_builder=as_builder)", "def load(self, path):\n load_model(path, self)", "def _load_model(self):\n self._load_scaler('scaler.save')\n self._load_encoder('encoder0.save', 0)\n self._load_encoder('encoder1.save', 1)\n self._load_neural_network('model.json', 'model.h5')\n return", "def load_model(name: str) -> spacy.language.Language:\r\n return spacy.load(name)", "def load(path, config=None, task=\"default\"):\n\n # Detect ONNX models\n if isinstance(path, bytes) or (isinstance(path, str) and os.path.isfile(path)):\n return OnnxModel(path, config)\n\n # Return path, if path isn't a string\n if not isinstance(path, str):\n return path\n\n # Transformer models\n models = {\n \"default\": AutoModel.from_pretrained,\n 
\"question-answering\": AutoModelForQuestionAnswering.from_pretrained,\n \"summarization\": AutoModelForSeq2SeqLM.from_pretrained,\n \"text-classification\": AutoModelForSequenceClassification.from_pretrained,\n \"zero-shot-classification\": AutoModelForSequenceClassification.from_pretrained,\n }\n\n # Load model for supported tasks. Return path for unsupported tasks.\n return models[task](path) if task in models else path", "def load_model() -> None:\n global model\n\n if app.testing:\n current_dir = os.path.dirname(__file__)\n model_path = os.path.join(current_dir, \"models/model.pkl\")\n else:\n model_path = os.getenv(\"PATH_TO_MODEL\")\n\n if model_path is None:\n err = f\"PATH_TO_MODEL {model_path} is None\"\n raise RuntimeError(err)\n\n with open(model_path, \"rb\") as model_file:\n model = pickle.load(model_file)", "def load_model(self, model_name, model_url):\n\n fname = join(self.root, model_name)\n if not isfile(fname):\n if self.verbose:\n print(\"Could not find \" + fname + \".. attempt download\")\n with urllib.request.urlopen(model_url) as res, open(fname, 'wb') as f:\n shutil.copyfileobj(res, f)\n if self.verbose:\n print(\"Download complete.. model: \" + fname)\n elif self.verbose:\n print(\"Found model \" + fname + \"! :)\")\n\n model = load_model(fname)\n self.model = model", "def load_model(uri: str, env: AbstractEnv = compat.env) -> \"Model\":\n from ell.predictions import Model\n\n uri = ensure_uri(uri)\n filesystem = env.get_fs_for_uri(uri)\n\n if uri.endswith(\"/\"):\n # If it's a directory, load the first \"*.pkl\" file in it\n glob_result = filesystem.glob(uri.file(\"*.pkl\"), detail=False)\n if not glob_result:\n raise FileNotFoundError(f\"Couldn't find a pickled model in {uri!r}\")\n uri = uri.file(os.path.basename(glob_result[0]))\n\n LOGGER.info(\"Loading model from %r\", uri)\n with filesystem.open(uri, \"rb\") as f:\n model = joblib.load(f)\n if not isinstance(model, Model):\n raise TypeError(\n f\"Expected loaded object to be of type AbstractClassifier, but got \"\n f\"{model.__class__.__name__}\"\n )\n LOGGER.info(\"Model loaded\")\n return model", "def load(cls, load_information: Dict):\n params = load_information[\"params\"]\n fit_kwargs_path = load_information[\"fit_kwargs\"]\n with open(fit_kwargs_path, \"rb\") as infile:\n fit_kwargs = cloudpickle.load(infile)\n model_path = load_information[\"get_model\"]\n with open(model_path, \"rb\") as infile:\n get_model = cloudpickle.load(infile)\n\n module = cls(get_model=get_model, fit_kwargs=fit_kwargs, **params)\n return module", "def init_model(model_type):\n if model_type == 'magnitude':\n model = Magnitude('../model/crawl-300d-2M.magnitude')\n elif model_type == 'gensim':\n model = KeyedVectors.load('../model/pre_trained_word2vec_embeddings.bin')\n else:\n print(\"Invalid model type.\")\n sys.exit(1)\n return model, model_type", "def load_model(model_path):\n nlp = spacy.blank('en') \n if 'ner' not in nlp.pipe_names:\n ner = nlp.create_pipe('ner')\n nlp.add_pipe(ner)\n #load pretrained model from the path\n ner = nlp.from_disk(model_path)\n return ner", "def load_model(model_path: str) -> object:\n model = torch.load(model_path)\n model.eval()\n return model", "def load(args):\n myparams = pickle.load(open(args.model+\".pickle\", \"rb\"))\n tagger = SimpleBiltyTagger(myparams[\"in_dim\"],\n myparams[\"h_dim\"],\n myparams[\"c_in_dim\"],\n myparams[\"h_layers\"],\n activation=myparams[\"activation\"])\n tagger.set_indices(myparams[\"w2i\"],myparams[\"c2i\"],myparams[\"tag2idx\"])\n tagger.predictors, 
tagger.char_rnn, tagger.wembeds, tagger.cembeds = \\\n tagger.build_computation_graph(myparams[\"num_words\"],\n myparams[\"num_chars\"])\n tagger.model.populate(args.model)\n print(\"model loaded: {}\".format(args.model), file=sys.stderr)\n return tagger", "def from_file(cls, name: str, mod_path: List[str] = [\".\"],\n description: str = None) -> \"DataModel\":\n with open(name, encoding=\"utf-8\") as infile:\n yltxt = infile.read()\n return cls(yltxt, mod_path, description)", "def load_model(self, filename):\n\n with open(filename, 'rb') as file:\n model_dict = pickle.load(file)\n\n self.model = model_dict['model']\n self.vectorizer = model_dict['vec']\n self.vectorized_data = model_dict['vec_data']\n self.df_topic_keywords = model_dict['df']", "def load_model(filepath=None, config=None, item=None):\n\n if filepath is None:\n raise ValueError(\"The filepath is None, please check the filepath is in the config file\")\n if '.h5' in filepath:\n keras_model = lm(filepath)\n reader = FeatureReader(config)\n features = reader.get_feature(dt.now())\n f = features[item]\n # for keras bug\n f = f.values.reshape(1,4,12)\n v = keras_model.predict(f)\n return keras_model\n else:\n return joblib.load(filepath)", "def read_model(self):\n f = open(self.name + '_' + 'words', 'r')\n self.words = f.read()\n f.close()\n elf.words = dict(eval(self.words))\n \n f = open(self.name + '_' + 'word_lengths', 'r')\n self.word_lengths = f.read()\n f.close()\n self.word_lengths = dict(eval(self.word_lengths))\n\n f = open(self.name + '_' + 'sentence_lengths', 'r')\n self.sentence_lengths = f.read()\n f.close()\n self.sentence_lengths = dict(eval(self.sentence_lengths))\n\n f = open(self.name + '_' + 'stems', 'r')\n self.stems = f.read()\n f.close()\n self.stems = dict(eval(self.stems))\n\n f = open(self.name + '_' + 'commas_per_sentence', 'r')\n self.commas_per_sentence = f.read()\n f.close()\n self.commas_per_sentence = dict(eval(self.commas_per_sentence))", "def _load_model(self):\n with open(self.filepath, 'rb') as file:\n self.cmodel = pickle.load(file)", "def load_model(gateway_name=None):\n if gateway_name and len(gateway_name) > 0:\n model = pk.load(open(\"models/\" + gateway_name + \"_model.pk\", \"r\"))\n else:\n model = pk.load(open(\"models/all_model.pk\", \"r\"))\n return model", "def load_model(str_filename):\n from .cell import WaveCell\n print(\"Loading model from %s\" % str_filename)\n data = torch.load(str_filename)\n try:\n wavetorch.core.set_dtype(data[\"cfg\"]['dtype'])\n except:\n pass\n model_state = data['model_state']\n model = WaveCell(model_state['dt'].numpy(),\n model_state['Nx'].numpy(), \n model_state['Ny'].numpy(), \n model_state['src_x'].numpy(), \n model_state['src_y'].numpy(), \n model_state['px'].numpy(), \n model_state['py'].numpy())\n model.load_state_dict(model_state)\n model.eval()\n return model, data[\"history\"], data[\"history_model_state\"], data[\"cfg\"]", "def load(self, model_name_or_path):\n return BertMLM(model_name_or_path, self.top_k)", "def load_model():\n with open(paths.model('model.pkl'), 'rb') as stream:\n return pickle.load(stream)", "def loadModel(name, path=None):\n\n # if a path is given, try to load from that path first\n if path:\n try:\n model = TFT5ForConditionalGeneration.from_pretrained(path)\n tokenizer = T5Tokenizer.from_pretrained(path)\n \n return model, tokenizer\n except:\n print(f\"WARNING: Could not load the model from the path ({path}) specified with --from-pretrained flag. 
Trying to load '{name}' from cloud instead.\")\n\n # if no path was specified, or the load from path failed, try to load from cloud using the given model name\n model = TFT5ForConditionalGeneration.from_pretrained(name)\n tokenizer = T5Tokenizer.from_pretrained(name)\n \n return model, tokenizer", "def load_model_from_file(path, as_builder=False):\n module = load_model_module(path)\n model = module.model\n if not as_builder:\n model = model()\n del sys.path_importer_cache[os.path.dirname(module.__file__)]\n del sys.modules[module.__name__]\n return model", "def load_model(self, fname):\n cxnlib.CXNNetLoadModel(self.handle, fname)", "def _load_model_from_trained_params(self):\n self.ent_emb = tf.constant(self.trained_model_params[0])\n self.rel_emb = tf.constant(self.trained_model_params[1])", "def loadmodel(fname):\n if not fname.endswith('.pickle.gz'):\n fname = fname + '.pickle.gz'\n with gzip.open(fname, 'r') as fin:\n D = load(fin)\n print 'Load model from file: {}'.format(fname)\n return D", "def load_model(self):\n self.__model = tf.keras.models.load_model(\n os.path.join(self.model_path, \"model.h5\")\n )\n print(\"[INFO] Model loaded!\")\n\n tok_pth = os.path.join(self.model_path, \"tokenizer.json\")\n with open(tok_pth, \"r\") as f:\n self.__tokenizer = tf.keras\\\n .preprocessing\\\n .text\\\n .tokenizer_from_json(f.read())\n print(\"[INFO] Tokenizer loaded!\")\n\n meta_pth = os.path.join(self.model_path, \"meta.json\")\n with open(meta_pth, \"r\") as f:\n meta = json.load(f)\n self.__title_len = meta[\"title_pad_length\"]\n self.__body_len = meta[\"body_pad_length\"]\n\n self.load_explainer()\n print(\"[INFO] Explainer loaded!\")", "def load_model(self,\n model: Union[str, io.IOBase, DM],\n name: Optional[str] = None):\n assert name is None, 'name is not used by this class'\n artifact = DM(model).find('artifact')\n self.url = artifact['web-link'].get('URL', None)\n self.label = artifact['web-link'].get('label', None)\n self.filename = artifact['web-link'].get('link-text', None)", "def load_seq_model():\n model = joblib.load(os.path.join(os.path.dirname(__file__), 'RuleSet3.pkl'))\n return model", "def load_model(task_id):\n # get model file name\n task_chain_id = task_id.split('-')[0]\n\n root_dir = os.path.split(os.path.realpath(__file__))[0]\n model_path = os.path.join(root_dir, '..', 'common', 'model', task_chain_id)\n model_file_name = os.path.join(model_path, task_id + '.model')\n if not os.path.exists(model_file_name):\n raise Exception(\"Algorithm load_model not find model {}\".format(model_file_name))\n # load mode from disk\n model = load(model_file_name)\n\n return model", "def load_model():\n prepro = Prepro(PATH_STOPSWORD, PATH_ACRONYM)\n vectorizer = joblib.load(PATH_TFIDF)\n label_encoder = joblib.load(PATH_ENCODER)\n model_svm = joblib.load(PATH_SVM)\n model_nb = joblib.load(PATH_NB)\n model_lr = joblib.load(PATH_LR)\n return prepro, vectorizer, label_encoder, model_svm, model_nb, model_lr", "def load(self, filename, map_location=None):\n model_dict = torch.load(filename, map_location=map_location) if map_location is not None else torch.load(filename)\n model = SLDE(**model_dict[\"init_args\"])\n model.load_state_dict(model_dict[\"model_state_dict\"])\n return model", "def load_trained_model(unit):\n return load_model(DATA_FOLDER + \"{}_cdae_model.hd5\".format(UNITS[unit]))", "def read_model(self):\n filename = self.name + '_words'\n f = open(filename, 'r') \n d_str = f.read() \n f.close()\n d = dict(eval(d_str))\n self.words = d\n \n filename2 = self.name + 
'_word_lengths'\n f = open(filename2, 'r') \n d2_str = f.read() \n f.close()\n d2 = dict(eval(d2_str))\n self.word_lengths = d2\n \n filename3 = self.name + '_stems'\n f = open(filename3, 'r') \n d3_str = f.read() \n f.close()\n d3 = dict(eval(d3_str))\n self.stems = d3\n \n filename4 = self.name + '_sentence_lengths'\n f = open(filename4, 'r') \n d4_str = f.read() \n f.close()\n d4 = dict(eval(d4_str))\n self.sentence_lengths = d4\n \n filename5 = self.name + '_punctuation'\n f = open(filename5, 'r') \n d5_str = f.read() \n f.close()\n d5 = dict(eval(d5_str))\n self.punctuation = d5", "def load_model(filename_weight, filename_model):\n with open(filename_model, 'r') as file:\n model = model_from_json(file.read())\n file.close()\n\n model.load_weights(filename_weight)\n return model", "def load_model(self, folder_name):\n raise NotImplementedError()", "def deserialize(self, str):\n try:\n if self.model is None:\n self.model = articulation_msgs.msg.ModelMsg()\n if self.data is None:\n self.data = articulation_msgs.msg.ModelMsg()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.model.header.seq, _x.model.header.stamp.secs, _x.model.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.model.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.model.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model.name = str[start:end].decode('utf-8')\n else:\n self.model.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.params = []\n for i in range(0, length):\n val1 = articulation_msgs.msg.ParamMsg()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _x = val1\n start = end\n end += 9\n (_x.value, _x.type,) = _struct_dB.unpack(str[start:end])\n self.model.params.append(val1)\n _x = self\n start = end\n end += 12\n (_x.model.track.header.seq, _x.model.track.header.stamp.secs, _x.model.track.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.model.track.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.model.track.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.model.track.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v15 = val1.position\n _x = _v15\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v16 = val1.orientation\n _x = _v16\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model.track.pose.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose_headers = []\n for i in range(0, length):\n val1 = std_msgs.msg.Header()\n start = end\n end += 4\n (val1.seq,) = _struct_I.unpack(str[start:end])\n _v17 = val1.stamp\n _x = _v17\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n 
end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.frame_id = str[start:end].decode('utf-8')\n else:\n val1.frame_id = str[start:end]\n self.model.track.pose_headers.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose_projected = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v18 = val1.position\n _x = _v18\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v19 = val1.orientation\n _x = _v19\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model.track.pose_projected.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.pose_resampled = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v20 = val1.position\n _x = _v20\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v21 = val1.orientation\n _x = _v21\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.model.track.pose_resampled.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sI'%length\n start = end\n end += struct.calcsize(pattern)\n self.model.track.pose_flags = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.model.track.channels = []\n for i in range(0, length):\n val1 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.values = struct.unpack(pattern, str[start:end])\n self.model.track.channels.append(val1)\n _x = self\n start = end\n end += 12\n (_x.data.header.seq, _x.data.header.stamp.secs, _x.data.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.data.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.data.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data.name = str[start:end].decode('utf-8')\n else:\n self.data.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.params = []\n for i in range(0, length):\n val1 = articulation_msgs.msg.ParamMsg()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n _x = val1\n start = end\n end += 9\n (_x.value, _x.type,) = _struct_dB.unpack(str[start:end])\n self.data.params.append(val1)\n _x = self\n start = end\n end += 12\n (_x.data.track.header.seq, _x.data.track.header.stamp.secs, _x.data.track.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.data.track.header.frame_id = str[start:end].decode('utf-8')\n else:\n 
self.data.track.header.frame_id = str[start:end]\n start = end\n end += 4\n (self.data.track.id,) = _struct_i.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v22 = val1.position\n _x = _v22\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v23 = val1.orientation\n _x = _v23\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data.track.pose.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose_headers = []\n for i in range(0, length):\n val1 = std_msgs.msg.Header()\n start = end\n end += 4\n (val1.seq,) = _struct_I.unpack(str[start:end])\n _v24 = val1.stamp\n _x = _v24\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.frame_id = str[start:end].decode('utf-8')\n else:\n val1.frame_id = str[start:end]\n self.data.track.pose_headers.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose_projected = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v25 = val1.position\n _x = _v25\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v26 = val1.orientation\n _x = _v26\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data.track.pose_projected.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.pose_resampled = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Pose()\n _v27 = val1.position\n _x = _v27\n start = end\n end += 24\n (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])\n _v28 = val1.orientation\n _x = _v28\n start = end\n end += 32\n (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])\n self.data.track.pose_resampled.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sI'%length\n start = end\n end += struct.calcsize(pattern)\n self.data.track.pose_flags = struct.unpack(pattern, str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.data.track.channels = []\n for i in range(0, length):\n val1 = sensor_msgs.msg.ChannelFloat32()\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n val1.name = str[start:end].decode('utf-8')\n else:\n val1.name = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n val1.values = struct.unpack(pattern, str[start:end])\n self.data.track.channels.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def _load_from(cls, model_state: dict) -> AbstractModel:\n return cls(model=model_state.get('model'), **model_state.get('kwargs'))", "def load_model():\n\n # find location of model\n\n file_path = '/Users/davidodwyer/Desktop' # to the directory\n file_name = 'original_mlr.joblib' \n the_file = os.path.join(file_path, file_name)\n\n # load model\n\n model = load(the_file)\n\n return model", "def load_single_lstm_model(device, path):\n saved_model_data = torch.load(path, 
map_location=device)\n train_args = saved_model_data['args']\n model = build_eval_model_from_args(train_args, saved_model_data, device)\n return [model, train_args]", "def load(model_path: str):\n model = torch.load(model_path)\n model.eval()\n return model", "def load_model(path):\n # for example -> f\"{os.getcwd()}/trained_models\"\n return spacy.load(path)", "def load_model(program_name, model_context, aliases, filter_type, wlst_mode, validate_crd_sections=True):\n _method_name = 'load_model'\n\n variable_map = {}\n try:\n if model_context.get_variable_file():\n # callers of this method allow multiple variable files\n variable_map = variables.load_variables(model_context.get_variable_file(), allow_multiple_files=True)\n except VariableException, ex:\n __logger.severe('WLSDPLY-20004', program_name, ex.getLocalizedMessage(), error=ex,\n class_name=_class_name, method_name=_method_name)\n tool_exception = \\\n exception_helper.create_exception(aliases.get_exception_type(), 'WLSDPLY-20004', program_name,\n ex.getLocalizedMessage(), error=ex)\n __logger.throwing(tool_exception, class_name=_class_name, method_name=_method_name)\n raise tool_exception\n\n model_file_value = model_context.get_model_file()\n try:\n model_dictionary = merge_model_files(model_file_value, variable_map)\n except TranslateException, te:\n __logger.severe('WLSDPLY-09014', program_name, model_file_value, te.getLocalizedMessage(), error=te,\n class_name=_class_name, method_name=_method_name)\n tool_exception = \\\n exception_helper.create_exception(aliases.get_exception_type(), 'WLSDPLY-09014', program_name,\n model_file_value, te.getLocalizedMessage(), error=te)\n __logger.throwing(tool_exception, class_name=_class_name, method_name=_method_name)\n raise tool_exception\n\n try:\n variables.substitute(model_dictionary, variable_map, model_context)\n except VariableException, ex:\n __logger.severe('WLSDPLY-20004', program_name, ex.getLocalizedMessage(), error=ex,\n class_name=_class_name, method_name=_method_name)\n tool_exception = \\\n exception_helper.create_exception(aliases.get_exception_type(), 'WLSDPLY-20004', program_name,\n ex.getLocalizedMessage(), error=ex)\n __logger.throwing(tool_exception, class_name=_class_name, method_name=_method_name)\n raise tool_exception\n\n filter_helper.apply_filters(model_dictionary, filter_type, model_context)\n\n persist_model(model_context, model_dictionary)\n\n validate_model(program_name, model_dictionary, model_context, aliases, wlst_mode,\n validate_crd_sections=validate_crd_sections)\n\n return model_dictionary", "def load_model(model_name, MODEL_DIR):\n model_def_path = os.path.join(MODEL_DIR, model_name + '.py')\n weights_path = os.path.join(MODEL_DIR, model_name + '.pth')\n mod = load_module_2or3(model_name, model_def_path)\n func = getattr(mod, model_name)\n net = func(weights_path=weights_path)\n return net", "def load_model(self, model_path):\n\n\t\tmodel_path = osp.abspath(model_path)\n\t\tmodel_weights_path = osp.splitext(model_path)[0] + \".bin\"\n\n\t\tself.Helpers.logger.info(\"Loading the model from '%s'\" % (model_path))\n\t\tmodel = self.context.ie_core.read_network(model_path, model_weights_path)\n\t\tself.Helpers.logger.info(\"Model loaded\")\n\n\t\treturn model", "def __init__(self, path, verbose=1):\n self.model = load_model(path)\n if verbose:\n self.model.summary()\n self.path = path", "def load_model_custom(file, object):\n return getattr(load_module(file), object)", "def _load(path):\n status = KerasOpenVINOModel._load_status(path)\n if 
status.get('xml_path', None):\n xml_path = Path(status['xml_path'])\n invalidInputError(xml_path.suffix == '.xml',\n \"Path of openvino model must be with '.xml' suffix.\")\n else:\n invalidInputError(False, \"nano_model_meta.yml must specify 'xml_path' for loading.\")\n xml_path = Path(path) / status['xml_path']\n return KerasOpenVINOModel(xml_path)", "def load(self, dataset, model_dir):\n raise NotImplementedError", "def test_load_model():\n model = BERTopic(language=\"Dutch\", embedding_model=None, n_components=12)\n model.save(\"test\")\n loaded_model = BERTopic.load(\"test\")\n assert type(model) == type(loaded_model)\n assert model.language == loaded_model.language\n assert model.embedding_model == loaded_model.embedding_model\n assert model.top_n_words == loaded_model.top_n_words\n assert model.n_neighbors == loaded_model.n_neighbors\n assert model.n_components == loaded_model.n_components", "def load_model(name):\n\tmodel = joblib.load(\"data/{}/{}.model\".format(name, name))\n\t# Setting n_jobs to 1 in case it was set to a higher number while training the model seems to makes predictions of single samples much faster.\n\tmodel.n_jobs = 1\n\treturn model", "def __init__(self, name:str, model_path:str, \n disabled:List[str]=[\"parser\", \"tagger\", \"lemmatizer\", \"attribute_ruler\"]):\n \n super(ModelAnnotator, self).__init__(name)\n self.model = spacy.load(model_path, disable=disabled)", "def load_model(\n domain: str,\n sub_domain: str,\n architecture: str,\n sub_architecture: Union[str, None],\n framework: str,\n repo: str,\n dataset: str,\n training_scheme: Union[str, None],\n sparse_name: str,\n sparse_category: str,\n sparse_target: Union[str, None],\n release_version: Union[str, None] = None,\n override_folder_name: Union[str, None] = None,\n override_parent_path: Union[str, None] = None,\n force_token_refresh: bool = False,\n ) -> Model:\n return Model.load_model(\n domain=domain,\n sub_domain=sub_domain,\n architecture=architecture,\n sub_architecture=sub_architecture,\n framework=framework,\n repo=repo,\n dataset=dataset,\n training_scheme=training_scheme,\n sparse_name=sparse_name,\n sparse_category=sparse_category,\n sparse_target=sparse_target,\n release_version=release_version,\n override_folder_name=override_folder_name,\n override_parent_path=override_parent_path,\n force_token_refresh=force_token_refresh,\n )", "def from_string(cls, dlstr):\n raise NotImplementedError(\"Should be implemented by subclass\")", "def get_model_description() -> Dict[str, Any]:\n\n desc_file = os.path.join(os.path.dirname(__file__), \"model_files\", \"description.yaml\")\n if not os.path.exists(desc_file):\n raise FileNotFoundError(f\"File not found - {desc_file}\")\n else:\n with open(desc_file, \"r\") as f:\n desc = yaml.safe_load(f.read())\n if desc is None:\n raise RuntimeError(f\"Error parsing {desc_file}\")\n return desc", "def load(self, path, model_type='word2vec'):\n\n # Code for loading Word2vec model:\n if model_type == 'word2vec':\n self.__model = KeyedVectors.load_word2vec_format(path)\n self.__embedding = self.__model.wv\n\n # Code for loading fastText model:\n elif model_type == 'fasttext':\n self.__model = FastText.load_fasttext_format(path)\n self.__embedding = self.__model.wv\n\n # In case we're trying to load an unsupported model type:\n else:\n raise Exception(\"Model '{}' not supported (must be 'word2vec' or 'fasttext').\".format(model_type) +\n \" Cannot load word embedding model.\")", "def _load(self, req, id, body):\n context = req.environ['meteos.context']\n\n 
LOG.debug(\"Load model with request: %s\", id)\n\n try:\n model = self.engine_api.get_model(context, id)\n utils.is_valid_status(model.__class__.__name__,\n model.status,\n constants.STATUS_AVAILABLE)\n experiment = self.engine_api.get_experiment(\n context, model.experiment_id)\n template = self.engine_api.get_template(\n context, experiment.template_id)\n except exception.NotFound:\n raise exc.HTTPNotFound()\n\n self.engine_api.load_model(context,\n id,\n model.dataset_format,\n model.model_type,\n template.job_template_id,\n model.experiment_id,\n model.cluster_id)\n\n return {'model': {'id': id}}" ]
[ "0.694506", "0.6934816", "0.67814714", "0.6613543", "0.6562278", "0.64866006", "0.64678144", "0.6464032", "0.6308483", "0.61974365", "0.61927617", "0.61660534", "0.61602557", "0.61448705", "0.6130916", "0.61297876", "0.6126128", "0.61022365", "0.6065545", "0.6058375", "0.60465145", "0.6004944", "0.60041314", "0.6003037", "0.6000565", "0.60004586", "0.5983342", "0.59652424", "0.5958903", "0.5944485", "0.59348625", "0.5925595", "0.5923994", "0.5921511", "0.59092563", "0.5903826", "0.5903184", "0.5898397", "0.5888183", "0.58784914", "0.58624506", "0.5861371", "0.58423346", "0.58187854", "0.58054936", "0.58034056", "0.5802703", "0.58018106", "0.57924974", "0.5790934", "0.5788806", "0.57834846", "0.5781494", "0.57715994", "0.5771488", "0.5766767", "0.5756498", "0.574608", "0.5739759", "0.57246643", "0.5716862", "0.5705711", "0.56985015", "0.5695086", "0.56830335", "0.56687707", "0.56663525", "0.56661195", "0.5665044", "0.56644684", "0.56643164", "0.565487", "0.5640426", "0.5634837", "0.56257147", "0.56256247", "0.5622867", "0.5605633", "0.5602616", "0.5600149", "0.55997294", "0.55802375", "0.5576957", "0.55763", "0.556475", "0.5561475", "0.55599886", "0.55591303", "0.55566454", "0.55538756", "0.55534756", "0.5551717", "0.5550088", "0.5549087", "0.5544781", "0.55404544", "0.5539602", "0.5538007", "0.5536453", "0.55354136" ]
0.5545173
94
Save model to config file (both header and parameters).
def tofile(self, file_like):
    cfg = configparser.SafeConfigParser()
    cfg.add_section('header')
    for key, val in self.header.items():
        cfg.set('header', key, str(val))
    cfg.add_section('params')
    for param_str in self.param_strs():
        cfg.set('params', param_str[0], '%s ; %s (%s)' % param_str[1:])
    cfg.write(file_like)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_config(self, save_path: str) -> None:\n os.makedirs(save_path, exist_ok=True)\n model_hyperparameters_path = os.path.join(save_path, MODEL_HYPERPARAMETERS_FILE_NAME)\n save_json(model_hyperparameters_path, self.config_obj.to_dict())", "def save(self):\n file = open(self.path, 'w')\n self.config.write(file)\n file.close()", "def save(model, save_name):\n dirs = configparser.ConfigParser()\n dirs.read(\"config/dir_config.ini\")\n\n save_name = os.path.splitext(save_name)[0]\n path = os.path.join(dirs[\"save_dirs\"][\"models\"], save_name + \".h5\")\n info = os.path.join(dirs[\"save_dirs\"][\"models\"], save_name + \"_info.txt\")\n\n with open(info, \"w\") as file:\n model.summary(print_fn=lambda x: file.write(f\"{x}\\n\"))\n model.save(path, overwrite=False)", "def save(self):\r\n with open(self.filename, 'wb') as configfile:\r\n self.write(configfile)", "def save_model(model, model_filepath):", "def persist_model_config(self):\n config_dict = {}\n config_dict['coin_pair_dict'] = self.coin_pair_dict\n config_dict['feature_column_list'] = self.feature_column_list\n config_dict['feature_minutes_list'] = self.feature_minutes_list\n config_dict['trade_window_list'] = self.trade_window_list\n part_json = json.dumps(config_dict, indent=4)\n object_path = 'model_objects/'\n file_name = \"model_config.json\"\n self.s3_client.put_object(Bucket=self.s3_bucket,\n Key= object_path + file_name,\n Body= part_json)\n return", "def save_model(self, filename):\r\n pass", "def save_model_config(model_dir, config):\n config = config_util.prepare_config_for_save(config)\n config_path = _get_config_path(model_dir)\n with open(config_path, \"w\") as config_file:\n json.dump(config, config_file, indent=4)\n return config_path", "def save_model(model, model_path, model_name):\n config_dict = model.config\n os.makedirs(model_path, exist_ok=True)\n config_file, model_file = _get_config_file(model_path, model_name), _get_model_file(model_path, model_name)\n with open(config_file, \"w\") as f:\n json.dump(config_dict, f)\n torch.save(model.state_dict(), model_file)", "def save_model(path: Path, model, config: dict):\n with open(path / \"model.pkl\", \"wb\") as p:\n pickle.dump(model, p)", "def save_model(self, output_dir):\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n logger.info('Saving model')\n dst_config_file = os.path.join(output_dir, self.CONFIG_FILE)\n if self.fullpath_input_configfile != dst_config_file:\n shutil.copy(self.fullpath_input_configfile, dst_config_file)\n\n pickle.dump(self.word_det_rfc,\n open(os.path.join(output_dir, self.WORD_DET_RFC), 'wb'))\n pickle.dump(self.reg_coeffs, open(\n os.path.join(output_dir, self.REGRESSION_PARAMS), 'wb'))", "def save_model(self, model_path: str):", "def save_config(self):\n config.save_config(self.config, self.config_file)", "def save_model(self):\n if self.model:\n self.model.save(self.config[\"model_path\"])", "def save(path_to_model):\n pass", "def saveModel(self):\n with open(self.modelSaveFile, 'wb') as f:\n pickle.dump(self.values, f, pickle.HIGHEST_PROTOCOL)\n pickle.dump(self.policy, f, pickle.HIGHEST_PROTOCOL)", "def save_config(self, *args, **kwargs):\n raise NotImplementedError", "def save(self, save_path):\n # params\n model_params = {\n \"batch_size\": self.batch_size,\n \"lr\": self.lr,\n \"epsilon\": self.epsilon,\n \"gamma\": self.gamma,\n \"epsilon_min\": self.epsilon_min,\n \"epsilon_decay\": self.epsilon_decay,\n \"memory\": self.memory,\n \"observation_space\": self.observation_space,\n \"action_space\": 
self.action_space,\n \"_seed\": self._seed,\n }\n\n serialized_params = data_to_json(model_params)\n self.policy.save(save_path + \".h5\")\n\n # Check postfix if save_path is a string\n if isinstance(save_path, str):\n _, ext = os.path.splitext(save_path)\n if ext == \"\":\n save_path += \".zip\"\n\n # Create a zip-archive and write our params\n # there. This works when save_path\n # is either str or a file-like\n with zipfile.ZipFile(save_path, \"w\") as file_:\n # Do not try to save \"None\" elements\n file_.writestr(\"parameters\", serialized_params)", "def _save_model(self):\n with open(self.filepath, 'wb') as file:\n pickle.dump(self.cmodel, file)", "def save_model(self, path):\n pass", "def save(self,\n path,\n save_model=False):\n if save_model:\n self.model.save(path)\n\n h5dict = H5Dict(path)\n self._update_hdf5(h5dict, self.generator_train.command_dict, 'train')\n \n try:\n self._update_hdf5(h5dict, self.generator_val.command_dict, 'val')\n except AttributeError:\n pass", "def save(self, config_path):\n raise NotImplementedError()", "def save_to_conf(self):\n raise NotImplementedError", "def save(self, filename):\n # serialize model to JSON\n model_json = self._model.to_json()\n with open('models/' + filename + \".json\", \"w\") as json_file:\n json_file.write(model_json)\n\n # serialize weights to HDF5\n self._model.save_weights('models/' + filename + \".h5\")\n print(\"Saved model to disk\")", "def save_to_conf(self):\r\n raise NotImplementedError", "def save_config(self):\n\n if not self.__conf.has_section(self.section):\n self.__conf.add_section(self.section)\n\n for key in self._params:\n val = self._params[key]\n self.__conf.set(self.section, key, val)\n\n with open(self.conf_path, 'w') as f:\n self.__conf.write(f)", "def save(model: nn.Module, path):\n save_model(model, path)", "def save():\n\n env.config.save(env.config_file)", "def export_model(self, save_path: str, save_format: Optional[str] = None) -> None:", "def save( self ):\n try:\n state_dict = {\n 'epoch': self.epoch,\n 'epoch_loss': self.epoch_loss,\n 'global_step': self.global_step,\n 'mechanism_weights': self.mechanism_weights, # Save row.\n 'router_state': self.router.state_dict(), # Save router state.\n 'nucleus_state': self.nucleus.state_dict(), # Save nucleus state.\n 'optimizer_state': self.optimizer.state_dict(), # Save optimizer.\n }\n torch.save( state_dict, \"{}/model.torch\".format( self.config.neuron.full_path, self.epoch_loss ) )\n bittensor.logging.success(prefix='Saved model', sufix='<blue>{}/model.torch</blue>'.format( self.config.neuron.full_path ) )\n except Exception as e:\n logger.exception('Failed to save model with error:{}', e)", "def save_model(self, name: str):\n\n # Saving the current config\n self.cM.create_config(name + \".cfg\")\n\n # Saving all Vocabs\n pickle.dump(self.output_field.vocab, open(name + \".out_voc\", \"wb\"))\n pickle.dump(self.input_field.vocab, open(name + \".in_voc\", \"wb\"))\n\n # Saving the actual network\n if os.path.exists(name + \".auto\"):\n # If auto saving found, simply rename it\n logging.info(f\"Autostopper STOP\")\n os.rename(name + \".auto\", name + \".ph\")\n else:\n self.network.save_model(name + \".ph\")", "def save(self,model_path):\n pass\n # filename = \"Models/\"+model_path+\"1.sav\"\n # pickle.dump(self.crf_model, open(filename, 'wb'))", "def save_model(model, model_filepath):\n dump(model, model_filepath)", "def saveConfig(self):\r\n self.config[\"Settings\"] = {}\r\n settings = self.config[\"Settings\"]\r\n settings[\"datapath\"] = 
self.dataPath\r\n settings[\"videopath\"] = self.videoPath\r\n settings[\"dataoffset\"] = str(self.dataOffset)\r\n settings[\"colblindmode\"] = str(self.colBlindMode)\r\n with open(self.CONFIG_FILE,\"w\") as file:\r\n self.config.write(file)", "def save(self):\r\n with open(self.filename, 'w') as f:\r\n if self.pretty:\r\n json.dump(self.__config, f, sort_keys=False,\r\n indent=4, separators=(',', ': '))\r\n else:\r\n json.dump(self.__config, f)", "def save_model(self):\n\n # =============================================================\n # Default : pickle the trained model. Change this (and the load\n # function, below) only if the library you used does not support\n # pickling.\n # self.Model_made.save(\"Model_made.h5\")\n # self.Model_claim.save(\"Model_claim.h5\")\n # Model_made = self.Model_made\n # Model_claim = self.Model_claim\n # self.Model_made = None\n # self.Model_claim = None\n with open('pricing_model.p', 'wb') as target:\n pickle.dump(self, target)\n\n # self.Model_made = Model_made\n # self.Model_claim = Model_claim\n\n # zipObj = ZipFile(\"model.zip\",\"w\")\n # zipObj.write(\"Model_made.h5\")\n # zipObj.write(\"Model_claim.h5\")\n # zipObj.write(\"pricing_model.p\")\n # zipObj.close()", "def saveModel(model, file_name):\n with open(SAVE_PATH + file_name, \"wb\") as out_file:\n # wo do not want to save redundant data, so keys and vals are excluded\n pickle.dump(model, out_file)\n print(\"model save to\", SAVE_PATH + file_name)", "def save_model(self, model_file):\n net_params = self.get_policy_param() # get model params\n torch.save(net_params, model_file)", "def save(self, path):\n # create path if not exists\n try:\n os.stat(path)\n except:\n os.mkdir(path)\n # save models\n for key in self.parameter_dict:\n self.models[key].save(os.path.join(path, type(self).__name__ + '_%s.h5' % type(self).key_to_string(key)))\n # save historys\n with open(os.path.join(path, type(self).__name__ + ModelGrid._history_suffix), 'wb') as fp:\n pickle.dump(self.history, fp)\n # save parameter and hyperparameter dict\n with open(os.path.join(path, type(self).__name__ + ModelGrid._parameter_suffix), 'wb') as fp:\n pickle.dump((self.parameter_dict, self.hyperparameter_dict), fp)", "def save(self):\n print(\"==> Saving model to\", self.model_dir)\n self.model.save(self.model_dir)", "def save(self, path: utils.URLPath):\n save_somclassifier_config(self.config, path / \"config.json\")\n self.model.save(str(path / \"model.h5\"))\n io_functions.save_joblib(self.binarizer, path / \"binarizer.joblib\")\n\n io_functions.save_json(self.data_ids[\"validation\"], path / \"ids_validate.json\")\n io_functions.save_json(self.data_ids[\"train\"], path / \"ids_train.json\")", "def save(self, model_file):\n pickle.dump(self, open(model_file, 'wb'))", "def save_model(self, fpath):\n self._make_model_folder(fpath)\n self.model.save(os.path.join(fpath, U.MODEL_NAME), save_format=\"h5\")\n return", "def save(self):\n for p, c in self.configs_:\n c.write(p)", "def save(config: PyTextConfig, model: Model, meta: CommonMetadata) -> None:\n save_path = config.save_snapshot_path\n print(f\"Saving pytorch model to: {save_path}\")\n model.save_modules(base_path=config.modules_save_dir)\n state = OrderedDict(\n [\n (DATA_STATE, meta),\n (CONFIG_JSON, config_to_json(PyTextConfig, config)),\n (MODEL_STATE, model.state_dict()),\n ]\n ) # type: OrderedDict\n torch.save(state, save_path)", "def save_params(model_name: str):\n with open(model_name + '.params', 'w') as f:\n json.dump(pr.__dict__, f)", "def 
save_model_to_file(self, fn):\n js = json.loads(self.model.to_json())\n\n # Add this model's params\n js[\"rnn\"] = self.to_json()\n with open(fn, 'w') as fout:\n json.dump(js, fout)", "def save(self, model_path: str) -> None:\n metadata_string = json.dumps({ \"classes\": self.classes })\n with open(os.path.join(model_path, \"metadata.json\"), \"w\") as metadata_file:\n metadata_file.write(metadata_string)\n with self.graph.as_default():\n with self.session.as_default():\n self.model.save_weights(os.path.join(model_path, \"weights.h5\"))", "def save(self,\n filename):\n\n if self.model is None:\n raise ValueError('No model -- train or load model before saving!')\n\n # Check paths\n create_missing_folders([os.path.dirname(filename)])\n\n # Save settings\n logging.info('Saving settings to %s_settings.json', filename)\n\n settings = {'method': self.method,\n 'method_type': self.method_type,\n 'n_observables': self.n_observables,\n 'n_parameters': self.n_parameters,\n 'n_hidden': list(self.n_hidden),\n 'activation': self.activation}\n\n with open(filename + '_settings.json', 'w') as f:\n json.dump(settings, f)\n\n # Save state dict\n logging.info('Saving state dictionary to %s_state_dict.pt', filename)\n torch.save(self.model.state_dict(), filename + '_state_dict.pt')", "def _save_model(self):\n save_generic(self.model, self.model_pkl_fname)", "def save(self,fname=None,save_name=None,ext='.model',save_txt=False,extra_info=None):\n import cPickle\n if save_name is None: save_name = self.model_name\n if fname is None:\n savepath = os.path.join(state_dir,save_name + '_' + self.tstring)\n if not os.path.isdir(savepath): os.makedirs(savepath)\n fname = os.path.join(savepath, 'model'+ext)\n else:\n savepath = os.path.split(fname)[0]\n\n sdict = {}\n for sname in self._params:\n save_prop = getattr(self,sname)\n if hasattr(save_prop,'get_value'): save_prop = save_prop.get_value() # in case we have CudaNdarrayType -> NP\n sdict[sname] = save_prop\n fh = open(fname,'wb')\n cPickle.dump(sdict,fh)\n fh.close()\n if verbose: print \"saving model to\", fname\n\n if save_txt:\n repr_string = self.__repr__()\n if not extra_info is None:\n repr_string += '\\n'\n if isinstance(extra_info,str):\n repr_string += extra_info\n elif isinstance(extra_info,list):\n for extra_item in extra_info:\n repr_string += str(extra_item)\n\n model_details_fname = os.path.join(savepath,'model_details.txt')\n with open(model_details_fname,'w') as fh:\n fh.write(repr_string)\n\n return fname", "def save_model(self,\n file_path: str = \"models/world_model.h5\"\n ):\n self.model.save(file_path)", "def save_model(self, output_model: ModelEntity):\n logger.info(\"called save_model\")\n buffer = io.BytesIO()\n hyperparams_str = ids_to_strings(cfg_helper.convert(self._hyperparams, dict, enum_to_str=True))\n labels = {label.name: label.color.rgb_tuple for label in self._labels}\n model_ckpt = torch.load(self._model_ckpt)\n modelinfo = {\n \"model\": model_ckpt,\n \"config\": hyperparams_str,\n \"labels\": labels,\n \"VERSION\": 1,\n }\n\n torch.save(modelinfo, buffer)\n output_model.set_data(\"weights.pth\", buffer.getvalue())\n output_model.set_data(\n \"label_schema.json\",\n label_schema_to_bytes(self._task_environment.label_schema),\n )\n output_model.precision = self._precision", "def save_model(model):\n model.to_disk(\"../model/custom_ner_model\")", "def save_model(self, filename):\n self.model.save('models/' + str(filename))", "def save_model(model):\n # ***\n # Please remove the comment to enable model save.\n # However, it will 
overwrite the baseline model we provided.\n # ***\n model.save(\"model/model.h5\")\n print(\"Model Saved Successfully.\")", "def save_model_params(self, full_path):\n \n file_to_save = file(full_path, 'wb')\n \n print(\"Saving model parameters to %s\"%full_path)\n \n cPickle.dump(self.theta, \n file_to_save, \n protocol=cPickle.HIGHEST_PROTOCOL)\n \n file_to_save.close()", "def save_model(self, fname):\n self.get_booster().save_model(fname)", "def save(self, path, suffix=0):\n if os.path.exists(path) and not self.overwrite:\n raise FileExistsError(\"Overwrite is False!\")\n else :\n os.makedirs(path, exist_ok=True)\n os.makedirs(path + \"/params\", exist_ok=True)\n info = {key: getattr(self, key) for key in self.attr_keys \n if key != \"model_params\"}\n pickle.dump(info, open(path + \"/data.b\", \"wb\"))\n pickle.dump(self.model_params, open(path + \"/params/param_\" + str(suffix) + \".b\", \"wb\"))\n yaml.dump(self.info_view(), open(path + \"/info.yaml\", \"w\"))", "def save(self):\n with open(self._config, 'w') as f:\n json.dump(self.data, f, indent=2, sort_keys=True)", "def save_model(self, model):\n # serialize model to JSON\n model_json = model.to_json()\n os.makedirs(os.path.dirname(self.model_json_path), exist_ok=True)\n with open(self.model_json_path, \"w\") as json_file:\n json_file.write(model_json)\n\n # serialize weights to HDF5\n model.save_weights(self.model_weights_path)\n print(\"Saved model to disk\")", "def save_model(self, filename):\n\t\tpickle.dump(self, open(filename, 'wb'))\n\t\tprint('Model saved in',filename)", "def save(self):\n # Always write out components in alphabetical order for determinism,\n # especially in tests.\n for function_name in sorted(self._components.keys()):\n self._config_parser[_COMPONENTS_SECTION][\n function_name] = self._components[function_name]\n\n with open(str(self._config_filepath), 'w') as f:\n self._config_parser.write(f)", "def save(\n self,\n modelSavePath\n ):\n pass", "def save_model(self):\n\n print('Save model')\n self.feature_extractor.save_weights(\n self.path_save_model + self.name_model + '.h5')\n\n print('Mean and std')\n np.save(self.path_save_model + 'mean.npy', self.mean)\n np.save(self.path_save_model + 'std.npy', self.std)", "def save_model(name, model):\n # Load path\n project_dir = Path(__file__).resolve().parents[2]\n model_path = os.path.join(project_dir, 'models', name + '.h5')\n\n # Save model\n model.save(model_path)", "def save_config(self):\n\n return self.perform_action('/mgmtd/db/save')", "def save_model(self, path):\n save_path = self._saver.save(self._sess, path + '/model.ckp')\n\n attrs_to_save = {\n '_m': self._m,\n '_n': self._n,\n '_neighbourhood': self._neighbourhood,\n # '_topography': self._topography,\n '_num_iterations': self._num_iterations,\n '_Wts': self._Wts,\n '_locations': self._locations,\n '_centroid_grid': self._centroid_grid,\n '_learned': self._learned,\n 'abnormal_dist': self.abnormal_dist\n }\n\n output = open(path + '/som.pkl', 'wb')\n pickle.dump(attrs_to_save, output)\n output.close()\n print(\"Model saved in path: %s\" % save_path)\n\n pd.DataFrame(self._centroid_grid).to_csv(path + '/grid.csv', header=False, index=False)\n print('Grid saved to ' + path + ' for easy reading')", "def save_model_to_file(self, fn):\n js = json.loads(self.model.to_json())\n\n # Add this model's params\n js[\"rnn\"] = self.to_json()\n with open(fn, 'w') as fout:\n json.dump(js, fout)\n with open(os.path.join(self.model_dir, \"model.summary\"),'w') as summaryFile:\n self.model.summary(print_fn = lambda 
s: print(s, file=summaryFile))", "def save_model(self, path='./model_checkpoint', name='tf_model'):\n json_config = self._model.to_json()\n with open(os.path.join(path, name + '.json'), 'w') as json_file:\n json_file.write(json_config)\n weights_path = os.path.join(path, name + '_weights.h5')\n self._model.save_weights(weights_path)", "def save(self, path=None):\n if path is None:\n path = os.path.join(logger.get_dir(), \"model.pkl\")\n\n with tempfile.TemporaryDirectory() as td:\n save_state(os.path.join(td, \"model\"))\n arc_name = os.path.join(td, \"packed.zip\")\n with zipfile.ZipFile(arc_name, 'w') as zipf:\n for root, dirs, files in os.walk(td):\n for fname in files:\n file_path = os.path.join(root, fname)\n if file_path != arc_name:\n zipf.write(file_path, os.path.relpath(file_path, td))\n with open(arc_name, \"rb\") as f:\n model_data = f.read()\n with open(path, \"wb\") as f:\n cloudpickle.dump((model_data, self._act_params), f)", "def save_model(self):\n torch.save(self.get_params(), 'code/lr-model.pt')", "def save_model(file_name, ep, model, optimizer):\n\n torch.save({\n 'epoch': ep,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n }, file_name) \n \n return", "def save_model(model, model_filepath): \n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(self, model, model_filepath):\n joblib.dump(model, model_filepath)", "def save_model(self, model_file):\n net_params = self.get_policy_param() # get model params\n # torch.save(net, save_epoch + '.pkl') # 保存整个网络\n torch.save(net_params, model_file)", "def save(self):\n \n f = file(self.conf_file, \"w\")\n f.write(header + \"\\n\".join(map(str, self.db)) + \"\\n\")\n f.close()", "def save():\n with open(CONFIG_FILE, 'w') as f:\n json.dump(config, f, indent=4, sort_keys=True)", "def save(self,sess):\n self.saver.save(sess,\"./Models/\" + self.mod_name + \".ckpt\")", "def save_model(self, model_file):\n m = {'b':self.b,\n 'w':self.w.tolist()}\n\n with open(model_file, 'w') as f:\n json.dump(m, f)", "def WriteSrnModelToFile(filename, model):\n\n # Write the .hpp file\n WriteHeaderFileForSrnModel(filename, model)\n\n # Write the .cpp fil\n WriteSourceFileForSrnModel(filename, model)", "def save(self) -> None:\n self._client.save_config()", "def save(self):\n try:\n with open(self._filename, 'w') as conf_file:\n conf_file.write(json.dumps(self._data))\n except OSError:\n _LOGGER.exception(\"Can't store config in %s\", self._filename)", "def save_model(self):\n pass", "def save_model(self, file=None):\n return None", "def save_config(self):\n data = json.dumps(self.cfg)\n\n try:\n file = open(self.cfg_file_name, 'w')\n file.write(data)\n except OSError as err:\n print(\"can't save property: {0}\".format(err))\n else:\n file.close()", "def save(self, uri):\r\n pf = PyFolder(os.path.dirname(os.path.realpath(uri)), allow_override=True)\r\n pf[os.path.basename(uri)+\"_options.json\"] = {\r\n 'input_cells': self._input_cells,\r\n 'latent_space': self._latent_space,\r\n }\r\n\r\n save_model(self._autoencoder, uri+\"_lstm_autoencoder.hdf5\")\r\n save_model(self._encoder, uri+\"_lstm_encoder.hdf5\")", "def save(self, dest: str) -> None:\n # Get the state dictionary\n model_state = self.state_dict()\n\n # Add some information for our specific module:\n model_state['additional_state'] = {}\n model_state['additional_state']['configuration'] = self._configuration\n\n # Serialize model\n torch.save(model_state, dest)", "def save_model(model, model_filepath):\n pickle.dump( model, open( 
model_filepath, \"wb\" ) )", "def save_model(self, epoch):\n ckpt_path = os.path.join(self.config.save_path, f'{epoch}.pkl')\n print(f'Save parameters to {ckpt_path}')\n torch.save(self.model.state_dict(), ckpt_path)", "def _save_model_info(self, model):\r\n with open_(self.output_path / \"model.info\", \"w+\") as f:\r\n f.write(model.info)", "def save(self):\n self.__config.sync()\n self.__saved = True\n Logger().debug(\"Configuration saved\")", "def save_model_params(self):\n params_dict = self.get_model_params()\n if self.params_filepath is not None:\n file_params = data_functions.load_json(self.params_filepath)\n if file_params != params_dict: # cheking if the parametes for this\n # session are diffrent then those\n # in the source file\n self.session_number += 1\n\n curr_file_name = (\n self.params_file_name + PARAMS_UPDATE_FORMAT + 'json').format(\n sess=self.session_number,\n steps=self.samples_seen)\n\n data_functions.save_json(params_dict, curr_file_name, self.curr_folder)\n self.params_filepath = os.path.join(self.curr_folder, curr_file_name)", "def save(self):\n\t\tself.CONFIG.save()\n\t\tself.temp_files.save()", "def save_configuration(model, NN_type, num_of_cells, num_of_CUEs = None, num_of_D2Ds = None):\n\n # Insert debugging assertions\n assert type(model) is keras.engine.sequential.Sequential, \"The 'model' must be sequential model.\"\n assert type(NN_type) is str, \"The 'NN_type' must be string.\"\n assert num_of_cells in constants.cell_range, f\"The 'num_of_cells' must be element in {constants.cell_range}.\"\n assert num_of_CUEs in constants.CUE_range or num_of_CUEs is None, f\"The 'num_of_CUEs' must be element in {constants.CUE_range}.\"\n assert num_of_D2Ds in constants.D2D_range or num_of_D2Ds is None, f\"The 'num_of_D2Ds' must be element in {constants.D2D_range}.\"\n\n # Get the path to the file to save the configuration to\n model_dir = pathlib.Path.cwd().joinpath('model')\n cell_dir = f'{num_of_cells}-cell'\n model_dir = model_dir.joinpath(cell_dir)\n\n if num_of_CUEs and num_of_D2Ds:\n file_name = f'configuration_Cell_{num_of_cells}_CUE_{num_of_CUEs}_D2D_{num_of_D2Ds}_{NN_type}.json'\n else:\n file_name = f'configuration_Cell_{num_of_cells}_{NN_type}.json'\n\n file_path = str(model_dir.joinpath(file_name))\n\n # Save the configuration in JSON format\n configuration = model.to_json()\n\n with open(file_path, 'w') as json_file:\n json_file.write(configuration)", "def save_model_config(model: BaseEstimator,\n base_path: Path,\n local_path: Path = Path('.')) -> dict:\n filepath = base_path.joinpath(local_path)\n save_model(model, filepath=filepath, save_dir='model')\n cfg_model = {\n 'flavour': Framework.SKLEARN.value,\n 'src': local_path.joinpath('model')\n }\n return cfg_model", "def save_model(self):\n joblib.dump(self.pipeline, \"model.joblib\")", "def write_model_data(model, filename):\n data = lasagne.layers.get_all_param_values(model)\n filename = os.path.join('./', filename)\n filename = '%s.%s' % (filename, 'params')\n with open(filename, 'w+') as f:\n pickle.dump(data, f)", "def save_model(self):\n filename=self.name + '_words'\n file_write(filename, self.words)\n\n filename2=self.name+'_word_lengths'\n file_write(filename2, self.word_lengths)\n\n filename3=self.name+'_stems'\n file_write(filename3, self.stems)\n\n filename4=self.sentence_lengths+'_sentence_lengths'\n file_write(filename4, self.sentence_lengths)\n\n filename5= self.endings+'_endings'\n file_write(filename5, self.endings)", "def save_model(model, model_filepath):\n\n outfile = 
open('model_filepath','wb')\n pickle.dump(model, outfile)\n outfile.close()", "def save_config(self):\n with open(self.config_file, 'w') as fout:\n json.dump({'name_dict': self._name_dict, 'metric_dict': self._metric_dict, 'credential_path': self.credential_path, 'path_for_worksheet_name': self.path_for_worksheet_name}, fout)" ]
[ "0.763178", "0.74892414", "0.74538445", "0.74108565", "0.7388762", "0.73329777", "0.72090554", "0.7158334", "0.7151382", "0.7149014", "0.71323335", "0.71320057", "0.7099478", "0.7097612", "0.7079394", "0.7034409", "0.6995227", "0.69945264", "0.6982993", "0.69599545", "0.6917935", "0.6904722", "0.6898954", "0.68989027", "0.6889255", "0.6881435", "0.68806225", "0.68525386", "0.6852025", "0.684863", "0.68418646", "0.683934", "0.6838346", "0.6830717", "0.6821782", "0.6810863", "0.6808994", "0.67895293", "0.67867076", "0.6783859", "0.67757213", "0.6772699", "0.6771296", "0.67562944", "0.675574", "0.6754017", "0.6751607", "0.67499757", "0.6727668", "0.6719769", "0.6710497", "0.6701824", "0.66992146", "0.66972125", "0.66965103", "0.6695221", "0.6693449", "0.6685882", "0.66821945", "0.6678721", "0.66759765", "0.6668318", "0.66563517", "0.6653522", "0.6650618", "0.6646544", "0.66453874", "0.6641911", "0.6636905", "0.66047835", "0.6602958", "0.66006315", "0.65991545", "0.65943176", "0.6593505", "0.65904176", "0.6589002", "0.6583041", "0.6573184", "0.65688294", "0.65445673", "0.65410686", "0.65359133", "0.65352976", "0.6530639", "0.6529956", "0.6529447", "0.65282404", "0.6527035", "0.65231186", "0.65196973", "0.6519408", "0.65186054", "0.6517391", "0.65172917", "0.6515633", "0.6514598", "0.6510377", "0.6509199", "0.6507969", "0.65068555" ]
0.0
-1
Load model from config file (both header and parameters).
def fromfile(self, file_like):
    defaults = dict((p.name, p._to_str(p.default_value)) for p in self)
    if future.utils.PY2:
        cfg = configparser.SafeConfigParser(defaults)
    else:
        cfg = configparser.ConfigParser(defaults, inline_comment_prefixes=(';', '#'))
    try:
        cfg.readfp(file_like)
        if cfg.sections() != ['header', 'params']:
            raise configparser.Error('Expected sections not found in model file')
    except configparser.Error as exc:
        filename = getattr(file_like, 'name', '')
        msg = 'Could not construct %s from %s\n\nOriginal exception: %s' % \
              (self.__class__.__name__,
               ('file %r' % (filename,)) if filename else 'file-like object', str(exc))
        raise BadModelFile(msg)
    self.header = dict(cfg.items('header'))
    for param in defaults:
        self.header.pop(param.lower())
    for param in self:
        param.value_str = cfg.get('params', param.name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def load_model(\n self,\n model_name: str,\n headers: dict[str, t.Any] = ...,\n config: str = ...,\n files: dict[str, str] = ...,\n ) -> None:", "def load_config(filename):\n with open(filename, \"r\") as my_file:\n my_file = my_file.read()\n return K.models.model_from_json(my_file)", "def load_model(self, filename):\r\n pass", "def load_model(self, model_path: str):", "def load(path_to_model):\n pass", "def load_model(self, path):\n pass", "def load_model(self):\n pass", "def load(path, overwrite=False, suffix=0):\n\n if not os.path.exists(path):\n raise FileNotFoundError(\"Config file not found at {}\".format(path))\n else:\n info = pickle.load(open(path + \"/data.b\", \"rb\"))\n params = pickle.load(open(path + \"/params/param_\" + str(suffix) + \".b\", \"rb\"))\n info[\"model_params\"] = params\n return Config(info, overwrite=overwrite)", "def _load_model_conf(path, run_id=None):\n if run_id:\n path = tracking.utils._get_model_log_dir(path, run_id)\n conf_path = os.path.join(path, \"MLmodel\")\n model = Model.load(conf_path)\n if FLAVOR_NAME not in model.flavors:\n raise Exception(\"Format '{format}' not found not in {path}.\".format(format=FLAVOR_NAME,\n path=conf_path))\n return model.flavors[FLAVOR_NAME]", "def load_model(self) -> Any:", "def load_model(self, **params):\n \t# file_name = params['name']\n # return pickle.load(gzip.open(file_name, 'rb'))", "def load_model(model_config_path, model_weights_path=None):\n with open(model_config_path, 'r') as f:\n model = model_from_yaml(f.read())\n\n if model_weights_path is not None:\n model.load_weights(model_weights_path)\n\n model.summary()\n\n return model", "def __load_model(self):\n loaded = load(self.__file_name)\n self.__model = loaded['model']\n self.__meta_data = loaded['metadata']\n self.__is_ready = True", "def load_model(filepath=None, config=None, item=None):\n\n if filepath is None:\n raise ValueError(\"The filepath is None, please check the filepath is in the config file\")\n if '.h5' in filepath:\n keras_model = lm(filepath)\n reader = FeatureReader(config)\n features = reader.get_feature(dt.now())\n f = features[item]\n # for keras bug\n f = f.values.reshape(1,4,12)\n v = keras_model.predict(f)\n return keras_model\n else:\n return joblib.load(filepath)", "def load(\n self,\n modelLoadPath\n ):\n pass", "def load_model(\n model_file_path: str = \"\",\n model_name: str = \"default\",\n cfg_path: str = None,\n) -> torch.nn.Module:\n cfg_path = cfg_path or Path(__file__).parent / \"config.yaml\"\n # assert model_name in model_file_path.split('_')[0], \"The checkpoint doesn't match with the selected model name\"\n\n # Load config file\n cfg = load_yaml_config(cfg_path)\n\n # Load pretrained weights.\n model = get_model(model_name, cfg)\n\n state_dict = torch.load(model_file_path)\n model.load_state_dict(state_dict)\n return model", "def load_model(self, name: str):\n\n # Loading config\n self.cM = ConfigManager(name + \".cfg\")\n\n # Loading Vocabs\n out_voc = pickle.load(open(name + \".out_voc\", \"rb\"))\n in_voc = pickle.load(open(name + \".in_voc\", \"rb\"))\n\n self.output_field.vocab = out_voc\n self.input_field.vocab = in_voc\n\n num_classes = len(self.output_field.vocab)\n embed = nn.Embedding.from_pretrained(self.input_field.vocab.vectors)\n self.network = FrameIDNetwork(self.cM, embed, num_classes)\n\n self.network.load_model(name + \".ph\")", "def load_config():\n model_type, run_name, run_comment, epoch, verbose = get_args()\n name = run_name + '-' + run_comment\n if model_type == \"s2s\": \n run_title 
= \"seq2seq\"\n else:\n run_title = \"def2vec\"\n path = \"outputs/{}/logs/{}/config.json\".format(run_title, name)\n config = None\n with open(path) as f:\n config = dict(json.load(f))\n config = load_config(eval=True)\n return (config, name, model_type)", "def load_model(self, file=None):\n return None", "def load_model(self, model_dir):\n assert os.path.exists(model_dir), \\\n \"Folder %s with model files does not exist\" % (model_dir)\n\n config_file = os.path.join(model_dir, self.CONFIG_FILE)\n assert os.path.exists(config_file), \\\n \"Config file not found in model folder %s\" % (model_dir)\n\n rfc_file = os.path.join(model_dir, self.WORD_DET_RFC)\n assert os.path.exists(rfc_file), \\\n \"RFC pickle file not found in model folder %s\" % (model_dir)\n\n coeffs_file = os.path.join(model_dir, self.REGRESSION_PARAMS)\n assert os.path.exists(coeffs_file), \\\n \"Coefficients file not found in model folder %s\" % (model_dir)\n\n cfg_loaded_ok = self.load_config_file(config_file)\n assert cfg_loaded_ok, 'Config file params could not be loaded'\n\n self.word_det_rfc = pickle.load(open(rfc_file))\n self.reg_coeffs = pickle.load(open(coeffs_file))\n self.word_det_cnn = DataProcessorClient(self.word_det_model_name)\n self.fullpath_input_configfile = config_file\n if self.bb_reg_model_name:\n self.bb_reg = BoundingBoxRegressor(self.bb_reg_model_name)", "def _load_model(self):\n with open(self.filepath, 'rb') as file:\n self.cmodel = pickle.load(file)", "def load(config_file: typing.TextIO) -> \"TrainingConfig\":\n return TrainingConfig.from_json(config_file.read())", "def _load_model(self):\n self._load_scaler('scaler.save')\n self._load_encoder('encoder0.save', 0)\n self._load_encoder('encoder1.save', 1)\n self._load_neural_network('model.json', 'model.h5')\n return", "def _load_model_from_metafile(self, model: str) -> Tuple[Config, str]:\n model = model.lower()\n\n assert self.scope is not None, (\n 'scope should be initialized if you want '\n 'to load config from metafile.')\n assert self.scope in MODULE2PACKAGE, (\n f'{self.scope} not in {MODULE2PACKAGE}!,'\n 'please pass a valid scope.')\n\n repo_or_mim_dir = BaseInferencer._get_repo_or_mim_dir(self.scope)\n for model_cfg in BaseInferencer._get_models_from_metafile(\n repo_or_mim_dir):\n model_name = model_cfg['Name'].lower()\n model_aliases = model_cfg.get('Alias', [])\n if isinstance(model_aliases, str):\n model_aliases = [model_aliases.lower()]\n else:\n model_aliases = [alias.lower() for alias in model_aliases]\n if (model_name == model or model in model_aliases):\n cfg = Config.fromfile(\n osp.join(repo_or_mim_dir, model_cfg['Config']))\n weights = model_cfg['Weights']\n weights = weights[0] if isinstance(weights, list) else weights\n return cfg, weights\n raise ValueError(f'Cannot find model: {model} in {self.scope}')", "def test_load_model_config(self) -> None:\n result = load_model_config()\n self.assertIs(type(result), dict)\n self.assertIsNot(result, {})", "def load_model(self):\n if os.stat('code/lr-model.pt').st_size == 0:\n return\n params = torch.load('code/lr-model.pt')\n self.set_params(params)", "def load_model_config(model_dir):\n config_path = _get_config_path(model_dir)\n with open(config_path, \"r\") as config_file:\n return json.load(config_file)", "def load_model(self) -> None:\n\n try:\n model_class = MODEL_TYPES[self.model_type]\n except KeyError:\n raise KeyError(f\"model type: {self.model_type} not supported\")\n\n if (\n os.path.exists(self.resources_path)\n and len(os.listdir(self.resources_path)) > 0\n ):\n 
model_name_or_path = self.resources_path\n else:\n model_name_or_path = self.model_name\n\n if self.model_type == \"stable_diffusion\":\n self.model = model_class.from_pretrained(\n model_name_or_path,\n use_auth_token=self.auth_token,\n )\n else:\n self.model = model_class.from_pretrained(model_name_or_path)\n\n self.model.to(self.device)", "def load_model(model_path, model_name, net=None):\n config_file, model_file = _get_config_file(model_path, model_name), _get_model_file(model_path, model_name)\n assert os.path.isfile(\n config_file\n ), f'Could not find the config file \"{config_file}\". Are you sure this is the correct path and you have your model config stored here?'\n assert os.path.isfile(\n model_file\n ), f'Could not find the model file \"{model_file}\". Are you sure this is the correct path and you have your model stored here?'\n with open(config_file) as f:\n config_dict = json.load(f)\n if net is None:\n act_fn_name = config_dict[\"act_fn\"].pop(\"name\").lower()\n act_fn = act_fn_by_name[act_fn_name](**config_dict.pop(\"act_fn\"))\n net = BaseNetwork(act_fn=act_fn, **config_dict)\n net.load_state_dict(torch.load(model_file, map_location=device))\n return net", "def initialize_model_from_cfg(args, gpu_id=0):\n model = eval(args.model).loot_model(args)\n model.eval()\n\n if args.cuda:\n model.cuda()\n\n if args.load_ckpt:\n load_name = args.load_ckpt\n logger.info(\"loading checkpoint %s\", load_name)\n checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)\n net_utils.load_ckpt(model, checkpoint['model'])\n\n if args.load_detectron:\n logger.info(\"loading detectron weights %s\", args.load_detectron)\n load_detectron_weight(model, args.load_detectron)\n\n\n return model", "def load_model(model_dir):\n model_params = sketch_rnn_model.get_default_hparams()\n with tf.gfile.Open(os.path.join(model_dir, 'model_config.json'), 'r') as f:\n model_params.parse_json(f.read())\n\n model_params.batch_size = 1 # only sample one at a time\n eval_model_params = sketch_rnn_model.copy_hparams(model_params)\n eval_model_params.use_input_dropout = 0\n eval_model_params.use_recurrent_dropout = 0\n eval_model_params.use_output_dropout = 0\n eval_model_params.is_training = 0\n sample_model_params = sketch_rnn_model.copy_hparams(eval_model_params)\n sample_model_params.max_seq_len = 1 # sample one point at a time\n return [model_params, eval_model_params, sample_model_params]", "def load_model(self, fname: str) -> None:\n checkpoint_data = torch.load(fname)\n\n # Load the models\n # P-Net\n model_import_path = checkpoint_data['p_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['p_net']['model_name'])\n self.p_net = mod()\n self.p_net.set_params(checkpoint_data['p_net'])\n # Q-Net\n model_import_path = checkpoint_data['q_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['q_net']['model_name'])\n self.q_net = mod()\n self.q_net.set_params(checkpoint_data['q_net'])", "def load_model() -> None:\n global model\n\n if app.testing:\n current_dir = os.path.dirname(__file__)\n model_path = os.path.join(current_dir, \"models/model.pkl\")\n else:\n model_path = os.getenv(\"PATH_TO_MODEL\")\n\n if model_path is None:\n err = f\"PATH_TO_MODEL {model_path} is None\"\n raise RuntimeError(err)\n\n with open(model_path, \"rb\") as model_file:\n model = pickle.load(model_file)", "def load_model(cls, src_path, update_dict=None, steps=None):\n\n if steps is not 
None:\n json_file, _ = cls.get_file_via_steps(src_path, steps, 'json', STEPS_REGEX)\n hdf5_file, samples_seen = cls.get_file_via_steps(src_path, steps, 'hdf5',\n STEPS_REGEX)\n\n\n else:\n json_file = max(glob.iglob(os.path.join(src_path, '*.json')),\n key=os.path.getctime)\n hdf5_file = max(glob.iglob(os.path.join(src_path, '*.hdf5')),\n key=os.path.getctime)\n\n samples_seen = cls.get_pattern(hdf5_file, STEPS_REGEX)\n samples_seen = samples_seen if samples_seen is not None else 0\n\n session_number = cls.get_pattern(hdf5_file, SESS_REGEX)\n session_number = session_number if session_number is not None else 1\n\n params_dict = data_functions.load_json(json_file)\n\n params_dict['pretrained_weights'] = hdf5_file\n\n #TODO: try to rearange loading weights\n # if 'weights' in os.path.basename(hdf5_file):\n # params_dict['pretrained_weights'] = hdf5_file\n # else:\n # params_dict['checkpoint'] = hdf5_file\n\n params_dict['train_time'] = os.path.basename(src_path)\n if update_dict is not None:\n if 'pretrained_weights' or 'checkpoint' in update_dict:\n params_dict['pretrained_weights'] = None\n params_dict['checkpoint'] = None\n params_dict.update(update_dict)\n\n model = ClarifruitUnet(**params_dict)\n logger.info(f\"continuing training from {os.path.basename(hdf5_file)}\")\n\n setattr(model, 'samples_seen', samples_seen)\n setattr(model, 'params_filepath', json_file)\n setattr(model, 'session_number', session_number)\n\n return model", "def load_model(self):\n try:\n self.model = Word2Vec.load(self.config[\"model_path\"])\n self.model.init_sims(replace=True)\n except Exception as e:\n print(e)\n print(\"error in model loading!\")", "def load_model(self,\n model: Union[str, io.IOBase, DM],\n name: Optional[str] = None):\n super().load_model(model, name=name)\n content = self.model[self.modelroot]\n\n self.key = content['key']\n self.id = content['id']\n self.family = content['system-family']\n self.__parameters = []\n for cp in content.aslist('calculation-parameter'):\n self.__parameters.append(dict(cp))", "def load_model(self):\n self.__model = tf.keras.models.load_model(\n os.path.join(self.model_path, \"model.h5\")\n )\n print(\"[INFO] Model loaded!\")\n\n tok_pth = os.path.join(self.model_path, \"tokenizer.json\")\n with open(tok_pth, \"r\") as f:\n self.__tokenizer = tf.keras\\\n .preprocessing\\\n .text\\\n .tokenizer_from_json(f.read())\n print(\"[INFO] Tokenizer loaded!\")\n\n meta_pth = os.path.join(self.model_path, \"meta.json\")\n with open(meta_pth, \"r\") as f:\n meta = json.load(f)\n self.__title_len = meta[\"title_pad_length\"]\n self.__body_len = meta[\"body_pad_length\"]\n\n self.load_explainer()\n print(\"[INFO] Explainer loaded!\")", "def load_model_config(model_dir: str):\n config_path = get_model_config_path(model_dir)\n config_content = file_io.read_file_to_string(config_path)\n config = yaml.safe_load(config_content)\n\n return config", "def load(self, path):\n load_model(path, self)", "def load(cls, path, forced_config=None, print_vars=False, frozen_graph=True,\n tf_session_target='', tf_device=''):\n # prepare config\n if forced_config is not None:\n c = forced_config\n model_dir = path\n else:\n # load config from file\n if os.path.isdir(path): # path is dir\n c = json.load(open(path + '/config.json'))\n model_dir = path\n\n # path is json file\n elif path.endswith('.json'):\n c = json.load(open(path))\n model_dir = os.path.dirname(path)\n\n # path is some filename\n else:\n c = json.load(open(os.path.dirname(path) + '/config.json'))\n model_dir = 
os.path.dirname(path)\n\n # get model filename\n checkpoint = tf.train.latest_checkpoint(model_dir)\n meta_graph = checkpoint + '.meta' if checkpoint is not None else 'no-checkpoint-found'\n\n # reset\n tf.set_random_seed(1234)\n tf.reset_default_graph()\n\n # model setting up\n model = cls(c)\n model.c = c\n config_proto = make_config_proto(c) # prepare proto config for tensorflow\n\n target = tf_session_target if tf_session_target else c.get('tf.session.target', '')\n device = tf_device if tf_device else c.get('tf.device', '')\n if target or device:\n print('Model Loader: tf session target:', target, 'and device:', device)\n\n # frozen graph loading\n if frozen_graph and not c.get('tf.skip_frozen_graph', False):\n frozen_path = os.path.join(model_dir, 'frozen_graph.pb')\n\n # convert graph to frozen if no file\n if not os.path.exists(frozen_path):\n freeze_graph.freeze_graph(None, None, True, checkpoint, c['predict.output'].split(':')[0],\n None, None, frozen_path, True, None, input_meta_graph=meta_graph)\n print('Frozen model converted and saved:', frozen_path)\n\n # load frozen model\n tf.reset_default_graph()\n with tf.device(device):\n cls.load_frozen_graph(frozen_path)\n model.sess = tf.Session(target=target, config=config_proto)\n # FIXME: tensorflow bug: intra_op_parallelism_threads doesn't affects system right after freeze_graph()\n\n # regular tensorflow model loading\n else:\n model.sess = tf.Session(target=target, config=config_proto)\n with tf.device(device):\n cls.load_meta_graph(model, meta_graph, checkpoint)\n\n # print variables from loaded model\n if print_vars:\n [print(i) for i in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)]\n print('--- Operations ---')\n [print(n.name) for n in tf.get_default_graph().as_graph_def().node]\n\n cls.check_deprecated(c)\n return model", "def load_model(self, filename):\n filename = path.join(self.root_path, f'models/{filename}.pkl')\n self.model = pickle.load(open(filename, \"rb\"))\n print('Successfully loaded model from '+filename)", "def load_model(\n model_path=filepath + \"/trained_models/hi2en/\", model_file_name=\"model.h5\"\n):\n model_path = (\n filepath + \"/trained_models/{}/\".format(model_path)\n if model_path in [\"en2hi\", \"hi2en\"]\n else model_path\n )\n config = SConfig(configuration_file=model_path + \"config.pkl\")\n s2s = Seq2Seq(config)\n s2s.load_model(path_to_model=model_path, model_file_name=model_file_name)\n return s2s", "def load_existing(cls,config,silent=True):\n use_cuda = True if torch.cuda.is_available() else False\n if config.wandb_model:\n wandb_setup(config)\n\n ## load original configuration\n orig_config = None \n with open(os.path.join(config.existing_model,\"trainer_config.json\")) as oconfig:\n orig_config = Values(json.loads(oconfig.read()))\n orig_config.existing_model = config.existing_model\n\n model = NERModel(\n orig_config.model_name,\n orig_config.existing_model,\n use_cuda=use_cuda,\n args={\"silent\" : silent},\n )\n return cls(model,orig_config)", "def reload_from_file(self, path, model_num=0):\n self.struct, self.header = self.load_from_file(path)\n self._setup_self(model_num)", "def load_model(self):\r\n try:\r\n self.model = CRNN_STN(self.crnn_cfg())\r\n self.model.load_weights(config.CRNN_Model_Path)\r\n except:\r\n print('Error in method {0} in module {1}'.format('load_model', 'crnn_bridge.py'))", "def init_model(config, program, exe):\n checkpoints = config['Global'].get('checkpoints')\n if checkpoints:\n if os.path.exists(checkpoints + '.pdparams'):\n path = checkpoints\n 
fluid.load(program, path, exe)\n logger.info(\"Finish initing model from {}\".format(path))\n else:\n raise ValueError(\"Model checkpoints {} does not exists,\"\n \"check if you lost the file prefix.\".format(\n checkpoints + '.pdparams'))\n else:\n pretrain_weights = config['Global'].get('pretrain_weights')\n if pretrain_weights:\n path = pretrain_weights\n load_params(exe, program, path)\n logger.info(\"Finish initing model from {}\".format(path))", "def loadAdjustedModel(self):\r\n # Load model in GUI\r\n addModel(self.trcFilePath.replace('.trc','.osim'))", "def initialize_model_from_cfg(args, gpu_id=0):\n model = model_builder.Generalized_RCNN()\n model.eval()\n\n if args.cuda:\n model.cuda()\n\n if args.load_ckpt:\n load_name = args.load_ckpt\n logger.info(\"loading checkpoint %s\", load_name)\n checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)\n net_utils.load_ckpt(model, checkpoint['model'])\n\n if args.load_detectron:\n logger.info(\"loading detectron weights %s\", args.load_detectron)\n load_caffe2_detectron_weights(model, args.load_detectron)\n\n model = mynn.DataParallel(model, cpu_keywords=['im_info', 'roidb'], minibatch=True)\n\n return model", "def load(self, file_path):\n self.model = load_model(file_path)", "def load(self, file_path):\n self.model = load_model(file_path)", "def load(self, file_path):\n self.model = load_model(file_path)", "def load_model(self):\n if self.save_path is not None:\n if isfile(self.save_path):\n self.model.load_state_dict(load(self.save_path))\n else:\n raise ValueError(\"Cannot find model save file: \" + self.save_path)", "def load(cls, load_information: Dict):\n params = load_information[\"params\"]\n fit_kwargs_path = load_information[\"fit_kwargs\"]\n with open(fit_kwargs_path, \"rb\") as infile:\n fit_kwargs = cloudpickle.load(infile)\n model_path = load_information[\"get_model\"]\n with open(model_path, \"rb\") as infile:\n get_model = cloudpickle.load(infile)\n\n module = cls(get_model=get_model, fit_kwargs=fit_kwargs, **params)\n return module", "def load_config(self):\n pass", "def load(self, name=\"\"):\n\n self.constructed = True\n if name == \"\":\n name = \"/home/unai/Escritorio/MultiNetwork/model/model\"\n\n network_descriptors = {\"Generic\": GenericDescriptor, \"Decoder\": DecoderDescriptor, \"Discrete\": DiscreteDescriptor, \"Convolution\": ConvolutionDescriptor}\n\n if not os.path.isfile(name):\n print(\"Error at loading the model\")\n return None\n\n f = open(name, \"r+\")\n\n lines = f.readlines()\n\n i = 0\n while lines[i] != \"\\n\": # Each component is stored in a line\n ident, n_inp, kind, n_hidden, layers, init, act, cond_rand, taking, producing, depth, reachable, belows = lines[i][:-1].split(\"_\")\n kwargs = {}\n if int(ident[1:]) > self.last_net:\n self.last_net = int(ident[1:])\n\n self.reachable[ident] = reachable.split(\",\")\n self.comps_below[ident] = belows.split(\",\")\n\n if \"onv\" in kind: # Not working right now\n filters, sizes, layers, strides = layers.split(\"*\")\n sizes = sizes.split(\",\")\n s = np.array([[int(sz) for sz in szs.split(\"/\")] for szs in sizes])\n desc = network_descriptors[kind](int(inp), int(outp), int(n_inp), layers.split(\",\"), filters.split(\",\"), [int(x) for x in strides.split(\",\")], s, [int(x) for x in act.split(\",\")], [int(x) for x in init.split(\",\")], kwargs)\n else:\n if len(kwargs) > 0: # Not working right now\n kwargs = kwargs.split(\"-\")\n kwargs[0] = [int(x) for x in kwargs[0].split(\".\") if len(x) > 0]\n kwargs[1] = [int(x) for x in 
kwargs[1].split(\".\") if len(x) > 0]\n if len(cond_rand) > 0:\n cond_rand = cond_rand.split(\"-\")\n cond_rand[0] = [int(x) for x in cond_rand[0].split(\",\") if len(x) > 0]\n cond_rand[1] = [int(x) for x in cond_rand[1].split(\",\") if len(x) > 0]\n kwargs[\"conds\"] = cond_rand\n desc = network_descriptors[kind](int(taking.split(\",\")[0]), int(producing.split(\",\")[0]), int(n_inp), int(n_hidden), [int(x) for x in layers.split(\",\") if x != \"-1\"], init_functions[[int(x) for x in init.split(\",\") if x != \"-1\"]],\n act_functions[[int(x) for x in act.split(\",\") if x != \"-1\"]], **kwargs)\n\n # print(\"ident\", ident, \"n_inp\", n_inp, \"kind\", kind, \"inp\", inp, \"outp\", outp, \"layers\", layers, \"init\", init, \"act\", act, \"taking\", taking, \"producing\", producing, \"depth\", depth, \"kwargs\", kwargs)\n net = NetworkComp(desc, InOut(size=int(taking.split(\",\")[0]), data_type=taking.split(\",\")[1]), InOut(data_type=producing.split(\",\")[1], size=int(producing.split(\",\")[0])), int(depth))\n\n self.add_net(net, ident)\n i += 1\n\n i += 1\n\n while lines[i] != \"\\n\": # Inputs\n\n ident, size, kind, depth = lines[i].split(\"_\")\n\n self.inputs[ident] = ModelComponent(None, InOut(size=int(size), data_type=kind), int(depth))\n i += 1\n\n i += 1\n\n while lines[i] != \"\\n\": # Outputs\n\n ident, size, kind, depth, belows = lines[i].split(\"_\")\n\n self.outputs[ident] = ModelComponent(InOut(size=int(size), data_type=kind), None, int(depth))\n self.comps_below[ident] = belows.split(\",\")\n i += 1\n\n i += 1\n\n while i < len(lines): # Connections\n name, inp, outp, kind, size = lines[i].split(\"_\")\n\n if int(name[1:]) > self.last_con:\n self.last_con = int(name[1:])\n\n self.connections[name] = Connection(inp, outp, InOut(kind, int(size)), name)\n i += 1\n self.update_below()", "def load_model(self, path_model: Optional[PathLike]) -> None:\n raise NotImplementedError", "def load_model_params(self, full_path):\n \n print(\"Loading model parameters from %s\"%full_path)\n with open (full_path, 'rb') as f:\n \n self.theta = cPickle.load(f)\n \n if self.num_hidden == True or (self.num_hidden > 0):\n \n self.W, self.b, self.bhid = self.theta\n \n else:\n \n self.W, self.b = self.theta", "def load_model():\n with open(MODEL_FILENAME, \"rb\") as file:\n model = pickle.load(file)\n return model", "def load_model(language_id, model_type):\n\n # getting the language code from it's id\n language_code = get_language_code(language_id)\n\n # getting the model name from it's type\n model_name = get_model_name(model_type)\n\n # building the model's full path\n model_full_path = \"%s/%s/%s.txt\" % (models_base_path, language_code, model_name)\n\n # returning the model loaded directly from file\n return load_model_from_file(model_full_path)", "def read_config_file():\n config = ConfigParser()\n config.read('config.ini')\n\n # Get the model parameters as correct type\n batch_size = config.getint('model params', 'batch_size')\n resize_to = config.getint('model params', 'resize_to')\n train_prop = config.getfloat('model params', 'train_prop')\n\n return batch_size, resize_to, train_prop", "def load(cls, path: utils.URLPath):\n config = load_somclassifier_config(path / \"config.json\")\n model = keras.models.load_model(str(path / \"model.h5\"))\n binarizer = io_functions.load_joblib(path / \"binarizer.joblib\")\n\n data_ids = {\n \"validation\": io_functions.load_json(path / \"ids_validate.json\"),\n \"train\": io_functions.load_json(path / \"ids_train.json\"),\n }\n return cls(config, 
binarizer=binarizer, model=model, data_ids=data_ids, modeldir=path)", "def load(self, path):\n parameters = torch.load(path)\n\n if \"optimizer\" in parameters:\n parameters = parameters[\"model\"]\n\n self.load_state_dict(parameters)", "def load_model(self, fname):\n cxnlib.CXNNetLoadModel(self.handle, fname)", "def load_model():\n\t# Load model options\n\tprint ('Loading model parameters...')\n\twith open('%s.pkl'%path_to_umodel, 'rb') as f:\n\t\tuoptions = pkl.load(f)\n\twith open('%s.pkl'%path_to_bmodel, 'rb') as f:\n\t\tboptions = pkl.load(f)\n\n\t# Load parameters\n\tuparams = init_params(uoptions)\n\tuparams = load_params(path_to_umodel, uparams)\n\tutparams = init_tparams(uparams)\n\tbparams = init_params_bi(boptions)\n\tbparams = load_params(path_to_bmodel, bparams)\n\tbtparams = init_tparams(bparams)\n\n\t# Extractor functions\n\tprint ('Compiling encoders...')\n\tembedding, x_mask, ctxw2v = build_encoder(utparams, uoptions)\n\tf_w2v = theano.function([embedding, x_mask], ctxw2v, name='f_w2v')\n\tembedding, x_mask, ctxw2v = build_encoder_bi(btparams, boptions)\n\tf_w2v2 = theano.function([embedding, x_mask], ctxw2v, name='f_w2v2')\n\n\t# Tables\n\tprint ('Loading tables...')\n\tutable, btable = load_tables()\n\n\t# Store everything we need in a dictionary\n\tprint ('Packing up...')\n\tmodel = {}\n\tmodel['uoptions'] = uoptions\n\tmodel['boptions'] = boptions\n\tmodel['utable'] = utable\n\tmodel['btable'] = btable\n\tmodel['f_w2v'] = f_w2v\n\tmodel['f_w2v2'] = f_w2v2\n\n\treturn model", "def load_model(self):\n if os.path.exists(self.model_filename):\n self.model.load_weights(self.model_filename)", "def load_model(self, model_path, device='cpu'):\n model_dict = torch.load(model_path, map_location='cpu')\n model = SeparationModel(model_dict['config'])\n model.load_state_dict(model_dict['state_dict'])\n device = device if torch.cuda.is_available() else 'cpu'\n\n self.device = device\n\n model = model.to(device).eval()\n metadata = model_dict['metadata'] if 'metadata' in model_dict else {}\n self.model = model\n self.config = model_dict['config']\n self.metadata = metadata\n self.transform = self._get_transforms(\n self.metadata['transforms'])", "def load_model(config, bm):\n\n trainable_layers = [9, 10, 11]\n assert min(trainable_layers) >= 0 and max(trainable_layers) <= 11 # BERT has 12 layers!\n model = FineTunedBERT(device = config.device, n_classes = len(bm.classes()), trainable_layers = trainable_layers)\n\n # if we saved the state dictionary, load it.\n if config.resume:\n try :\n model.load_state_dict(torch.load(path_to_dicts(config), map_location = config.device))\n except Exception:\n print(f\"WARNING: the `--resume` flag was passed, but `{path_to_dicts(config)}` was NOT found!\")\n else:\n if os.path.exists(path_to_dicts(config)):\n print(f\"WARNING: `--resume` flag was NOT passed, but `{path_to_dicts(config)}` was found!\") \n\n return model", "def load_from_conf(self):\r\n raise NotImplementedError", "def load_model(self):\n saved_path = self.config.path_tmp / self.model.model_name\n if saved_path.exists():\n self.model.load_weights(str(saved_path / 'model.vec'))", "def load_model():\n\n # find location of model\n\n file_path = '/Users/davidodwyer/Desktop' # to the directory\n file_name = 'original_mlr.joblib' \n the_file = os.path.join(file_path, file_name)\n\n # load model\n\n model = load(the_file)\n\n return model", "def load_model(sess, meta_file, checkpoint_file):\n saver = tf.train.import_meta_graph(meta_file)\n saver.restore(sess, checkpoint_file)\n \n configs = 
tf.get_collection('configs')\n pvars = tf.get_collection('placeholders')\n \n model_settings = dict()\n for c in configs:\n name = c.name.split(':')[0]\n model_settings[name] = sess.run(c)\n \n model_vars = dict()\n for p in pvars:\n name = p.name.split(':')[0]\n model_vars[name] = p\n model_vars['probs'] = tf.get_collection('probs')[0]\n \n return model_settings, model_vars", "def load(path, config=None, task=\"default\"):\n\n # Detect ONNX models\n if isinstance(path, bytes) or (isinstance(path, str) and os.path.isfile(path)):\n return OnnxModel(path, config)\n\n # Return path, if path isn't a string\n if not isinstance(path, str):\n return path\n\n # Transformer models\n models = {\n \"default\": AutoModel.from_pretrained,\n \"question-answering\": AutoModelForQuestionAnswering.from_pretrained,\n \"summarization\": AutoModelForSeq2SeqLM.from_pretrained,\n \"text-classification\": AutoModelForSequenceClassification.from_pretrained,\n \"zero-shot-classification\": AutoModelForSequenceClassification.from_pretrained,\n }\n\n # Load model for supported tasks. Return path for unsupported tasks.\n return models[task](path) if task in models else path", "def load_from_conf(self):\n raise NotImplementedError", "def load_model():\n with open(MODEL_SAVE_JSON, 'r') as fp:\n json_string = fp.read()\n model = model_from_json(json_string)\n return model", "def load_model(self, model_path: str, with_mask=True) -> None:\n checkpt = torch.load(model_path, map_location=self.device)\n model_utils.initialize_params(\n self.model, checkpt[\"state_dict\"], with_mask=with_mask\n )\n logger.info(f\"Loaded the model from {model_path}\")", "def load_model(\n model_dir, model_file=None, model_name=None, serialize_model=True, as_builder=False\n):\n if model_file and model_name:\n raise ValueError(\"only one of model_file and model_name should be set\")\n model_description_path = os.path.join(model_dir, MODEL_DESCRIPTION_FILENAME)\n\n if model_file:\n model = load_model_from_file(model_file, as_builder=as_builder)\n if serialize_model:\n tf.io.gfile.copy(model_file, model_description_path, overwrite=True)\n elif model_name:\n model = load_model_from_catalog(model_name, as_builder=as_builder)\n if serialize_model:\n with tf.io.gfile.GFile(\n model_description_path, mode=\"w\"\n ) as model_description_file:\n model_description_file.write(\n \"from opennmt import models\\n\"\n 'model = lambda: models.get_model_from_catalog(\"%s\")\\n' % model_name\n )\n elif tf.io.gfile.exists(model_description_path):\n tf.get_logger().info(\n \"Loading model description from %s\", model_description_path\n )\n model = load_model_from_file(model_description_path, as_builder=as_builder)\n else:\n raise RuntimeError(\n \"A model configuration is required: you probably need to \"\n \"set --model or --model_type on the command line.\"\n )\n\n return model", "def load_headmodel(name, prefix='data'):\n cond_file = op.join(prefix, name, name + '.cond')\n geom_file = op.join(prefix, name, name + '.geom')\n patches_file = op.join(prefix, name, name + '.patches')\n dip_file = op.join(prefix, name, name + '.dip')\n tdcs_file = op.join(prefix, name, name + '.hdtdcs')\n pot_file = op.join(prefix, name, name + '.pot')\n geom = om.Geometry()\n geom.read(str(geom_file), str(cond_file))\n sensors = om.Sensors()\n sensors.load(str(patches_file))\n model = {'geometry': geom, 'sensors': sensors}\n if op.exists(dip_file):\n dipoles = om.Matrix(str(dip_file))\n model['dipsources'] = dipoles\n if op.exists(tdcs_file):\n tdcs = om.Sensors(str(tdcs_file), 
geom)\n model['tdcssources'] = tdcs\n if op.exists(pot_file):\n pot = om.Matrix(str(pot_file))\n model['potentials'] = pot\n return model", "def load_model(filename):\n return Model.load_savefile(filename)", "def load_model(self, ckpt_fn):\n checkpoint = torch.load(ckpt_fn)\n self.net_.load_state_dict(checkpoint[\"model\"])\n self.optimizer_.load_state_dict(checkpoint[\"optimizer\"])\n self.epoch_ = checkpoint[\"epoch\"]\n self.global_step_ = checkpoint[\"global_step\"]\n self.model_samples_ = deque(checkpoint[\"model_samples\"])\n self.sampler.load_state_dict(checkpoint[\"sampler_state\"])\n self.ais_loss.load_state_dict(checkpoint[\"ais_state\"])\n self.replay_prob = checkpoint[\"replay_prob\"]\n self.max_replay = checkpoint[\"max_replay\"]", "def load_model_from_file(path, as_builder=False):\n module = load_model_module(path)\n model = module.model\n if not as_builder:\n model = model()\n del sys.path_importer_cache[os.path.dirname(module.__file__)]\n del sys.modules[module.__name__]\n return model", "def load_swin_model(model_path, cfg_path):\n # set up model config\n model = init_detector(cfg_path, model_path, device='cuda:0')\n return model", "def load_model(self, file_name):\n\t\tself.model.load_weights(file_name)", "def load_model(self, tmp_dir):\n pass", "def from_config(cls, *args, **kwargs):\n _config = args\n\n if isinstance(args, tuple): # multiple non-keyword arguments were provided\n if len(args) > 0:\n _config = args[0]\n\n else:\n _config = kwargs['config_path']\n kwargs.pop('config_path')\n\n local = False\n if 'make_new_path' in kwargs:\n local = True\n elif isinstance(_config, str) and os.path.isfile(_config):\n local = True\n elif isinstance(_config, dict) and \"category\" in _config:\n local = True\n\n if local:\n config = None\n config_path = None\n\n # we need to build ai4water's Model class\n if isinstance(_config, dict):\n config = _config\n else:\n config_path = _config\n return BaseModel._get_config_and_path(\n cls,\n config=config,\n config_path=config_path,\n **kwargs\n )\n\n # tf1.15 has from_config so call it\n return super().from_config(*args, **kwargs)", "def loadAll(self, path):\n self.model = keras.models.load_model(path+\"/model\")\n with open(path + \"/modelConfig.json\") as f:\n config = json.load(f)\n firstLayerConfig = config['config']['layers'][0]['config']\n lastLayerConfig = config['config']['layers'][-1]['config']\n self.lookBack = firstLayerConfig['batch_input_shape'][-1]\n self.forecast = lastLayerConfig['units']", "def load(model_file):\n return pickle.load(open(model_file))", "def load(self, config_instance):\r\n pass", "def load(self):\n print(\"==> Loading model from\", self.model_dir)\n self.model = tf.keras.models.load_model(self.model_dir)", "def load_model(self, file_name):\n with open(file_name, 'rb') as file:\n self.lin_reg = pickle.load(file)", "def load(self, model_path):\n # TODO: include new params based on ConfigEnum\n checkpoint = torch.load(model_path)\n\n self.image_size = checkpoint['image_size']\n self.device = checkpoint['device']\n self.fp16 = checkpoint['fp16']\n self.accumulate_grad_steps = checkpoint['accumulate_grad_steps']\n self.experiment_id = checkpoint['experiment_id']\n self.experiment_tag = checkpoint['experiment_tag']\n self.seed = checkpoint['seed']\n self.train_batch_size = checkpoint['train_batch_size']\n self.valid_batch_size = checkpoint['valid_batch_size']\n self.test_batch_size = checkpoint['test_batch_size']\n self.dataloader_num_workers = checkpoint['dataloader_num_workers']\n 
self.train_dataloader_shuffle = checkpoint['train_dataloader_shuffle']\n self.optimizer_type = checkpoint['optimizer_type']\n self.optimizer_params = checkpoint['optimizer_params']\n self.scheduler_type = checkpoint['scheduler_type']\n self.scheduler_params = checkpoint['scheduler_params']\n self.step_scheduler_after = checkpoint['step_scheduler_after']\n self.step_scheduler_metric = checkpoint['step_scheduler_metric']\n self.compute_train_loss_after = checkpoint['compute_train_loss_after']\n self.compute_train_metric_after = checkpoint['compute_train_metric_after']\n self.compute_valid_loss_after = checkpoint['compute_valid_loss_after']\n self.compute_valid_metric_after = checkpoint['compute_valid_metric_after']\n self.training_stopping_criteria = checkpoint['training_stopping_criteria']\n self.stopping_criteria_params = checkpoint['stopping_criteria_params']\n self.max_epoch = checkpoint['max_epoch']\n self.train_on_all_data = checkpoint['train_on_all_data']\n self.validate_after = checkpoint['validate_after']\n self.validation_steps = checkpoint['validation_steps']\n self.run_lr_range_test= checkpoint['run_lr_range_test']\n self.sleep_in_epochs = checkpoint['sleep_in_epochs']\n self.sleep_time = checkpoint['sleep_time']\n self.checkpoint_epochs = checkpoint['checkpoint_epochs']\n\n self._best_score = checkpoint['_best_score']\n self._current_score = checkpoint['_current_score']\n self._counter = checkpoint['_counter']\n self.metrics = checkpoint['metrics']\n self.current_epoch = checkpoint['current_epoch']\n self.current_train_batch = checkpoint['current_train_batch']\n self.current_valid_batch = checkpoint['current_valid_batch']\n self.num_train_samples = checkpoint['num_train_samples']\n self.num_train_iterations = checkpoint['num_train_iterations']\n self.checkpoint_snapshot = checkpoint['checkpoint_snapshot'] \n\n # initialize optimizer, scheduler, and gradient scaler\n self.configure_optimizers()\n self.configure_schedulers()\n \n if self.fp16:\n self.scaler = torch.cuda.amp.GradScaler()\n\n if self.model:\n self.model.load_state_dict(checkpoint['state_dict'])\n self.model.to(self.device)\n\n if self.optimizer:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n if self.scheduler:\n self.scheduler.load_state_dict(checkpoint['scheduler'])\n\n #if self.scaler:\n # self.scaler.load_state_dict(checkpoint['scaler'])", "def initialize_default_model(config: BareConfig, model_class) -> torch.nn.Module:\n model = model_class()\n default_model_path = f\"{config.get_default_model_folder_path()}/{model_class.__name__}.model\"\n model.load_state_dict(torch.load(default_model_path))\n return model", "def load_model(self, filename):\n model_object = self.s3_resource.Object(self.bucket_name, self.models_path + str(filename)).get()['Body'].read()\n model = pickle.loads(model_object)\n return model", "def load_model_from_file(model: torch.nn.Module, model_file_path: Path) -> None:\n\n if model_file_path.is_file():\n try:\n model.load_state_dict(torch.load(model_file_path))\n except Exception as e:\n logging.warning(\"Couldn't load model. 
Attempting to map CUDA tensors to CPU to solve error.\")\n else:\n logging.warning(\"Could not find model: {}\".format(model_file_path))\n raise FileExistsError(f\"Cannot load model file {model_file_path} into {model}...\")", "def load(self):\r\n # self.model.load_state_dict(torch.load(os.path.join(self.ckpt_dir, 'best_model_state_dict.pt')))\r\n if torch.cuda.is_available():\r\n self.model = torch.load(os.path.join(self.ckpt_dir, 'best_model_INN.pt'))\r\n else:\r\n self.model = torch.load(os.path.join(self.ckpt_dir, 'best_model_INN.pt'), map_location=torch.device('cpu'))", "def read_config():\n global batch_size, num_classes, num_filters, dropout_dim, dense_neurons\n global b_eval_advanced, pool_size, kernel_size, IMG_SIZE, epochs, img_cols, img_rows\n\n config = configparser.ConfigParser()\n config.read('config.ini')\n\n batch_size = int(config['MODEL']['batch_size'])\n num_filters = int(config['MODEL']['num_filters'])\n dropout_dim = float(config['MODEL']['dropout_dim'])\n dense_neurons = int(config['MODEL']['dense_neurons'])\n _pool_size = config['MODEL']['pool_size']\n _kernel_size = config['MODEL']['kernel_size']\n IMG_SIZE = int(config['DATA']['image_size'])\n num_classes = int(config['CUSTOM']['num_classes'])\n epochs = int(config['MODEL']['epochs'])\n b_eval_advanced = (config['MODEL']['complex_analysis'] == 'true' or config['MODEL']['complex_analysis'] == 'True')\n\n pool_size = tuple(map(int, _pool_size.split(',')))\n kernel_size = tuple(map(int, _kernel_size.split(',')))\n\n img_rows, img_cols = IMG_SIZE, IMG_SIZE", "def load(path, model: Optional[nn.Module] = None, input_sample=None,\n inplace=False, device=None):\n return load_model(path, model, input_sample=input_sample,\n inplace=inplace, device=device)", "def load(cls, load_path, load_data=True, env=None, custom_objects=None, **kwargs):\n # Check if file exists if load_path is\n # a string\n if isinstance(load_path, str):\n if not os.path.exists(load_path):\n if not os.path.exists(load_path + \".zip\") or not os.path.exists(\n load_path + \".h5\"\n ):\n raise ValueError(\n \"Error: the file {} could not be found\".format(load_path)\n )\n\n # Open the zip archive and load data.\n try:\n with zipfile.ZipFile(load_path + \".zip\", \"r\") as file_:\n namelist = file_.namelist()\n # If data or parameters is not in the\n # zip archive, assume they were stored\n # as None (_save_to_file allows this).\n params = None\n if \"parameters\" in namelist and load_data:\n # Load class parameters and convert to string\n # (Required for json library in Python 3.5)\n json_data = file_.read(\"parameters\").decode()\n params = json_to_data(json_data, custom_objects=custom_objects)\n\n except zipfile.BadZipFile:\n print(\"ERROR: model could not be loaded\")\n return None\n\n model = cls(env=env)\n model.__dict__.update(params)\n model.__dict__.update(kwargs)\n\n model.obs_size = model.observation_space.shape[0]\n model.action_size = model.action_space.n\n model.policy = load_model(load_path + \".h5\")\n\n return model", "def load(self, model_name: str, model_dir: str = \"checkpoints\") -> None:\n self.model.load_state_dict(\n torch.load(os.path.join(model_dir, f\"{model_name}.pt\"))\n )", "def load(self, path):\n with path.open('rb') as f:\n weights = torch.load(f)\n load_model_from_dict(self.align, weights)\n # model_params = dict(self.align.named_parameters())\n # model_keys = sorted(model_params.keys())\n # print(model_keys)\n return self", "def load_model(model_name):\r\n model = joblib.load(model_name)\r\n return model" ]
[ "0.7542821", "0.7373315", "0.73305744", "0.7220506", "0.71582717", "0.71088517", "0.695264", "0.686733", "0.680585", "0.67566395", "0.6753529", "0.67380685", "0.67345", "0.6729725", "0.6692068", "0.6689554", "0.66882926", "0.668114", "0.6674514", "0.66543514", "0.65970093", "0.6562888", "0.6518016", "0.6516986", "0.65163666", "0.65112925", "0.6506336", "0.6472089", "0.6469603", "0.64674145", "0.64661", "0.6463173", "0.64587694", "0.6453242", "0.64494175", "0.64432174", "0.6440935", "0.640665", "0.64013815", "0.6390856", "0.6381476", "0.63635695", "0.63622344", "0.63544106", "0.6350444", "0.6345341", "0.63238204", "0.63112414", "0.63009024", "0.63009024", "0.63009024", "0.6282113", "0.6278451", "0.62745994", "0.6272744", "0.62710977", "0.62553", "0.6254429", "0.6246759", "0.62444407", "0.62328887", "0.62152743", "0.6209412", "0.62088", "0.6206792", "0.6203254", "0.62018436", "0.6197609", "0.61924934", "0.6180414", "0.61772466", "0.6167195", "0.61621255", "0.61601704", "0.6157458", "0.61527276", "0.61419326", "0.61403656", "0.61305827", "0.61179984", "0.6112472", "0.6111737", "0.6101201", "0.609958", "0.60995793", "0.60884243", "0.6082891", "0.60795325", "0.60780257", "0.60778195", "0.60732126", "0.60686266", "0.60679626", "0.6067465", "0.6065575", "0.6064879", "0.60636806", "0.6063544", "0.6053436", "0.6051364" ]
0.62608147
56
Load parameter values from the appropriate source.
def set(self, model=None):
    if isinstance(model, Model):
        if not isinstance(model, type(self)):
            raise BadModelFile('Cannot construct a %r from a %r' %
                               (self.__class__.__name__, model.__class__.__name__))
        self.fromlist(model.values())
        self.header = dict(model.header)
    elif isinstance(model, basestring):
        self.fromstring(model)
    else:
        array = np.atleast_1d(model)
        if array.dtype.kind in 'iuf' and array.ndim == 1:
            self.fromlist(model)
        elif model is not None:
            self.fromfile(model)
        else:
            self.fromlist([])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_parameter(self):", "def _load(self, load_dict):\n if self.v_locked:\n raise pex.ParameterLockedException(\n \"Parameter `%s` is locked!\" % self.v_full_name\n )\n\n if \"data\" in load_dict:\n self._data = load_dict[\"data\"][\"data\"][0]\n self._default = self._data\n else:\n self._logger.warning(\n \"Your parameter `%s` is empty, \"\n \"I did not find any data on disk.\" % self.v_full_name\n )\n\n if \"explored_data\" in load_dict:\n self._explored_range = [\n x for x in load_dict[\"explored_data\"][\"data\"].tolist()\n ]\n self._explored = True\n\n self._locked = True", "def init_config(self):\n super().init_config()\n for param in self.parameters():\n if param.name == 'source':\n continue\n self.add_config_item(param.name,\n saver=lambda p=param: getattr(p, \"value\"),\n loader=lambda x, p=param: setattr(p, \"value\", x),\n default=param.default)", "def __loadParametersAndDefaults(self, dataPath, confFilename, nkeys, nvalues, keyType, valueType):\n params = self.loadConf(dataPath, confFilename=confFilename)\n\n # filter dict to include only recognized field names:\n for k in params.keys():\n if k not in SeriesLoader.BinaryLoadParameters._fields:\n del params[k]\n keywordParams = {'nkeys': nkeys, 'nvalues': nvalues, 'keytype': keyType, 'valuetype': valueType}\n for k, v in keywordParams.items():\n if not v:\n del keywordParams[k]\n params.update(keywordParams)\n return SeriesLoader.BinaryLoadParameters(**params)", "def _load(self, load_dict):\n if self.v_locked:\n raise pex.ParameterLockedException(\n \"Parameter `%s` is locked!\" % self.v_full_name\n )\n\n try:\n self._data = load_dict[\"data\" + ArrayParameter.IDENTIFIER]\n\n if \"explored_data\" + ArrayParameter.IDENTIFIER in load_dict:\n explore_table = load_dict[\"explored_data\" + ArrayParameter.IDENTIFIER]\n\n idx = explore_table[\"idx\"]\n\n explore_list = []\n\n # Recall the arrays in the order stored in the ObjectTable 'explored_data__rr__'\n for name_idx in idx:\n arrayname = self._build_name(name_idx)\n explore_list.append(load_dict[arrayname])\n\n self._explored_range = [x for x in explore_list]\n self._explored = True\n\n except KeyError:\n super(ArrayParameter, self)._load(load_dict)\n\n self._default = self._data\n self._locked = True", "def load_params_from_file(self, input_file):\n\n ### FILL IN ###", "def _extract_load_parameters(env: EvalEnv, source_id: tuple) -> LoadParameters:\n source_constraints: List[SourceConstraint] = env[ENV_SOURCE_CONSTRAINTS]\n global_extent = None\n process_types = set()\n\n filtered_constraints = [c for c in source_constraints if c[0] == source_id]\n\n for collection_id, constraint in source_constraints:\n if \"spatial_extent\" in constraint:\n extent = constraint[\"spatial_extent\"]\n if \"resample\" not in constraint:\n extent = _align_extent(extent,collection_id[1][0],env)\n\n global_extent = spatial_extent_union(global_extent, extent) if global_extent else extent\n for _, constraint in filtered_constraints:\n if \"process_type\" in constraint:\n process_types |= set(constraint[\"process_type\"])\n\n _, constraints = filtered_constraints.pop(0)\n source_constraints.remove((source_id,constraints))\n\n params = LoadParameters()\n params.temporal_extent = constraints.get(\"temporal_extent\", [\"1970-01-01\", \"2070-01-01\"])\n params.spatial_extent = constraints.get(\"spatial_extent\", {})\n params.global_extent = global_extent\n params.bands = constraints.get(\"bands\", None)\n params.properties = constraints.get(\"properties\", {})\n params.aggregate_spatial_geometries = 
constraints.get(\"aggregate_spatial\", {}).get(\"geometries\")\n if params.aggregate_spatial_geometries is None:\n params.aggregate_spatial_geometries = constraints.get(\"filter_spatial\", {}).get(\"geometries\")\n params.sar_backscatter = constraints.get(\"sar_backscatter\", None)\n params.process_types = process_types\n params.custom_mask = constraints.get(\"custom_cloud_mask\", {})\n params.data_mask = env.get(\"data_mask\", None)\n if params.data_mask:\n _log.debug(f\"extracted data_mask {params.data_mask}\")\n params.target_crs = constraints.get(\"resample\", {}).get(\"target_crs\",None)\n params.target_resolution = constraints.get(\"resample\", {}).get(\"resolution\", None)\n params.resample_method = constraints.get(\"resample\", {}).get(\"method\", \"near\")\n params.pixel_buffer = constraints.get(\"pixel_buffer\", {}).get(\"buffer_size\", None)\n return params", "def load_parameters(self):\n json_data = open(\"param.json\")\n data = json.load(json_data)\n self.items = data[\"items\"]\n self.pollInterval = self.items[0]['poll_interval']", "def _load(self, load_dict):\n if self.v_locked:\n raise pex.ParameterLockedException(\n \"Parameter `%s` is locked!\" % self.v_full_name\n )\n\n if \"data\" in load_dict:\n dump = load_dict[\"data\"]\n self._data = pickle.loads(dump)\n else:\n self._logger.warning(\n \"Your parameter `%s` is empty, \"\n \"I did not find any data on disk.\" % self.v_full_name\n )\n\n try:\n self.v_protocol = load_dict[PickleParameter.PROTOCOL]\n except KeyError:\n # For backwards compatibility\n self.v_protocol = PickleParameter._get_protocol(dump)\n\n if \"explored_data\" in load_dict:\n explore_table = load_dict[\"explored_data\"]\n\n name_col = explore_table[\"idx\"]\n\n explore_list = []\n for name_id in name_col:\n arrayname = self._build_name(name_id)\n loaded = pickle.loads(load_dict[arrayname])\n explore_list.append(loaded)\n\n self._explored_range = explore_list\n self._explored = True\n\n self._default = self._data\n self._locked = True", "def load():\n\n global R, P, NP, update, update_available, region_dict\n\n loader = GoSmartParameterLoader(gosmart._prefix)\n loader.initiate()\n\n R = loader.get_regions()\n P, NP = loader.get_parameters()\n\n region_dict = loader.get_region_dict()\n\n update = gosmart.status.StatusUpdater()\n update_available = update.connect()", "def fill_missing_source_parameters(self) -> None:\n\n sp_dict = {sp.source: sp for sp in self.source_parameters}\n sp_out = list()\n for source in self.sources:\n if source in sp_dict:\n sp_out.append(sp_dict[source])\n else:\n sp_out.append(SourceParameterFactory(source))\n\n self.source_parameters = sp_out\n return", "def import_data(self, data):\n # Import additional data for tuning\n # data: a list of dictionarys, each of which has at least two keys, 'parameter' and 'value'\n pass", "def load_params(num_sources, fname):\n path_p = '/import/c4dm-04/alvarado/results/sampling_covariance/'\n # path_p = '/home/pa/Desktop/sampling_covariance/'\n pitches = [\"60\", \"64\", \"67\"]\n hparam = []\n lengthscale = []\n variance = []\n frequency = []\n\n for i in range(num_sources):\n hparam.append(pickle.load(open(path_p + fname + \"_M\" + pitches[i] + \"_hyperparams.p\", \"rb\")))\n lengthscale.append(hparam[i][1].copy())\n variance.append(hparam[i][2].copy() / sum(hparam[i][2].copy()))\n frequency.append(hparam[i][3].copy())\n\n return lengthscale, variance, frequency", "def load(self, request):\n\n value = request._get_parameter_value(self)\n\n if value.strings is None or len(value.strings) == 
0:\n return\n\n try:\n value.object = self.unmarshal(request, value.strings)\n except Exception as e:\n msg = \"{}: {}\".format(e.__class__.__name__, str(e))\n self.add_error(request, msg)", "def load_params(self):\n return self.params", "def _set_params(self, params, defaults):\n new_params = OrderedDict(\n zip(params, [x if isinstance(x, Parameter) else Parameter() for x in defaults])\n )\n for key, value in self._src.items():\n if key in new_params:\n new_params[key] = value\n\n self._src = new_params", "def _init_param_source():\n\n amp_sine = 1\n amp_rand = 1\n rollover = 100 # length of data to display\n update_delay = 100 # time between delays of update in ms\n param_source = ColumnDataSource(dict(\n amp_sine=[amp_sine],\n amp_rand=[amp_rand],\n rollover=[rollover],\n update_delay=[update_delay]\n ))\n return param_source", "def test_params_loading(datadir: Path):\n config_fn = datadir / \"datapane.yaml\"\n initial_vals = dict(p1=\"a\", p3=3)\n\n assert len(dp.Params) == 0\n\n # load some values\n api._reset_runtime(initial_vals)\n assert len(dp.Params) == 2\n assert dp.Params[\"p1\"] == initial_vals[\"p1\"]\n\n # clear and load again\n api._reset_runtime({})\n assert len(dp.Params) == 0\n api._reset_runtime(initial_vals)\n\n # load from file\n dp.Params.load_defaults(config_fn=config_fn)\n # ensure values are merged\n assert len(dp.Params) == 3\n assert dp.Params[\"p1\"] == \"hello\"\n assert dp.Params[\"p2\"] == 4\n assert dp.Params[\"p3\"] == initial_vals[\"p3\"]", "def load(self, *args, **kwargs):\n pass", "def _load(self, load_dict):\n if self.v_locked:\n raise pex.ParameterLockedException(\n \"Parameter `%s` is locked!\" % self.v_full_name\n )\n\n try:\n serial_string = load_dict[\"data%s\" % SparseParameter.IDENTIFIER]\n self._data = self._reconstruct_matrix(serial_string)\n\n if \"explored_data\" + SparseParameter.IDENTIFIER in load_dict:\n explore_table = load_dict[\"explored_data\" + SparseParameter.IDENTIFIER]\n idx_col = explore_table[\"idx\"]\n explore_list = []\n for irun, name_idx in enumerate(idx_col):\n serial_string = load_dict[\n \"xspm%s%08d\" % (SparseParameter.IDENTIFIER, name_idx)\n ]\n matrix = self._reconstruct_matrix(serial_string)\n explore_list.append(matrix)\n\n self._explored_range = explore_list\n self._explored = True\n\n except KeyError as e:\n super(SparseParameter, self)._load(load_dict)\n\n self._default = self._data\n self._locked = True", "def load_params():\r\n return pickle.load(open('params.p', mode='rb'))", "def _use_existing_params(self):\n sh = shelve.open(os.path.expanduser('~/.config/scheduler/params'))\n self.params = sh['params']\n sh.close()", "def _load(self):\n for k,v in self.parameters.items():\n if isinstance(v,list):\n setattr(self,k,np.array(v,dtype=np.float32))\n else:\n setattr(self,k,v)", "def load(self,params):\n self._register.clear()\n for key in params:\n self._register[key] = params[key]", "def loadParams(self):\n\n if len(self.filParams) < 3:\n return\n\n if not os.access(self.filParams, os.R_OK):\n return\n\n print(\"Priors.loadParams INFO: loading priors from %s\" \\\n % (self.filParams))\n\n # This is a little bit painful without just using something\n # more mature like astropy.table or pandas:\n hypers = np.genfromtxt(self.filParams, usecols=(1,2))\n\n # Convert the angular arguments to radians\n hypers[4] = np.radians(hypers[4])\n hypers[5] = np.radians(hypers[5])\n hypers[7] = np.radians(hypers[7])\n\n # transpose into hyperparams\n self.hyper = np.transpose(hypers)\n\n # now we need to read in the function 
names. This only really\n # has meaning for the mixed prior...\n strNames = np.genfromtxt(self.filParams, usecols=(0), dtype='str')\n self.mixedNames = list(strNames)\n\n # Finally, read in the name of the function\n with open(self.filParams, 'r') as rObj:\n for sLine in rObj:\n if sLine.find('#') < 0:\n continue\n if sLine.find('NAME') < 0:\n continue\n\n vLine = sLine.strip().split()\n self.namePrior = vLine[-1]", "def _grab_injection_parameters_from_file(\n self, path, cls=None, add_nans=True, **kwargs\n ):\n if cls is None:\n from pesummary.core.file.injection import Injection\n cls = Injection\n data = cls.read(path, **kwargs).samples_dict\n for i in self.parameters:\n if i not in data.keys():\n data[i] = float(\"nan\")\n return data", "def load_params():\n with open('params.p', mode='rb') as in_file:\n return pickle.load(in_file)", "def on_load_parameters(self, filename=None):\n if filename is None:\n path, _ = QtWidgets.QFileDialog.getOpenFileName(self, \"Choose a parameter file.\", \"\", \"JSON Files (*.json)\")\n else:\n path = filename\n\n if path == '' or path is None:\n return\n\n self.param_file = path\n\n with open(self.param_file, 'r') as f:\n params = json.loads(f.read())\n\n obj_points = params['object positions']\n cam_pos = params['camera positions']\n dist_coeff = params['distortion coefficients']\n\n for p in obj_points:\n x, y = p['x'], p['y']\n lat, lon, alt = p['lat'], p['lon'], p['alt']\n self.add_known_image_points((x, y), latlonalt=(lat, lon, alt))\n\n self.camera_lat_line.setValue(float(cam_pos['lat']))\n self.camera_lon_line.setValue(float(cam_pos['lon']))\n self.camera_alt_line.setValue(float(cam_pos['alt']))\n self.cx_line.setValue(float(cam_pos['cx']))\n self.cy_line.setValue(float(cam_pos['cy']))\n self.phi_line.setValue(float(cam_pos['phi']))\n self.theta_line.setValue(float(cam_pos['theta']))\n self.psi_line.setValue(float(cam_pos['psi']))\n\n self.k1_line.setValue(float(dist_coeff['k1']))\n self.k2_line.setValue(float(dist_coeff['k2']))\n self.k3_line.setValue(float(dist_coeff['k3']))\n self.p1_line.setValue(float(dist_coeff['p1']))\n self.p2_line.setValue(float(dist_coeff['p2']))\n\n self.statusBar().showMessage(f'Loaded parameters from {self.param_file}')", "def load_params(self, path: str):\n DistributedWorker.load_params(self, path)\n\n params = torch.load(path)\n self.dqn.load_state_dict(params[\"dqn_state_dict\"])\n print(\"[INFO] loaded the model and optimizer from\", path)", "def load_params(self, params):\n params.cp_latest_filename = \"latest_checkpoint_v\"+params.version\n params.cp_load_latest_filename = \"latest_checkpoint_v\"+params.cp_load_ver\n params.cp_load_dir = params.out_dir + params.cp_load_name+ \"/checkpoints/\"\n if not hasattr(params, \"model_out_dir\"):\n params.model_out_dir = params.out_dir + params.model_name\n params.cp_save_dir = params.model_out_dir + \"/checkpoints/\"\n params.log_dir = params.model_out_dir + \"/logfiles/\"\n params.save_dir = params.model_out_dir + \"/savefiles/\"\n params.disp_dir = params.model_out_dir + \"/vis/\"\n params.num_pixels = int(np.prod(params.data_shape))\n self.params = params\n self.params_loaded = True", "def __init__(self, source, parameter='', file_path=None):\n super().__init__() \n self.filter_type = 'data'\n self.source = source\n self.parameter = parameter\n self._initate_filter_items()\n if file_path:\n self.load_filter_file(file_path)", "def load_reco_param(source):\n if not (source is None or isinstance(source, (str, Mapping))):\n raise TypeError('`source` must be string, 
mapping, or None')\n\n if isinstance(source, str):\n orig_dict = from_file(source)\n\n elif isinstance(source, Mapping):\n orig_dict = source\n\n else:\n raise TypeError('Cannot load reco parameterizations from a %s'\n % type(source))\n\n valid_dimensions = ('coszen', 'energy')\n required_keys = ('dist', 'fraction', 'kwargs')\n\n # Build dict of parameterizations (each a callable) per flavintgroup\n reco_params = OrderedDict()\n for flavint_key, dim_dict in orig_dict.items():\n flavintgroup = NuFlavIntGroup(flavint_key)\n reco_params[flavintgroup] = {}\n for dimension in dim_dict.keys():\n dim_dist_list = []\n\n if not isinstance(dimension, str):\n raise TypeError(\"The dimension needs to be given as a string!\"\n \" Allowed: %s.\"%valid_dimensions)\n\n if dimension not in valid_dimensions:\n raise ValueError(\"Dimension '%s' not recognised!\"%dimension)\n\n for dist_dict in dim_dict[dimension]:\n dist_spec_dict = {}\n\n # allow reading in even if kwargs not present - computation of\n # transform will fail because \"loc\" and \"scale\" hard-coded\n # requirement\n for required in required_keys:\n if required not in dist_dict:\n raise ValueError(\"Found distribution property dict \"\n \"without required '%s' key for \"\n \"%s - %s!\"\n %(required, flavintgroup, dimension))\n\n for k in dist_dict.keys():\n if k not in required_keys:\n logging.warning(\n \"Unrecognised key in distribution property dict: '%s'\"%k\n )\n\n dist_spec = dist_dict['dist']\n\n if not isinstance(dist_spec, str):\n raise TypeError(\" The resolution function needs to be\"\n \" given as a string!\")\n\n if not dist_spec:\n raise ValueError(\"Empty string found for resolution\"\n \" function!\")\n\n try:\n dist = getattr(stats, dist_spec.lower())\n except AttributeError:\n try:\n import scipy\n sp_ver_str = scipy.__version__\n except:\n sp_ver_str = \"N/A\"\n raise AttributeError(\"'%s' is not a valid distribution\"\n \" from scipy.stats (your scipy\"\n \" version: '%s').\"\n %(dist_spec.lower(), sp_ver_str))\n logging.debug(\"Found %s - %s resolution function: '%s'\"\n %(flavintgroup, dimension, dist.name))\n\n dist_spec_dict['dist'] = dist\n\n frac = dist_dict['fraction']\n\n if isinstance(frac, str):\n frac_func = eval(frac)\n\n elif callable(frac):\n frac_func = frac\n\n else:\n raise TypeError(\n \"Expected 'fraction' to be either a string\"\n \" that can be interpreted by eval or a callable.\"\n \" Got '%s'.\" % type(frac)\n )\n\n dist_spec_dict['fraction'] = frac_func\n\n kwargs = dist_dict['kwargs']\n\n if not isinstance(kwargs, dict):\n raise TypeError(\n \"'kwargs' must hold a dictionary. Got '%s' instead.\"\n % type(kwargs)\n )\n\n dist_spec_dict['kwargs'] = kwargs\n for kwarg, kwarg_spec in kwargs.items():\n\n if isinstance(kwarg_spec, str):\n kwarg_eval = eval(kwarg_spec)\n\n elif callable(kwarg_spec) or isscalar(kwarg_spec):\n kwarg_eval = kwarg_spec\n\n else:\n raise TypeError(\n \"Expected kwarg '%s' spec to be either a string\"\n \" that can be interpreted by eval, a callable or\"\n \" a scalar. 
Got '%s'.\" % type(kwarg_spec)\n )\n\n dist_spec_dict['kwargs'][kwarg] = kwarg_eval\n\n dim_dist_list.append(dist_spec_dict)\n\n reco_params[flavintgroup][dimension] = dim_dist_list\n\n return reco_params", "def load_params():\n file_name = filedialog.askopenfilename(\n filetypes=[(\"JSON\", \"*.json\")])\n if file_name:\n self.parent_class.classes[\"fractal\"].curve.load_from_file(\n file_name)\n self.parent_class.classes[\"fractal\"].curve.set_parent_parameters(\n )\n self.rules_frame_class.fill_entries_from_rules(\n self.parent_class.classes[\"fractal\"].rules)\n # fill the entries in rules input on load\n self.set_recursion_depth_entry(\n self.parent_class.classes[\"fractal\"].recursion_depth)\n self.set_base_length_entry(\n self.parent_class.classes[\"fractal\"].base_length)\n self.rules_frame_class.render_preview()", "def load_parameters(self):\n with open(INTERNAL_DATA_DIR / self.name_default_params, 'r') as f:\n return yaml.load(f, Loader=yaml.FullLoader)", "def load(self, *args, **kwargs) -> Any:\n pass", "def update_params(self, other):\n if isinstance(other, Params):\n found = False\n for key, param in other._src.items():\n if key in self._src:\n self._src[key] = param\n found = True\n\n if not found:\n raise RuntimeError(\n \"Tried to set parameters which do not exist in the target model.\"\n )\n else:\n raise RuntimeError(\"Attempt to stream non-parameter list to parameter list.\")", "def _read_custom_pars(self):\n if self.use_defaults:\n param_set = \"default_values\"\n else:\n param_set = \"parameters\"\n self.outpars = self.input_cfg_json_data[param_set][self.step_title]", "def load(self, data):\n\t\tif 'value' in data:\n\t\t\tself.value = data['value']", "def load_cls_params(self):\n with open('models/Final/linear_svc.p', 'rb') as model_file:\n model = pickle.load(model_file)\n self.svc = model['svc']\n self.X_scaler = model['X_scaler']\n self.parameters = model['parameters']\n\n print(self.parameters)", "def _inject_params(self, params):\n\n params.extend([LocaleParam(), CompileDomainsParam(),\n UseFuzzyParam(), StatisticsParam(),\n DirectoryParam(), OutputFileParam()])\n\n return super()._inject_params(params)", "def __init_values(self, values):\n for name, value in list(values.items()):\n if name in initializable_parameters:\n setattr(self, name, value)", "def __post_init__(self):\n # Only do this if source_data already exists (not during its own initialization)\n if \"SOURCE_DATA\" in globals():\n for data_field in fields(self):\n setattr(self, data_field.name, getattr(SOURCE_DATA, data_field.name))", "def load_params(param_file):\n with open(param_file, 'r') as pfile:\n eff_params = json.load(pfile)\n return eff_params", "def load_sond(parameter_list):\n pass", "def load_params(params_filename: str) -> Dict:\n \n # If no params filename is specified, return the default parameter setting.\n if not params_filename:\n return RunParams()\n\n return RunParams(**load_json(params_filename))", "def load(self, filename):\n param_dict = pickle.load(open('%s' % filename, 'rb'))\n self.learningrate = param_dict['learningrate']\n self.verbose = param_dict['verbose']\n self._loadsize = param_dict['loadsize']\n self._batchsize = param_dict['batchsize']\n self.momentum = param_dict['momentum']\n self.epochcount = param_dict['epochcount']\n self._momentum_batchcounter = param_dict['momentum_batchcounter']\n for param_name in param_dict['incs'].keys():\n for p in self._params:\n if p.name == param_name:\n self._incs[p].set_value(param_dict['incs'][param_name])\n if self.rmsprop is not 
None:\n for param_name in param_dict['avg_grad_sqrs'].keys():\n for p in self._params:\n if p.name == param_name:\n self._avg_grad_sqrs[p].set_value(param_dict['avg_grad_sqrs'][param_name])\n self._numbatches = self._loadsize // self._batchsize\n if self._inputs_type != 'function':\n self._numloads = self._inputs.shape[0] // self._loadsize\n if self._inputs_type == 'h5':\n self._inputs_theano.set_value(\n self._inputs.read(stop=self._loadsize))\n else:\n self._inputs_theano.set_value(self._inputs[:self._loadsize])", "def load_values(self):\n # TODO: Add self.prefix and extension\n NetworkTables.loadEntries(self.file.get_filename(), prefix='/vision/' + self.name + '_')", "def load_parameters(self, params):\n # load (aka. deep copy) parameters in params into network\n c=0\n self.params = []\n names = ['W_i']\n for n,p in zip(names, params):\n self.params.append(theano.shared(name = p.name,\n value = p.get_value(borrow=True)))\n \n setattr(self, n, self.params[c])\n c+=1\n assert( len(self.params) == c )", "def _load(self, inputVal):\n inputVal = XS_SCHEMA(inputVal)\n for xsID, inputParams in inputVal.items():\n self._value[xsID] = XSModelingOptions(xsID, **inputParams)\n return self._value", "def init_parameters(self, buildings, target_meters):\n if self.df is None:\n self.read_data_from_csv()\n\n if buildings is None:\n buildings = self.find_all_houses().tolist()\n\n if target_meters is None:\n target_meters = self.meter_name.keys()\n \n return buildings, target_meters", "def _load_parameters(self, default):\n params = {}\n for (key, value) in default:\n params[key] = self._parse_parameter(value)\n \n if not os.path.exists(self._datadir):\n os.makedirs(self._datadir)\n \n # Check if the file already exists, and create a new one, using the \n # passed default values, if necessary\n paramfile = os.path.join(self._datadir, self.id.lower() + '.cfg')\n if (os.path.isfile(paramfile)):\n paramjson = open(paramfile)\n params_var = json.load(paramjson)\n params.update(params_var)\n else:\n params_var = {}\n params_var['eta'] = [params['eta']]*24\n params_var['cov'] = [params['sigma']**2]*24\n params.update(params_var)\n \n with open(paramfile, 'w') as paramjson:\n json.dump(params_var, paramjson)\n \n return params", "def loadParams(self, paramsFile):\n dataDir = os.path.abspath(os.path.join(radiomics.__path__[0], 'schemas'))\n schemaFile = os.path.join(dataDir, 'paramSchema.yaml')\n schemaFuncs = os.path.join(dataDir, 'schemaFuncs.py')\n c = pykwalify.core.Core(source_file=paramsFile, schema_files=[schemaFile], extensions=[schemaFuncs])\n params = c.validate()\n\n inputImages = params.get('inputImage', {})\n enabledFeatures = params.get('featureClass', {})\n kwargs = params.get('setting', {})\n\n self.logger.debug(\"Parameter file parsed. 
Applying settings\")\n\n if len(inputImages) == 0:\n self.inputImages = {'Original': {}}\n else:\n self.inputImages = inputImages\n\n self.logger.debug(\"Enabled input images: %s\", self.inputImages)\n\n if len(enabledFeatures) == 0:\n self.enabledFeatures = {}\n for featureClassName in self.getFeatureClassNames():\n self.enabledFeatures[featureClassName] = []\n else:\n self.enabledFeatures = enabledFeatures\n\n self.logger.debug(\"Enabled features: %s\", enabledFeatures)\n\n # Set default settings and update with and changed settings contained in kwargs\n self.kwargs = self._getDefaultSettings()\n self.kwargs.update(kwargs)\n\n self.logger.debug(\"Settings: %s\", kwargs)", "def load_parameters(gp, target):\n with open(target) as f:\n pdict = json.load(f)\n gp.likelihood.set_state(pdict['likelihood'])\n gp.kern.variance.set_state(pdict['kern_variance'])\n gp.kern.lengthscales.set_state(pdict['kern_lengthscale'])\n #for p in pdict:\n # if p == 'warp_tanh.psi':\n # gp[p] = np.array(pdict[p]).reshape(3, 3)\n # else:\n # gp[p] = pdict[p]", "def _loadParamFromFile(config, section, paramName):\n\n # Get paramName from answer file\n value = config.get(section, paramName)\n\n # Validate param value using its validation func\n param = controller.getParamByName(paramName)\n _validateParamValue(param, value)\n\n # Keep param value in our never ending global conf\n controller.CONF[param.getKey(\"CONF_NAME\")] = value\n\n return value", "def __attrs_post_init__(self):\n # List of sources\n src_list = (\n self._source,\n self._plaintext,\n self._zlib,\n self._fname_plain,\n self._fname_zlib,\n self._dict_json,\n self._url,\n )\n src_count = sum(1 for _ in src_list if _ is not None)\n\n # Complain if multiple sources provided\n if src_count > 1:\n raise RuntimeError(\"At most one data source can be specified.\")\n\n # Leave uninitialized (\"manual\" init) if no source provided\n if src_count == 0:\n self.source_type = SourceTypes.Manual\n return\n\n # If general ._source was provided, run the generalized import\n if self._source is not None:\n self._general_import()\n return\n\n # For all of these below, '()' is passed as 'exc' argument since\n # desire _try_import not to handle any exception types\n\n # Plaintext str or bytes\n # Special case, since preconverting input.\n if self._plaintext is not None:\n self._try_import(\n self._import_plaintext_bytes, _utf8_encode(self._plaintext), ()\n )\n self.source_type = SourceTypes.BytesPlaintext\n return\n\n # Remainder are iterable\n for src, fxn, st in zip(\n (\n self._zlib,\n self._fname_plain,\n self._fname_zlib,\n self._dict_json,\n self._url,\n ),\n (\n self._import_zlib_bytes,\n self._import_plaintext_fname,\n self._import_zlib_fname,\n self._import_json_dict,\n self._import_url,\n ),\n (\n SourceTypes.BytesZlib,\n SourceTypes.FnamePlaintext,\n SourceTypes.FnameZlib,\n SourceTypes.DictJSON,\n SourceTypes.URL,\n ),\n ):\n if src is not None:\n self._try_import(fxn, src, ())\n self.source_type = st\n return", "def import_parameters(self, file_name):\n parameters = []\n\n with open(file_name) as in_file:\n parameters = json.load(in_file)\n\n if parameters:\n self.put_parameters(parameters)", "def load_parameters(self, session, data_dict):\n for w in self.weights:\n name = w.name.rsplit(':', 1)[0]\n if name in data_dict:\n session.run(w.assign(data_dict[name]))", "def load(self, p):\n return", "def load(self):\n\n # if the file doesn't exist, return\n if not os.path.exists(self.filepath):\n return\n\n # open the file and read in the raw values\n with 
open(self.filepath, 'r', encoding='utf-8') as fh:\n raw_values = json.loads(fh.read())\n\n # don't implicitly trust the raw values, but only get known keys\n for key in self.variables:\n if key in raw_values and 'value' in raw_values[key]:\n raw_value = raw_values[key]['value']\n self.variables[key]['value'] = raw_value", "def load_parms(self, file):\n ret_val = self._load_parms(file.encode())\n return ret_val", "def ReadParameterFile(pf):\n f = open(pf, \"r\")\n pf_dict = SetDefaultParameterValues()\n for line in f:\n if not line.split(): \n continue\n if line.split()[0][0] == \"#\": \n continue\n \n # This will prevent crashes if there is not a blank line at the end of the parameter file\n if line[-1] != '\\n': \n line += '\\n'\n \n # Cleave off end-of-line comments.\n line = line[:line.rfind(\"#\")].strip()\n \n # Read in the parameter name and the parameter value(s).\n parname, eq, parval = line.partition(\"=\")\n \n # Else, actually read in the parameter \n try: \n parval = float(parval)\n except ValueError:\n if re.search('/', parval): # For directory with more than one level\n parval = str(parval.strip())\n elif parval.strip().isalnum(): \n parval = str(parval.strip())\n elif parval.replace('_', '').strip().isalnum():\n parval = parval.strip()\n elif parval.partition('.')[-1] in ['dat', 'hdf5', 'h5', 'txt']:\n parval = str(parval.strip())\n else:\n parval = parval.strip().split(\",\")\n tmp = [] \n if parval[0][0] == '(':\n for element in parval: \n if element.strip(\" (,)\").isdigit(): \n tmp.append(float(element.strip(\"(,)\")))\n else: \n tmp.append(element.strip(\" (,)\"))\n parval = tuple(tmp) \n elif parval[0][0] == '[':\n for element in parval: \n tmp.append(float(element.strip(\"[,]\")))\n parval = list(tmp)\n else:\n print(parname, parval)\n raise ValueError('The format of this parameter is not understood.')\n \n pf_dict[parname.strip()] = parval\n \n return pf_dict", "def load_params(self):\n\n self.curr_ts_state = None\n\n # Get TS from param\n self.transition_system = import_ts_from_file(rospy.get_param('transition_system_textfile'))\n\n # Get monitored TS state model\n self.state_dimension_name = rospy.get_param(\"~state_dimension_name\", \"load\")\n\n # Get monitored action\n self.monitored_action = rospy.get_param(\"~monitored_action\", \"pick\")\n \n # Create dict to retrieve next state given current state and next action\n self.action_to_state = dict()\n for state in self.transition_system['state_models'][self.state_dimension_name]['nodes']:\n temp_dict = dict()\n for connected_state in self.transition_system['state_models'][self.state_dimension_name]['nodes'][state]['connected_to']:\n temp_dict.update({self.transition_system['state_models'][self.state_dimension_name]['nodes'][state]['connected_to'][connected_state]: connected_state})\n self.action_to_state.update({state: temp_dict})", "def loadParameters(self, parmfile=''):\n if not parmfile:\n raise IOError(\"You need to specify a parameter filename\")\n parmdir = os.getenv('ATMOSPHERE_PARAMETERS_DIR')\n parmpath = os.join.path(parmdir, parmfile)\n # Read from file\n with open(parmpath, 'r') as parmf:\n data = pickle.load(parmf)\n # Dictionary list\n self.modtran_visits = data[0]\n # Tuple list\n self.aerosol_visits = data[1]\n # seed value\n nruns = len(self.modtran_visits)\n print('Parameters for {1} runs computed with seed = {0}'.format(data[2],\n nruns))\n # Init transmission array\n self.initTransmissionArray(nruns)", "def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in 
self.source_params:\n self.source_params[key] = value", "def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value", "def load_parameter_file(filename: str) -> Dict:\n assert isinstance(filename, str) and len(filename) > 0\n param_dict = {}\n # TODO implement search through possible parameter config file locations\n # Open up the CSV file for reaching\n with open(filename) as f:\n csvreader = csv.DictReader(f, delimiter='\\t')\n\n accepted_field_names = {'mechanism': ['mechanism', 'mechanism_id'],\n 'param_name': [\"parameter_name\", \"parameter\", \"param\", \"param_name\"],\n 'part_id': ['part_id', 'part'],\n 'param_val': [\"val\", \"value\", \"param_val\", \"parameter_value\"]\n }\n\n field_names = Parameter._get_field_names(csvreader.fieldnames, accepted_field_names)\n\n if field_names['param_name'] is None:\n warn('No param name column was found, could not load parameter')\n return param_dict\n if field_names['mechanism'] is None:\n no_mechism_column = True\n else:\n no_mechism_column = False\n\n if field_names['part_id'] is None:\n no_part_id_column = True\n else:\n no_part_id_column = False\n\n for row in csvreader:\n # TODO what about integers? float might cause numerical drift in simulations, e.g. cooperativity=2.001\n param_value = float(row[field_names['param_val']])\n # TODO test all these cases!\n if row[field_names['param_name']] is None or len(row[field_names['param_name']]) == 0:\n pass\n elif no_mechism_column and no_part_id_column:\n param_name = row[field_names['param_name']]\n param_dict[param_name] = param_value\n elif no_mechism_column and no_part_id_column is False:\n if row[field_names['part_id']] is not None and len(row[field_names['part_id']]) > 0:\n part_id = row[field_names['part_id']]\n param_name = row[field_names['param_name']]\n param_dict[(part_id, param_name)] = param_value\n else:\n param_name = row[field_names['param_name']]\n param_dict[param_name] = param_value\n elif no_part_id_column and no_mechism_column is False:\n if row[field_names['mechanism']] is not None and len(row[field_names['mechanism']]) > 0:\n mech_name = row[field_names['mechanism']]\n param_name = row[field_names['param_name']]\n param_dict[(mech_name, param_name)] = param_value\n else:\n param_name = row[field_names['param_name']]\n param_dict[param_name] = param_value\n else:\n if row[field_names['part_id']] is not None and len(row[field_names['part_id']]) > 0:\n if row[field_names['mechanism']] is not None and len(row[field_names['mechanism']]) > 0:\n part_id = row[field_names['part_id']]\n mech_name = row[field_names['mechanism']]\n param_name = row[field_names['param_name']]\n param_dict[(mech_name, part_id, param_name)] = param_value\n else:\n part_id = row[field_names['part_id']]\n param_name = row[field_names['param_name']]\n param_dict[(part_id, param_name)] = param_value\n else:\n if row[field_names['mechanism']] is not None and len(row[field_names['mechanism']]) > 0:\n mech_name = row[field_names['mechanism']]\n param_name = row[field_names['param_name']]\n param_dict[(mech_name, param_name)] = param_value\n else:\n param_name = row[field_names['param_name']]\n param_dict[param_name] = param_value\n\n return param_dict", "def __init__(self, source, parameter='', file_path=None):\n super().__init__() \n self.filter_type = 'tolerance'\n self.source = source\n self.parameter = parameter\n self._initate_filter_items()\n if file_path:\n self.load_filter_file(file_path)", "def dynamo_load(self, 
value, *, context, **kwargs):\n raise NotImplementedError", "def load_yaml_params(self, params_file):\n self._update_params(params_file)", "def __loadDataset(self, parameters):\n # self.localConfigured = Settings.instance().readValue( key = 'Common/local-repo' )\n for pr in parameters:\n if pr['type'] == 'dataset':\n if pr['value'].startswith('undefined:/'):\n fileName = pr['value'].split('undefined:/')[1]\n if not os.path.exists( fileName ):\n raise Exception(\"the following test data file is missing: %s \" % fileName)\n\n doc = FileModelTestData.DataModel()\n res = doc.load( absPath = fileName )\n pr['value'] = \"undefined:/%s\" % doc.getRaw()\n elif pr['value'].startswith('local-tests:/'):\n fileName = pr['value'].split('local-tests:/')[1]\n\n if not os.path.exists( fileName ):\n raise Exception(\"the following test data file is missing: %s \" % fileName)\n \n doc = FileModelTestData.DataModel()\n res = doc.load( absPath = fileName )\n pr['value'] = \"local-tests:/%s\" % doc.getRaw()\n else:\n pass", "def load(proxy=None, mode=None, parameters=None, json_path=None):\n ps = load_params(json_path)\n \n try:\n ps = ps[proxy]\n except:\n raise KeyError(\"`proxy` incorrect. Try one of: ['{}']\".format(\"', '\".join(ps.keys())))\n\n try:\n ps = ps[mode]\n except:\n raise KeyError(\"`mode` incorrect. Try one of: ['{}']\".format(\"', '\".join(ps.keys())))\n \n try:\n ps = ps[parameters]\n except:\n raise KeyError(\"`parameters` incorrect. Try one of: ['{}']\".format(\"', '\".join(ps.keys())))\n \n p = params(values=ps)\n p.param_name = parameters\n \n return p", "def _set_training_params(self, params):\n self.lyapunov_hybrid_system.lyapunov_relu.load_state_dict(\n params[\"lyap_relu_params\"])\n if not self.R_options.fixed_R:\n self.R_options._variables = params[\"R_params\"].clone()\n if isinstance(self.lyapunov_hybrid_system.system,\n feedback_system.FeedbackSystem):\n self.lyapunov_hybrid_system.system.controller_network.\\\n load_state_dict(params[\"controller_params\"])", "def load_parameters():\n\n retval = RP_LIB.rp_LoadLockboxConfig()\n if retval != 0:\n LOG.error(\"Failed to load parameters. 
Error code: %s\", ERROR_CODES[retval])", "def LoadParams(file):\n global globalParams\n global globalSections\n\n # check to see whether the file exists\n try: f = open(file, 'r')\n except IOError:\n fail('ERROR: parameter file does not exist: ', file)\n else:\n f.close()\n\n\n cp = ConfigParser.ConfigParser()\n cp.optionxform = str\n cp.read(file)\n\n globalSections = cp.sections()\n\n for sec in cp.sections():\n\n for opt in cp.options(sec):\n\n value = cp.get(sec, opt)\n \n # check in turn whether this is an interger, float, or string\n if (isInt(value)):\n globalParams[sec + \".\" + opt] = int(value)\n elif (isFloat(value)):\n globalParams[sec + \".\" + opt] = float(value)\n else:\n globalParams[sec + \".\" + opt] = value.strip()", "def init_params(self):\n self.clear()\n self._init_load_data()\n self._init_net_delay_data()", "def _instantiate_parameter_states(self, context=None):\n\n from PsyNeuLink.Components.States.ParameterState import _instantiate_parameter_states\n _instantiate_parameter_states(owner=self, context=context)", "def load(self, source):\n try:\n inputdata = self.__inputmanager.read(source)\n self.__suitables = self.__inputmanager.map(inputdata)\n self.__data = inputdata\n except ValueError as e:\n print (\"Failed to load the dataset: %s\" % e)\n raise\n\n self.__modules = self.import_suitable_visualizations(self.__suitables)\n self.__has_datefields = self.__inputmanager.has_date_points()\n # Converting the datakeys into strings.\n self.__datakeys = [str(i) for i in list(self.__data[0].keys())]\n return self.__suitables", "def load_parameters(self, filename=None):\n if not filename:\n filename = os.path.join(self.directory, 'learned_parameters.npy')\n params = numpy.load(filename)\n lasagne.layers.set_all_param_values(self.__network, params)", "def load_params(exe, prog, path, ignore_params=[]):\n if not (os.path.isdir(path) or os.path.exists(path + '.pdparams')):\n raise ValueError(\"Model pretrain path {} does not \"\n \"exists.\".format(path))\n\n logger.info('Loading parameters from {}...'.format(path))\n\n ignore_set = set()\n state = _load_state(path)\n\n # ignore the parameter which mismatch the shape\n # between the model and pretrain weight.\n all_var_shape = {}\n for block in prog.blocks:\n for param in block.all_parameters():\n all_var_shape[param.name] = param.shape\n ignore_set.update([\n name for name, shape in all_var_shape.items()\n if name in state and shape != state[name].shape\n ])\n\n if ignore_params:\n all_var_names = [var.name for var in prog.list_vars()]\n ignore_list = filter(\n lambda var: any([re.match(name, var) for name in ignore_params]),\n all_var_names)\n ignore_set.update(list(ignore_list))\n\n if len(ignore_set) > 0:\n for k in ignore_set:\n if k in state:\n logger.warning('variable {} not used'.format(k))\n del state[k]\n fluid.io.set_program_state(prog, state)", "def _inject_params(self, params):\n\n params.extend([DomainParam(), InputTemplateFileParam(),\n OutputDirectoryParam(), LocaleParam(),\n WidthParam(), NoWrapParam(), OutputFileParam()])\n\n return super()._inject_params(params)", "def load_params_from_file(self, fn):\n f = file(fn, 'r')\n params = json.load(f)\n return params", "def _reload_values(self):\r\n raise NotImplementedError", "def from_params(self, params):\n raise NotImplementedError()", "def get_dynamic_source_parameter(source):\n return [\n KwParameter(\n name=\"source\",\n annotation=source.type,\n default=None,\n _type=source.type,\n )\n ]", "def load_parameters(self, session, data_dict):\n for layer in 
self.layers:\n layer.load_parameters(session, data_dict)", "def load(self, path):\n parameters = paddle.load(path)\n self.set_dict(parameters)", "def get_additional_source_parameters(\n source_parameters: List[Tuple[str, str]],\n common_source_parameters: List[Tuple[str, str]],\n nhm_data: Dict[str, NHMFault],\n vel_mod_1d_layers: pd.DataFrame,\n):\n additional_source_parameters = pd.DataFrame(index=sorted(list(nhm_data.keys())))\n for param_name, filepath in source_parameters:\n parameter_df = pd.read_csv(\n filepath,\n delim_whitespace=True,\n header=None,\n index_col=0,\n names=[param_name],\n dtype={0: str},\n )\n additional_source_parameters = additional_source_parameters.join(\n parameter_df, how=\"outer\"\n )\n for param_name, value in common_source_parameters:\n try:\n value = int(value)\n except:\n try:\n value = float(value)\n except:\n pass\n additional_source_parameters[param_name] = value\n return additional_source_parameters", "def init(self, cr):\n param_obj = self.pool.get('ir.config_parameter')\n for key, func in _default_parameters.iteritems():\n ids = param_obj.search(cr, 1, [('key', '=', key)])\n if not ids:\n param_obj.set_param(cr, 1, key, func())", "def auto_populate_parameters(self):\n run_arguments = get_func_arguments(self.run)\n\n if not run_arguments:\n return\n\n # ignore 'self' argument, should be safe-ish\n if \"self\" in list(run_arguments.keys()):\n run_arguments.pop(\"self\")\n\n for param_name, default_value in run_arguments.items():\n is_required = default_value == RequiresValueType\n if is_required:\n run_arguments[param_name] = str() # fill to make sure every argument has something\n\n if run_arguments:\n self.param_grid.from_data(run_arguments)\n self._parameters_auto_generated = True", "def conf_load_parameter(fin):\n err_msg = \"Unknown parameter definition. Excpected $par_name=(list|range|linspace).\"\n spec = fin.readline().strip().split('=')\n if len(spec) != 2:\n raise EnvironmentError(err_msg)\n par_name, par_def = [s.strip() for s in spec]\n if len(par_def) > 1 and par_def[0] == '[' and par_def[-1] == ']':\n return par_name, conf_load_par_list(par_def)\n elif len(par_def) > 3 and par_def.count(':') == 2 and par_def[-1] == 'l':\n return par_name, conf_load_par_linspace(par_def)\n elif par_def.count(':') == 2:\n return par_name, conf_load_par_range(par_def)\n else:\n raise EnvironmentError(err_msg + \" Found {0} for {1}\".format(par_def,par_name))", "def conf_load_parameters(fin,skeleton):\n err_msg = \"Unknown specification. Excpected VAR:(nonnegative int)\"\n spec = fin.readline().strip().split(':')\n if len(spec) != 2 or spec[0] != 'VAR' or not str_is_nonnegative_int(spec[1]):\n raise EnvironmentError(err_msg)\n num_of_vars = int(spec[1])\n pars_list = []\n for _ in xrange(num_of_vars):\n par_name, par_list = conf_load_parameter(fin)\n if par_name in pars_list:\n raise EnvironmentError(\"Parameter {} already defined.\".format(par_name))\n pars_list.append(par_name)\n skeleton.add_parameter(par_name, par_list)", "def set_params(self):\n debug = False\n if self.headless:\n self.has_header = self.headless_has_header\n else:\n if not os.path.exists(self.fpath):\n raise Exception(f'Unable to find file \"{self.fpath}\" for '\n 'importing. Please check that file exists.')\n size = ods_reader.get_ods_xml_size(self.fpath)\n if size > mg.ODS_GETTING_LARGE:\n ret = wx.MessageBox(_('This spreadsheet may take a while to '\n 'import.\\n\\nInstead of importing, it could be faster to '\n 'save as csv and import the csv version.' 
\n '\\n\\nImport now anyway?'), \n _('SLOW IMPORT'), wx.YES_NO|wx.ICON_INFORMATION)\n if ret == wx.NO:\n raise my_exceptions.ImportCancel\n else:\n ## Don't parse the data (too big) so you can display a\n ## sample and get a decision on having a header - just ask\n ## the user to tell us whether there is a header or not.\n importer.FileImporter.set_params(self)\n else:\n self._set_params_based_on_sample()", "def __init__(self, values: dict):\n\n self.sources = InstantaneousSourceDict", "def _initialize_params(self, parnames, sep=\".\"):\n for name in parnames:\n self.get_pvobj(name, sep)", "def initialize(filename='params.yaml'):\n home_path = str(Path.home())\n project_path = 'Documents/SideProjects/sailboatsfactory'\n work_path = 'src/nn-core'\n params_path = join(home_path, join(project_path, work_path))\n yaml_file = join(params_path, filename)\n print(\"Reading parameters from:\", filename)\n with open(yaml_file, 'r') as f:\n my_params = load(f)\n my_params['x_scaler'] = MinMaxScaler(feature_range=(-1, 1))\n my_params['y_scaler'] = MinMaxScaler(feature_range=(-1, 1))\n\n raw = data.read(my_params)\n adjusted = adjust(raw, my_params)\n\n return adjusted, my_params", "def __prepare_parameter__(self, in_args):\n if self.__use_remote_sparse_updater__():\n self.__gradient_machine__.prefetch(in_args)\n self.__parameter_updater__.getParametersRemote()", "def get_params(self, paramFile):\n\n with open(paramFile, 'r') as f:\n titleLine = next(f)\n\n for line in f:\n p, i, v = line.split(\",\")\n\n self.params.update(p, v, i)", "def load_params(self, params):\n super(MlpModel, self).load_params(params)\n self.input_shape = [None,] + self.params.data_shape\n self.label_shape = [None, self.params.num_classes]\n self.mlp_act_funcs = [activation_picker(act_func_str)\n for act_func_str in self.params.mlp_activation_functions]", "def load_data(self) -> None:", "def _get_parameter_data(\n self, vis_hdu, read_source, run_check_acceptability, background_lsts=True\n ):\n # astropy.io fits reader scales date according to relevant PZER0 (?)\n # uvfits standard is to have 2 DATE parameters, both floats:\n # DATE (full day) and _DATE (fractional day)\n # cotter uvfits files have one DATE that is a double\n # using data.par('date') is general -- it will add them together if there are 2\n self.time_array = vis_hdu.data.par(\"date\")\n\n self.Ntimes = len(np.unique(self.time_array))\n\n # check if lst array is saved. It's not a standard metadata item in uvfits,\n # but if the file was written with pyuvdata it may be present\n # (depending on pyuvdata version)\n proc = None\n if \"LST\" in vis_hdu.data.parnames:\n # angles in uvfits files are stored in degrees, so convert to radians\n self.lst_array = np.deg2rad(vis_hdu.data.par(\"lst\"))\n if run_check_acceptability:\n (\n latitude,\n longitude,\n altitude,\n ) = self.telescope_location_lat_lon_alt_degrees\n uvutils.check_lsts_against_times(\n jd_array=self.time_array,\n lst_array=self.lst_array,\n latitude=latitude,\n longitude=longitude,\n altitude=altitude,\n lst_tols=self._lst_array.tols,\n frame=self._telescope_location.frame,\n )\n\n else:\n proc = self.set_lsts_from_time_array(background=background_lsts)\n\n # if antenna arrays are present, use them. 
otherwise use baseline array\n if \"ANTENNA1\" in vis_hdu.data.parnames and \"ANTENNA2\" in vis_hdu.data.parnames:\n # Note: we no longer subtract 1 from the antenna arrays\n # The antanna arrays are not indices but rather are numbers\n # that correspond to particular antennas\n self.ant_1_array = np.int32(vis_hdu.data.par(\"ANTENNA1\"))\n self.ant_2_array = np.int32(vis_hdu.data.par(\"ANTENNA2\"))\n # for instruments with a single subarray, the subarray should be set to 1s\n subarray = np.int32(vis_hdu.data.par(\"SUBARRAY\"))\n # error on files with multiple subarrays\n if len(set(subarray)) > 1:\n raise ValueError(\n \"This file appears to have multiple subarray \"\n \"values; only files with one subarray are \"\n \"supported.\"\n )\n else:\n # cannot set this to be the baseline array because it uses the\n # 256 convention, not our 2048 convention\n bl_input_array = np.int64(vis_hdu.data.par(\"BASELINE\"))\n\n # get antenna arrays based on uvfits baseline array\n self.ant_1_array, self.ant_2_array = self.baseline_to_antnums(\n bl_input_array\n )\n\n if read_source:\n source = vis_hdu.data.par(\"SOURCE\")\n self.phase_center_id_array = source.astype(int)\n\n # get self.baseline_array using our convention\n self.baseline_array = self.antnums_to_baseline(\n self.ant_1_array, self.ant_2_array\n )\n self.Nbls = len(np.unique(self.baseline_array))\n\n # initialize internal variables based on the antenna lists\n self.Nants_data = int(np.union1d(self.ant_1_array, self.ant_2_array).size)\n\n # check for suffixes in the baseline coordinate names indicating the\n # baseline coordinate system\n if (\n \"UU\" in vis_hdu.data.parnames\n and \"VV\" in vis_hdu.data.parnames\n and \"WW\" in vis_hdu.data.parnames\n ):\n uvw_names = [\"UU\", \"VV\", \"WW\"]\n elif (\n \"UU---SIN\" in vis_hdu.data.parnames\n and \"VV---SIN\" in vis_hdu.data.parnames\n and \"WW---SIN\" in vis_hdu.data.parnames\n ):\n uvw_names = [\"UU---SIN\", \"VV---SIN\", \"WW---SIN\"]\n elif (\n \"UU---NCP\" in vis_hdu.data.parnames\n and \"VV---NCP\" in vis_hdu.data.parnames\n and \"WW---NCP\" in vis_hdu.data.parnames\n ):\n uvw_names = [\"UU---NCP\", \"VV---NCP\", \"WW---NCP\"]\n warnings.warn(\n \"The baseline coordinates (uvws) in this file are specified in the \"\n \"---NCP coordinate system, which is does not agree with our baseline \"\n \"coordinate conventions. Rotating the uvws to match our convention \"\n \"(Note that this rotation has not been widely tested).\"\n )\n else:\n raise ValueError(\n \"There is no consistent set of baseline coordinates in this file. 
\"\n \"The UU, VV and WW coordinate must have no suffix or the '---SIN' or \"\n \"'---NCP' suffix and the suffixes must match on all three baseline \"\n \"coordinate parameters.\"\n )\n\n # read baseline vectors in units of seconds, return in meters\n # FITS uvw direction convention is opposite ours and Miriad's.\n # So conjugate the visibilities and flip the uvws:\n # uvfits files often have uvws in single precision rather than double precision.\n # setting the dtype below enforces double precision\n self.uvw_array = (-1) * (\n np.array(\n np.stack(\n (\n vis_hdu.data.par(uvw_names[0]),\n vis_hdu.data.par(uvw_names[1]),\n vis_hdu.data.par(uvw_names[2]),\n )\n ),\n dtype=self._uvw_array.expected_type,\n )\n * const.c.to(\"m/s\").value\n ).T\n\n if \"INTTIM\" in vis_hdu.data.parnames:\n self.integration_time = np.asarray(\n vis_hdu.data.par(\"INTTIM\"), dtype=np.float64\n )\n else:\n if self.Ntimes > 1:\n # assume that all integration times in the file are the same\n int_time = self._calc_single_integration_time()\n self.integration_time = (\n np.ones_like(self.time_array, dtype=np.float64) * int_time\n )\n else:\n warnings.warn(\n \"The integration time is not specified and only one time is \"\n \"present so it cannot be calculated from the difference between \"\n \"integration times. Setting to None which will cause the check to \"\n \"error. Set `run_check` to False to read in the file without \"\n \"checking. Then set the integration_time (to an array of length \"\n \"Nblts) directly on the object to allow futher processing.\"\n )\n\n if proc is not None:\n proc.join()", "def loadData(self,ins):\n raise AbstractError" ]
[ "0.740036", "0.64931524", "0.6455669", "0.6353233", "0.6309897", "0.63086814", "0.6287176", "0.62651426", "0.6150741", "0.6106724", "0.60551184", "0.6034574", "0.6032307", "0.6026318", "0.59740245", "0.59592307", "0.5957556", "0.5954891", "0.5941733", "0.59211797", "0.5912147", "0.59041023", "0.5850041", "0.58398366", "0.58349544", "0.5815013", "0.5790082", "0.5738577", "0.57288086", "0.5719966", "0.5690484", "0.56745154", "0.5672856", "0.5670615", "0.5665425", "0.5664194", "0.56590486", "0.5658649", "0.5634652", "0.56257266", "0.5606272", "0.56060994", "0.5586458", "0.55779237", "0.55710703", "0.55450344", "0.55410457", "0.55400336", "0.5539686", "0.5532186", "0.55050313", "0.54937696", "0.5491772", "0.5469607", "0.5468124", "0.5462092", "0.5457851", "0.545697", "0.544728", "0.5446848", "0.5435916", "0.54313385", "0.54209477", "0.54135174", "0.54135174", "0.5409215", "0.5404905", "0.53922564", "0.5377673", "0.53737855", "0.5372014", "0.5363384", "0.5362832", "0.536241", "0.5360184", "0.53596735", "0.5356175", "0.53541124", "0.53492826", "0.5348318", "0.5340444", "0.53366697", "0.53334284", "0.5329254", "0.53291804", "0.53251195", "0.5315072", "0.5309565", "0.5306818", "0.5306398", "0.5305529", "0.530276", "0.5301666", "0.5295156", "0.5290095", "0.52847725", "0.52841526", "0.52826566", "0.52726203", "0.5272034", "0.5265983" ]
0.0
-1
Convert 2D alignment parameters (alpha, sx, sy, mirror) into 3D alignment parameters (phi, theta, psi, s2x, s2y, mirror)
def params_2D_3D(alpha, sx, sy, mirror):
    phi = 0
    psi = 0
    theta = 0
    alphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)
    if mirror > 0:
        phi = (540.0 + phi)%360.0
        theta = 180.0 - theta
        psi = (540.0 - psi + alphan)%360.0
    else:
        psi = (psi + alphan)%360.0
    return phi, theta, psi, s2x, s2y
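Note that the mirror flag is folded into the returned Euler angles (phi, theta, psi) rather than returned separately. The lines below are a minimal, self-contained sketch of that angular bookkeeping only: it ignores the shift handling done by the SPARX/EMAN2 utility and assumes the compose_transform2 call above reduces to alphan = (-alpha) % 360 when both scales are 1 and the second transform carries no shift. The helper name angles_2d_to_3d is hypothetical and used purely for illustration.

def angles_2d_to_3d(alpha, mirror):
    # Angular part of params_2D_3D; the composed in-plane angle is taken
    # as (-alpha) % 360 (assumption stated above), shifts are omitted.
    alphan = (-alpha) % 360.0
    phi = psi = theta = 0.0
    if mirror > 0:
        phi = (540.0 + phi) % 360.0          # -> 180.0
        theta = 180.0 - theta                # -> 180.0
        psi = (540.0 - psi + alphan) % 360.0
    else:
        psi = (psi + alphan) % 360.0
    return phi, theta, psi

print(angles_2d_to_3d(30.0, mirror=0))   # (0.0, 0.0, 330.0)
print(angles_2d_to_3d(30.0, mirror=1))   # (180.0, 180.0, 150.0)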
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror", "def get_alignment_params(self, s, w):\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def get_alignment_params(self, s, w):\n\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def get_params3D(ima, xform = \"xform.align3d\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],d[\"tx\"],d[\"ty\"],d[\"tz\"],d[\"mirror\"],d[\"scale\"]", "def combine_params2(alpha1, sx1, sy1, mirror1, alpha2, sx2, sy2, mirror2):\n\n\tt1 = Transform({\"type\":\"2D\",\"alpha\":alpha1,\"tx\":sx1,\"ty\":sy1,\"mirror\":mirror1,\"scale\":1.0})\n\tt2 = Transform({\"type\":\"2D\",\"alpha\":alpha2,\"tx\":sx2,\"ty\":sy2,\"mirror\":mirror2,\"scale\":1.0})\n\ttt = t2*t1\n\td = tt.get_params(\"2D\")\n\treturn d[ \"alpha\" ], d[ \"tx\" ], d[ \"ty\" ], d[ \"mirror\" ]", "def set_params3D(ima, p, xform = \"xform.align3d\"):\n\tt = Transform({\"type\":\"spider\",\"phi\":p[0],\"theta\":p[1],\"psi\":p[2],\"tx\":p[3],\"ty\":p[4],\"tz\":p[5],\"mirror\":p[6],\"scale\":p[7]})\n\tima.set_attr(xform, t)", "def sat_3d_position(sat_2d_position):\n return np.dot(transformation_parameter, xi_eta(sat_2d_position))", "def coord_space(\n a0: numpy.ndarray, a1: numpy.ndarray, a2: numpy.ndarray, rev: bool = False\n) -> Tuple[numpy.ndarray, Optional[numpy.ndarray]]:\n # dbg = False\n # if dbg:\n # print(a0.transpose())\n # print(a1.transpose())\n # print(a2.transpose())\n\n # a0 = acs[0]\n # a1 = acs[1]\n # a2 = acs[2]\n\n global gtm\n global gmry\n global gmrz, gmrz2\n\n tm = gtm\n mry = gmry\n mrz = gmrz\n mrz2 = gmrz2\n\n # tx acs[1] to origin\n # tm = homog_trans_mtx(-a1[0][0], -a1[1][0], -a1[2][0])\n set_homog_trans_mtx(-a1[0], -a1[1], -a1[2], tm)\n\n # directly translate a2 using a1\n p = a2 - a1\n sc = get_spherical_coordinates(p)\n\n # if dbg:\n # print(\"p\", p.transpose())\n # print(\"sc\", sc)\n\n # mrz = homog_rot_mtx(-sc[1], \"z\") # rotate translated a2 -azimuth about Z\n set_Z_homog_rot_mtx(-sc[1], mrz)\n # mry = homog_rot_mtx(-sc[2], \"y\") # rotate translated a2 -polar_angle about Y\n set_Y_homog_rot_mtx(-sc[2], mry)\n\n # mt completes a1-a2 on Z-axis, still need to align a0 with XZ plane\n # mt = mry @ mrz @ tm # python 3.5 and later\n mt = gmry.dot(gmrz.dot(gtm))\n\n # if dbg:\n # print(\"tm:\\n\", tm)\n # print(\"mrz:\\n\", mrz)\n # print(\"mry:\\n\", mry)\n # # print(\"mt \", mt)\n\n p = mt.dot(a0)\n\n # if dbg:\n # print(\"mt:\\n\", mt, \"\\na0:\\n\", a0, \"\\np:\\n\", p)\n\n # need azimuth of translated a0\n # sc2 = get_spherical_coordinates(p)\n # print(sc2)\n azimuth2 = 
_get_azimuth(p[0], p[1])\n\n # rotate a0 -azimuth2 about Z to align with X\n # mrz2 = homog_rot_mtx(-azimuth2, \"z\")\n set_Z_homog_rot_mtx(-azimuth2, mrz2)\n\n # mt = mrz2 @ mt\n mt = gmrz2.dot(mt)\n\n # if dbg:\n # print(\"mt:\", mt, \"\\na0:\", a0, \"\\np:\", p)\n # # print(p, \"\\n\", azimuth2, \"\\n\", mrz2, \"\\n\", mt)\n\n # if dbg:\n # print(\"mt:\\n\", mt)\n # print(\"<<<<<<==============================\")\n\n if not rev:\n return mt, None\n\n # rev=True, so generate the reverse transformation\n\n # rotate a0 theta about Z, reversing alignment with X\n # mrz2 = homog_rot_mtx(azimuth2, \"z\")\n set_Z_homog_rot_mtx(azimuth2, mrz2)\n # rotate a2 phi about Y\n # mry = homog_rot_mtx(sc[2], \"y\")\n set_Y_homog_rot_mtx(sc[2], mry)\n # rotate a2 theta about Z\n # mrz = homog_rot_mtx(sc[1], \"z\")\n set_Z_homog_rot_mtx(sc[1], mrz)\n # translation matrix origin to a1\n # tm = homog_trans_mtx(a1[0][0], a1[1][0], a1[2][0])\n set_homog_trans_mtx(a1[0], a1[1], a1[2], tm)\n\n # mr = tm @ mrz @ mry @ mrz2\n mr = gtm.dot(gmrz.dot(gmry.dot(gmrz2)))\n # mr = numpy.dot(tm, numpy.dot(mrz, numpy.dot(mry, mrz2)))\n\n return mt, mr", "def get_params2D(ima, xform = \"xform.align2d\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"2D\")\n\treturn d[\"alpha\"],d[\"tx\"],d[\"ty\"],d[\"mirror\"],d[\"scale\"]", "def to_alignment(self):\n alignment = dict()\n alignment[\"x\"] = self.x\n alignment[\"w\"] = self.w\n alignment[\"y\"] = self.y\n alignment[\"h\"] = self.h\n alignment[\"frame_dims\"] = self.frame_dims\n alignment[\"landmarksXY\"] = self.landmarksXY\n return alignment", "def set_params2D(ima, p, xform = \"xform.align2d\"):\n\tt = Transform({\"type\":\"2D\",\"alpha\":p[0],\"tx\":p[1],\"ty\":p[2],\"mirror\":p[3],\"scale\":p[4]})\n\tima.set_attr(xform, t)", "def combine_trans_projection(tx: float, ty: float, rot: float, mag: float, x: float, y: float, z: float, phi: float, the: float, psi: float, tiltangle: float, dim: float, binning: int, particle_dim: int = 200):\n from numpy import cos, sin, pi\n\n # Calculates the inverse transformation matrix of the projection alignment transformations\n alpha = -rot * pi/180\n c = cos(alpha)\n s = sin(alpha)\n\n rotate = np.matrix([[c, s, 0], [-s, c, 0], [0, 0, 1]])\n magnify = np.matrix([[mag, 0, 0], [0, mag, 0], [0, 0, 1]])\n translate = np.matrix([[1, 0, tx], [0, 1, ty], [0, 0, 1]])\n\n align_transformations = np.linalg.inv(rotate * magnify * translate)\n\n # Map the 3D position to a 2D position on the projection of the tiltangle\n x = x * binning\n y = y * binning\n z = z * binning\n\n aligned_y = y # assume the rotation axis is around y\n aligned_x = (cos(tiltangle * pi / 180) * (x - dim / 2) -\n sin(tiltangle * pi / 180) * (z - dim / 2)) + dim / 2\n\n # Use the projection alignment transformations to map this 2D position to a 2D position on the raw projections\n aligned_pos = np.matrix([[aligned_x - dim/2], [aligned_y - dim/2], [1]])\n raw_pos = align_transformations * aligned_pos\n\n # Calculate the rotation matrix for the template, a combination of the particle rotation and the tilt angle\n template_3d_rotation = generate_rotation_matrix(0, tiltangle, 0) * generate_rotation_matrix(\n phi, the, psi) * matrix_rotate_3d_z(rot) * matrix_magnify_3d(mag)\n\n # Merge this matrix with the projection transformations\n merged_matrix = template_3d_rotation\n\n return (align_transformations, (raw_pos.item(0, 0) + dim/2, raw_pos.item(1, 0) + dim/2), (aligned_x, aligned_y), merged_matrix)", "def 
compose_transform3(phi1,theta1,psi1,sx1,sy1,sz1,scale1,phi2,theta2,psi2,sx2,sy2,sz2,scale2):\n\n\tR1 = Transform({\"type\":\"spider\",\"phi\":float(phi1),\"theta\":float(theta1),\"psi\":float(psi1),\"tx\":float(sx1),\"ty\":float(sy1),\"tz\":float(sz1),\"mirror\":0,\"scale\":float(scale1)})\n\tR2 = Transform({\"type\":\"spider\",\"phi\":float(phi2),\"theta\":float(theta2),\"psi\":float(psi2),\"tx\":float(sx2),\"ty\":float(sy2),\"tz\":float(sz2),\"mirror\":0,\"scale\":float(scale2)})\n\tRcomp=R2*R1\n\td = Rcomp.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],d[\"tx\"],d[\"ty\"],d[\"tz\"],d[\"scale\"]", "def convert_coordinate_system_3d(x, y, z):\n\n return x, -z, y", "def get_affine_matrix3d(\n translations: Tensor,\n center: Tensor,\n scale: Tensor,\n angles: Tensor,\n sxy: Tensor | None = None,\n sxz: Tensor | None = None,\n syx: Tensor | None = None,\n syz: Tensor | None = None,\n szx: Tensor | None = None,\n szy: Tensor | None = None,\n) -> Tensor:\n transform: Tensor = get_projective_transform(center, -angles, scale)\n transform[..., 3] += translations # tx/ty/tz\n\n # pad transform to get Bx3x3\n transform_h = convert_affinematrix_to_homography3d(transform)\n if any(s is not None for s in [sxy, sxz, syx, syz, szx, szy]):\n shear_mat = get_shear_matrix3d(center, sxy, sxz, syx, syz, szx, szy)\n transform_h = transform_h @ shear_mat\n\n return transform_h", "def matrix_translate_3d(tx: float, ty: float, tz: float) -> np.matrix:\n return np.matrix([[1, 0, 0, tx], [0, 1, 0, ty], [0, 0, 1, tz], [0, 0, 0, 1]])", "def vs3_func_3(ns, traj, vs_def_beads_ids, vs_params):\n i, j, k = vs_def_beads_ids\n ang_deg, d = vs_params # degrees, nm\n ang_rad = np.deg2rad(ang_deg) # retrieve radians\n d = d * 10 # retrieve amgstrom for MDA\n\n for ts in ns.aa2cg_universe.trajectory:\n pos_i = ns.aa2cg_universe.atoms[i].position\n pos_j = ns.aa2cg_universe.atoms[j].position\n pos_k = ns.aa2cg_universe.atoms[k].position\n r_ij = pos_j - pos_i\n r_jk = pos_k - pos_j\n comb_ijk = r_jk - (np.dot(r_ij, r_jk) / np.dot(r_ij, r_ij)) * r_ij\n traj[ts.frame] = pos_i + d * np.cos(ang_rad) * (r_ij / mda.lib.mdamath.norm(r_ij)) + d * np.sin(ang_rad) * (\n comb_ijk / mda.lib.mdamath.norm(comb_ijk))", "def vs3_func_4(ns, traj, vs_def_beads_ids, vs_params):\n i, j, k = vs_def_beads_ids\n a, b, c = vs_params # weight, weight, nm**(-1)\n c = c / 10 # retrieve amgstrom**(-1) for MDA\n\n for ts in ns.aa2cg_universe.trajectory:\n pos_i = ns.aa2cg_universe.atoms[i].position\n pos_j = ns.aa2cg_universe.atoms[j].position\n pos_k = ns.aa2cg_universe.atoms[k].position\n r_ij = pos_j - pos_i\n r_ik = pos_k - pos_i\n traj[ts.frame] = pos_i + a * r_ij + b * r_ik - c * (\n r_ij / mda.lib.mdamath.norm(r_ij) * r_ik / mda.lib.mdamath.norm(r_ik))", "def vs3_func_2(ns, traj, vs_def_beads_ids, vs_params):\n i, j, k = vs_def_beads_ids\n a, b = vs_params # weight, nm\n b = b * 10 # retrieve amgstrom for MDA\n\n for ts in ns.aa2cg_universe.trajectory:\n pos_i = ns.aa2cg_universe.atoms[i].position\n pos_j = ns.aa2cg_universe.atoms[j].position\n pos_k = ns.aa2cg_universe.atoms[k].position\n r_ij = pos_j - pos_i\n r_jk = pos_k - pos_j\n comb_ijk = (1 - a) * r_ij + a * r_jk\n traj[ts.frame] = pos_i + b * (comb_ijk / mda.lib.mdamath.norm(comb_ijk))", "def vs3_func_1(ns, traj, vs_def_beads_ids, vs_params):\n\n i, j, k = vs_def_beads_ids\n a, b = vs_params # nm, nm\n a, b = a * 10, b * 10 # retrieve amgstrom for MDA\n\n for ts in ns.aa2cg_universe.trajectory:\n pos_i = ns.aa2cg_universe.atoms[i].position\n pos_j = 
ns.aa2cg_universe.atoms[j].position\n pos_k = ns.aa2cg_universe.atoms[k].position\n r_ij = pos_j - pos_i\n r_ik = pos_k - pos_i\n traj[ts.frame] = pos_i + a * r_ij / mda.lib.mdamath.norm(r_ij) / 2 + b * r_ik / mda.lib.mdamath.norm(r_ik) / 2", "def get_affine_matrix3d(\n translations: torch.Tensor,\n center: torch.Tensor,\n scale: torch.Tensor,\n angles: torch.Tensor,\n sxy: Optional[torch.Tensor] = None,\n sxz: Optional[torch.Tensor] = None,\n syx: Optional[torch.Tensor] = None,\n syz: Optional[torch.Tensor] = None,\n szx: Optional[torch.Tensor] = None,\n szy: Optional[torch.Tensor] = None,\n) -> torch.Tensor:\n transform: torch.Tensor = get_projective_transform(center, -angles, scale)\n transform[..., 3] += translations # tx/ty/tz\n\n # pad transform to get Bx3x3\n transform_h = convert_affinematrix_to_homography3d(transform)\n if any(s is not None for s in [sxy, sxz, syx, syz, szx, szy]):\n shear_mat = get_shear_matrix3d(center, sxy, sxz, syx, syz, szx, szy)\n transform_h = transform_h @ shear_mat\n\n return transform_h", "def _update_params(self):\n with self.sphere.sphere_lock:\n self._args_to_params(self.sphere.bai_1d_args, self.bai_1d_pars)\n self._args_to_params(self.sphere.bai_2d_args, self.bai_2d_pars)\n #self._args_to_params(self.sphere.mg_args, self.mg_pars)", "def cart2polar3d(cartesian):\n radius = np.linalg.norm(cartesian)\n theta = np.cos", "def ComputeGeometricParameters(self):\n # extracting inner orientation params\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n # computing algebric params\n tx = a0;\n ty = b0\n theta = np.arctan(b1 / b2)\n gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))\n sx = a1 * np.cos(theta) - a2 * np.sin(theta)\n sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)\n\n return {\"translationX\": tx, \"translationY\": ty, \"rotationAngle\": np.rad2deg(theta), \"scaleFactorX\": sx,\n \"scaleFactorY\": sy, \"shearAngle\": np.rad2deg(gamma)}", "def angleAxis2rot3D(axis, theta):\n if len(axis) is not 3:\n raise ValueError('Number of axis element must be 3!')\n axis = axis.astype(float)\n axis /= np.linalg.norm(axis)\n a = axis[0]\n b = axis[1]\n c = axis[2]\n cosTheta = np.cos(theta)\n bracket = 1 - cosTheta\n aBracket = a * bracket\n bBracket = b * bracket\n cBracket = c * bracket\n sinTheta = np.sin(theta)\n aSinTheta = a * sinTheta\n bSinTheta = b * sinTheta\n cSinTheta = c * sinTheta\n rot3D = np.array([[a*aBracket+cosTheta, a*bBracket-cSinTheta, a*cBracket+bSinTheta],\n [b*aBracket+cSinTheta, b*bBracket+cosTheta, b*cBracket-aSinTheta],\n [c*aBracket-bSinTheta, c*bBracket+aSinTheta, c*cBracket+cosTheta]])\n return rot3D", "def eq_to_3d(ra, dec):\r\n x = np.cos(ra) * np.cos(dec)\r\n y = np.sin(ra) * np.cos(dec)\r\n z = np.sin(dec)\r\n return x, y, z", "def ancmig_adj_3(params, ns):\n #8 parameters \n nu1, nuA, nu2, nu3, m1_1, T1a, T1b, T2 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T1a\n nu_T1 = [nu1, nuA]\n mig1 = numpy.array([[0, m1_1],[m1_1, 0]])\n fs.integrate(nu_T1, T1a, m=mig1)\n fs.integrate(nu_T1, T1b) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration 
matrix for T2\n nu_T2 = [nu1, nu2, nu3]\n fs.integrate(nu_T2, T2)\n return fs", "def rotateAlignXform(self):\n r = np.concatenate(([self.x], [self.y], [self.z]), 0)\n r = np.concatenate((r, np.array([[0,0,0]])), 0)\n r = np.concatenate((r, np.array([0,0,0,1]).reshape(-1,1)), 1)\n return r", "def transform_params(cls, orion_params, space):\n ax_params = {}\n for dim in space.values():\n if dim.type == \"fidelity\":\n continue\n\n ax_params[dim.name] = orion_params[dim.name]\n\n return ax_params", "def to_se3(self, state: Vector) -> RigidTransform:\n return (state[:9],state[9:12])", "def trans_setup():\n # slot7 slot6 slot5 slot4 slot3 slot2 slot1 <------ beam direction (slot 8 is currently B-fiber only)\n # Be Be Be Be Be Be Be lens material\n # 1.5 1.5 0.5 0.5 0.5 0.5 0.5 lens radius [mm]\n # 1 1 5 8 4 2 1 number of lenses\n lens_R=[0.5,0.5,0.5,0.5,0.5,1.5,1.5]\n lens_mat=['Be','Be','Be','Be','Be','Be','Be']\n lens_N=[1,2,4,8,5,1,1]\n trans_pos=[35.2,35.8]\n return {'lens_material':lens_mat,'lens_number':lens_N,'lens_radius':lens_R,'trans_position':trans_pos}", "def rigid_transform_3d(xs,ys):\n assert xs.shape == ys.shape\n assert xs.shape[0] == 3, 'The points must be of dimmensionality 3'\n\n # find centroids and H\n x_centroid = np.mean(xs, axis=1)[:, np.newaxis]\n y_centroid = np.mean(ys, axis=1)[:, np.newaxis]\n \n H = (xs - x_centroid)@(ys - y_centroid).T\n\n # find rotation\n U, S, Vt = np.linalg.svd(H)\n rotation = [email protected]\n\n # handling reflection\n if np.linalg.det(rotation) < 0:\n Vt[2, :] *= -1\n rotation = np.dot(Vt.T, U.T)\n \n # find translation\n translation = y_centroid - rotation@x_centroid\n \n return translation, rotation", "def affine_transform_3d(v, mapping, alpha = 1):\r\n p_wgt = vec3(0, 0, 0)\r\n q_wgt = vec3(0, 0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n z = mp[0].z - v.z\r\n if (x == 0 and y == 0 and z == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y + z*z) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n M1 = mat3(0)\r\n M2 = mat3(0)\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n M1 += p_adj.transpose_multiply(p_adj)*w[i]\r\n M2 += p_adj.transpose_multiply(q_adj)*w[i]\r\n M1 = M1.inverse()\r\n M = M1*M2\r\n M = M.transpose()\r\n v_out = M*(v - p_wgt) + q_wgt\r\n return v_out", "def to_se3(self, state: Vector) -> RigidTransform:\n return (state[:9],state[9:])", "def xyz_to_zyx(xyz1, xyz2, xyz3):\n\n # Converto gli angoli in ingresso in gradi\n xyz1_r = xyz1 / 180.0 * Kinematic.M_PI\n xyz2_r = xyz2 / 180.0 * Kinematic.M_PI\n xyz3_r = xyz3 / 180.0 * Kinematic.M_PI\n\n # Calcolo l'elemento 3:1 della prima matrice (s1s3 - c1c3s2)\n minus_s2_xyz = (math.sin(xyz1_r) * math.sin(xyz3_r)) - (math.cos(xyz1_r) * math.cos(xyz3_r) * math.sin(xyz2_r))\n\n # Calcolo l'elemento 2:1 della prima matrice (c1s3 + c3s1s2)\n c2s1_xyz = (math.cos(xyz1_r) * math.sin(xyz3_r)) + (math.cos(xyz3_r) * math.sin(xyz1_r) * math.sin(xyz2_r))\n\n # Calcolo l'elemento 2:3 della prima matrice (c3s1 + c1s2s3)\n c2s3_xyz = (math.cos(xyz3_r) * math.sin(xyz1_r)) + (math.cos(xyz1_r)) - (math.sin(xyz2_r) * math.sin(xyz3_r))\n\n # Ora trovo gli angoli\n zyx2_r = math.asin(-minus_s2_xyz)\n c2_xyz = math.cos(zyx2_r)\n zyx1_r = math.asin(c2s1_xyz / c2_xyz)\n zyx3_r = math.asin(c2s3_xyz / c2_xyz)\n zyx3 = zyx3_r / Kinematic.M_PI * 180.0\n zyx2 = zyx2_r / Kinematic.M_PI * 
180.0\n zyx1 = zyx1_r / Kinematic.M_PI * 180.0\n\n return [zyx3, zyx2, zyx1, zyx3_r, zyx2_r, zyx1_r]", "def angle_axis_to_rot3d(axis, theta):\n if isinstance(axis, string_types):\n axis = axis.lower()\n if axis == 'x':\n axis = np.array([1., 0., 0.])\n elif axis == 'y':\n axis = np.array([0., 1., 0.])\n elif axis == 'z':\n axis = np.array([0., 0., 1.])\n else:\n raise ValueError(\"Axis should be 'x', 'y', 'z' or a 3D vector.\")\n elif len(axis) != 3:\n raise ValueError(\"Axis should be 'x', 'y', 'z' or a 3D vector.\")\n axis = axis.astype(float)\n axis /= np.linalg.norm(axis)\n a = axis[0]\n b = axis[1]\n c = axis[2]\n cos_theta = np.cos(theta)\n bracket = 1 - cos_theta\n a_bracket = a * bracket\n b_bracket = b * bracket\n c_bracket = c * bracket\n sin_theta = np.sin(theta)\n a_sin_theta = a * sin_theta\n b_sin_theta = b * sin_theta\n c_sin_theta = c * sin_theta\n rot3d = np.array(\n [[a * a_bracket + cos_theta, a * b_bracket - c_sin_theta, a * c_bracket + b_sin_theta],\n [b * a_bracket + c_sin_theta, b * b_bracket + cos_theta, b * c_bracket - a_sin_theta],\n [c * a_bracket - b_sin_theta, c * b_bracket + a_sin_theta, c * c_bracket + cos_theta]])\n return rot3d", "def alignPairShapes(s1,s2,weights):\n\n\n s1=np.asarray(s1)\n s2=np.asarray(s2)\n \n x1k=s1[:,0]\n y1k=s1[:,1]\n x2k=s2[:,0]\n y2k=s2[:,1]\n\n X1=sum(x1k*weights) \n X2=sum(x2k*weights)\n\n Y1=sum(y1k*weights)\n Y2=sum(y2k*weights)\n\n Z=sum(weights*(pow(x2k,2)+pow(y2k,2)))\n\n W=sum(weights)\n\n C1=sum(weights*(x1k*x2k+y1k*y2k))\n\n C2=sum(weights*(y1k*x2k-x1k*y2k))\n \n a=np.asarray([[X2,-Y2,W,0],[Y2,X2,0,W],[Z,0,X2,Y2],[0,Z,-Y2,X2]])\n b=np.asarray([X1,Y1,C1,C2])\n\n x=np.linalg.solve(a,b)\n\n ax=x[0]\n ay=x[1]\n tx=x[2]\n ty=x[3]\n return ax,ay,tx,ty", "def angle3pt(\n ax: float, ay: float, bx: float, by: float, cx: float, cy: float\n ) -> float:\n ang = math.degrees(math.atan2(cy - by, cx - bx) - math.atan2(ay - by, ax - bx))\n return ang + 360 if ang < 0 else ang", "def rotate_3D(atom, source_atom):\n from lauescript.cryst.match import get_transform\n\n lst2 = [np.array([0, 0, 0]), source_atom.orientation[0], source_atom.orientation[1]]\n lst1 = [np.array([0, 0, 0]), atom.orientation[0], atom.orientation[1]]\n\n matrix = get_transform(lst1, lst2, matrix=True)\n\n adp = source_atom.adp['cart_int']\n\n atom.adp['cart_int'] = rotate_adp(adp, matrix)", "def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()/self.pbaspect[0]\n ymin, ymax = self.get_ylim3d()/self.pbaspect[1]\n zmin, zmax = self.get_zlim3d()/self.pbaspect[2]\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0 \n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates \n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down \n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M", "def similarity_transform_3d(v, mapping, alpha = 1):\r\n p_wgt = vec3(0, 0, 0)\r\n q_wgt = vec3(0, 0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n 
for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n z = mp[0].z - v.z\r\n if (x == 0 and y == 0 and z == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y + z*z) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n A = mat3(0)\r\n k = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n A += w[i]*p_adj.transpose_multiply(q_adj)\r\n k += w[i]*p_adj.dot(p_adj)\r\n A_arr = np.array(A.matrix).reshape(3, 3)\r\n U, S, V = np.linalg.svd(A_arr)\r\n M_arr = np.matmul(np.transpose(V), np.transpose(U))\r\n M = mat3(M_arr.ravel().tolist())\r\n k = np.sum(S)/k\r\n v_out = k*M*(v - p_wgt) + q_wgt\r\n return v_out", "def nfw_physical2angle_fromNFWparams(self, rhos, rs, z):\n\n D_d = self.cosmo.D_A_z(z)\n Rs_angle = rs / D_d / self.cosmo.arcsec # Rs in arcsec\n theta_Rs = rhos * (4 * rs ** 2 * (1 + numpy.log(1. / 2.)))\n eps_crit = self.get_sigma_crit_lensing(z, self.z_source)\n\n return Rs_angle, theta_Rs / eps_crit / D_d / self.cosmo.arcsec", "def getTranslationsFromTiltSeries(self, TiltSeries_):\n # initialize alignment in separate array - easier for optimization\n nprojs = len(TiltSeries_._ProjectionList._list)\n self._alignmentTransX = nprojs * [0.]\n self._alignmentTransY = nprojs * [0.]\n for (kk, proj) in enumerate(TiltSeries_._ProjectionList._list):\n self._alignmentTransX[kk] = proj.getAlignmentTransX()\n self._alignmentTransY[kk] = proj.getAlignmentTransY()\n return self._alignmentTransX, self._alignmentTransY", "def _transform_warp_impl3d(\n src: Tensor,\n dst_pix_trans_src_pix: Tensor,\n dsize_src: tuple[int, int, int],\n dsize_dst: tuple[int, int, int],\n grid_mode: str,\n padding_mode: str,\n align_corners: bool,\n) -> Tensor:\n dst_norm_trans_src_norm: Tensor = normalize_homography3d(dst_pix_trans_src_pix, dsize_src, dsize_dst)\n\n src_norm_trans_dst_norm = torch.inverse(dst_norm_trans_src_norm)\n return homography_warp3d(src, src_norm_trans_dst_norm, dsize_dst, grid_mode, padding_mode, align_corners, True)", "def get_affine_matrix2d(\n translations: Tensor,\n center: Tensor,\n scale: Tensor,\n angle: Tensor,\n sx: Tensor | None = None,\n sy: Tensor | None = None,\n) -> Tensor:\n transform: Tensor = get_rotation_matrix2d(center, -angle, scale)\n transform[..., 2] += translations # tx/ty\n\n # pad transform to get Bx3x3\n transform_h = convert_affinematrix_to_homography(transform)\n\n if any(s is not None for s in [sx, sy]):\n shear_mat = get_shear_matrix2d(center, sx, sy)\n transform_h = transform_h @ shear_mat\n\n return transform_h", "def update_parameters(params, grads, alpha):\n n_layers = len(params) // 2\n for i in range(n_layers):\n params['w%s' % (i+1)] = (\n params['w%s' % (i+1)] - alpha * grads['dw%s' % (i+1)])\n params['b%s' % (i+1)] = (\n params['b%s' % (i+1)] - alpha * grads['db%s' % (i+1)])\n return params", "def vrrotvec2mat(ax_ang):\n\n #file_dir = os.path.dirname(os.path.realpath(__file__))\n #path_dir2 = file_dir + '/../geometry/'\n #sys.path.append(path_dir2)\n\n if ax_ang.ndim == 1:\n if np.size(ax_ang) == 5:\n ax_ang = np.reshape(ax_ang, (5, 1))\n msz = 1\n elif np.size(ax_ang) == 4:\n ax_ang = np.reshape(np.hstack((ax_ang, np.array([1]))), (5, 1))\n msz = 1\n else:\n raise Exception('Wrong Input Type')\n elif ax_ang.ndim == 2:\n if np.shape(ax_ang)[0] == 5:\n msz = np.shape(ax_ang)[1]\n elif np.shape(ax_ang)[1] == 5:\n ax_ang = ax_ang.transpose()\n msz = np.shape(ax_ang)[1]\n else:\n raise Exception('Wrong 
Input Type')\n else:\n raise Exception('Wrong Input Type')\n\n direction = ax_ang[0:3, :]\n angle = ax_ang[3, :]\n\n d = np.array(direction, dtype=np.float64)\n d /= np.linalg.norm(d, axis=0)\n x = d[0, :]\n y = d[1, :]\n z = d[2, :]\n c = np.cos(angle)\n s = np.sin(angle)\n tc = 1 - c\n\n mt11 = tc*x*x + c\n mt12 = tc*x*y - s*z\n mt13 = tc*x*z + s*y\n\n mt21 = tc*x*y + s*z\n mt22 = tc*y*y + c\n mt23 = tc*y*z - s*x\n\n mt31 = tc*x*z - s*y\n mt32 = tc*y*z + s*x\n mt33 = tc*z*z + c\n\n mtx = np.column_stack((mt11, mt12, mt13, mt21, mt22, mt23, mt31, mt32, mt33))\n\n inds1 = np.where(ax_ang[4, :] == -1)\n mtx[inds1, :] = -mtx[inds1, :]\n\n if msz == 1:\n mtx = mtx.reshape(3, 3)\n else:\n mtx = mtx.reshape(msz, 3, 3)\n\n return mtx", "def derive(params):\n x, y, dx, dy = params\n r = (x ** 2 + y ** 2) ** 0.5\n return np.array([dx, dy, -G * M * x / (r ** 3), -G * M * y / (r ** 3)])", "def _mat3(self):\n if self.frame.orientation == HillFrame.DEFAULT_ORIENTATION:\n return np.identity(3)\n else:\n return self.QSW2TNW", "def common_line_in3D(phiA,thetaA,phiB,thetaB):\n\n\tfrom math import pi, sqrt, cos, sin, asin, atan2\n\n\tpiOver=pi/180.0;\n\tph1 = phiA*piOver; \n\tth1 = thetaA*piOver; \n\tph2 = phiB*piOver; \n\tth2 = thetaB*piOver;\n\t\n \t#nx = cos(thetaBR)*sin(thetaAR)*sin(phiAR) - cos(thetaAR)*sin(thetaBR)*sin(phiBR) ;\n\t#ny = cos(thetaAR)*sin(thetaBR)*cos(phiBR) - cos(thetaBR)*sin(thetaAR)*cos(phiAR) ;\n\t#nz = sin(thetaAR)*sin(thetaBR)*sin(phiAR-phiBR);\n\n\n\tnx = sin(th1)*cos(ph1)*sin(ph2)-sin(th2)*sin(ph1)*cos(ph2)\n\tny = sin(th1)*cos(th2)*cos(ph1)*cos(ph2)-cos(th1)*sin(th2)*cos(ph1)*cos(ph2)\n\tnz = cos(th2)*sin(ph1)*cos(ph2)-cos(th1)*cos(ph1)*sin(ph2)\n\n\tnorm = nx*nx + ny*ny + nz*nz\n \n\tif norm < 1e-5:\n\t\t#print 'phiA,thetaA,phiB,thetaB:', phiA, thetaA, phiB, thetaB\n\t\treturn 0.0, 0.0\n\n\tif nz<0: nx=-nx; ny=-ny; nz=-nz;\n\n\t#thetaCom = asin(nz/sqrt(norm))\n\tphiCom = asin(nz/sqrt(norm))\n\t#phiCom = atan2(ny,nx)\n\tthetaCom = atan2(ny, nx)\n\t\n\treturn phiCom*180.0/pi , thetaCom*180.0/pi", "def rigid_transform_3d(v, mapping, alpha = 1):\r\n p_wgt = vec3(0, 0, 0)\r\n q_wgt = vec3(0, 0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n z = mp[0].z - v.z\r\n if (x == 0 and y == 0 and z == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y + z*z) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n A = mat3(0)\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n A += w[i]*p_adj.transpose_multiply(q_adj)\r\n A_arr = np.array(A.matrix).reshape(3, 3)\r\n U, S, V = np.linalg.svd(A_arr)\r\n M_arr = np.matmul(np.transpose(V), np.transpose(U))\r\n M = mat3(M_arr.ravel().tolist())\r\n v_out = M*(v - p_wgt) + q_wgt\r\n return v_out", "def jointImgTo3D(sample):\n ret = np.zeros((3,), np.float32)\n # convert to metric using f\n ret[0] = (sample[0]-centerX)*sample[2]/focalLengthX\n ret[1] = (sample[1]-centerY)*sample[2]/focalLengthY\n ret[2] = sample[2]\n return ret", "def kinect_transform(self, x, y, z):\n xposition = x\n yposition = y\n zposition = z\n\n return zposition, xposition, yposition", "def xyz2facestereo(x,y,z):\n ax = np.abs(x)\n ay = np.abs(y)\n az = np.abs(z)\n mskx = (y != x) & (z != x)\n mskyz = z != y\n msk0 = ( x >= ay) & ( x >= az) & mskx\n msk3 = (-x >= ay) & (-x >= az) & mskx\n msk1 = ( y >= az) & mskyz\n msk4 = (-y >= az) & mskyz\n msk2 = z > 0\n f = 
(1-msk0)*(msk3*3 + (1-msk3)*(msk1 + (1-msk1)*(msk4*4 + (1-msk4)*(msk2*2 + (1-msk2)*5))))\n xnew = np.choose(f, ( y, -x, -x, -z, -z, y))\n ynew = np.choose(f, ( z, z, -y, -y, x, x))\n znew = np.choose(f, ( x, y, z, -x, -y, -z))\n X,Y = xyz2stereo(xnew, ynew, znew)\n\n return f,X,Y", "def EncodeMorton3D(x, y, z):\r\n return Expand3D(x) + (Expand3D(y) << 1) + (Expand3D(z) << 2)", "def test_translate_to_center_of_mass(self):\n xyz = \"\"\"O 1.28706525 0.52121353 0.04219198\nC 0.39745682 -0.35265044 -0.63649234\nC 0.36441173 -1.68197093 0.08682400\nH -0.59818222 0.10068325 -0.65235399\nH 0.74799641 -0.48357798 -1.66461710\nH 0.03647269 -1.54932006 1.12314420\nH -0.31340646 -2.38081353 -0.41122551\nH 1.36475837 -2.12581592 0.12433596\nH 2.16336803 0.09985803 0.03295192\n\"\"\"\n translated_xyz = converter.translate_to_center_of_mass(converter.str_to_xyz(xyz))\n cm_x, cm_y, cm_z = converter.get_center_of_mass(xyz=translated_xyz)\n self.assertAlmostEqual(cm_x, 0.0000, 3)\n self.assertAlmostEqual(cm_y, 0.0000, 3)\n self.assertAlmostEqual(cm_z, 0.0000, 3)\n\n xyz = {'coords': ((0.0, 0.0, 0.113488),\n (0.0, 0.93867, -0.264806),\n (0.812912, -0.469335, -0.264806),\n (-0.812912, -0.469335, -0.264806)),\n 'symbols': ('N', 'H', 'H', 'H')}\n translated_xyz = converter.translate_to_center_of_mass(converter.check_xyz_dict(xyz))\n expected_xyz = \"\"\"N 0.00000000 0.00000000 0.06717524\nH 0.00000000 0.93867000 -0.31111876\nH 0.81291200 -0.46933500 -0.31111876\nH -0.81291200 -0.46933500 -0.31111876\"\"\"\n self.assertEqual(converter.xyz_to_str(translated_xyz), expected_xyz)\n cm_x, cm_y, cm_z = converter.get_center_of_mass(translated_xyz)\n self.assertAlmostEqual(cm_x, 0.0000, 3)\n self.assertAlmostEqual(cm_y, 0.0000, 3)\n self.assertAlmostEqual(cm_z, 0.0000, 3)", "def translate3d(p, a=0, b=0, c=0):\n translation_mat = np.matrix([\n [1,0,0,0],\n [0,1,0,0],\n [0,0,1,0],\n [a,b,c,1]\n ], dtype=\"float32\")\n\n new_p = p @ translation_mat\n\n return new_p", "def ik3(xyz_array):\n # Eqn 1\n theta_1 = np.arctan2(xyz_array[1], xyz_array[0])\n # Eqn 2\n r1 = np.hypot(xyz_array[0], xyz_array[1])\n # Eqn 3\n r2 = xyz_array[2] - link_lengths[0]\n # Eqn 4\n phi2 = np.arctan2(r2, r1)\n # Eqn 5\n r3 = np.hypot(r1, r2)\n # Eqn 6\n num6 = np.power(link_lengths[2], 2) - \\\n np.power(link_lengths[1], 2) - np.power(r3, 2)\n den6 = -2 * link_lengths[1] * r3\n phi1 = np.arccos(num6 / den6)\n # Eqn 7\n # theta_2 = phi2 - phi1 # elbow down\n theta_2 = phi2 + phi1\n # Eqn 8\n num8 = np.power(r3, 2) - \\\n np.power(link_lengths[1], 2) - np.power(link_lengths[2], 2)\n den8 = -2 * link_lengths[1] * link_lengths[2]\n phi3 = np.arccos(num8 / den8)\n # Eqn 9\n # theta_3 = pi - phi3 # elbow down\n theta_3 = -(np.pi - phi3)\n # Output Joint Angles\n theta_1 = np.rad2deg(theta_1)\n theta_2 = np.rad2deg(theta_2)\n theta_3 = np.rad2deg(theta_3)\n joint_rotations = np.array([theta_1, theta_2, theta_3])\n return joint_rotations", "def alignCtx(*args, align: bool=True, anchorFirstObject: bool=False, distribute: bool=True,\n exists: bool=True, history: bool=True, image1: Union[AnyStr, bool]=\"\", image2:\n Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\", name: AnyStr=\"\",\n showAlignTouch: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def transform(self, ((a, b), (c, d))=((1, 1), (-1, 1)), aligned_with_grid=False):\n (x0, y0), (x1, y1) = self.vertices\n return type(self)((int(a * x0 + c * y0), int(b * x0 + d * y0)),\n (int(a * x1 + c * y1), int(b * x1 + d * y1)),\n 
aligned_with_grid=aligned_with_grid)", "def ion2_position(self,x,y,z):\n axes_vector = np.array([self.a,self.b,self.c])\n self.ion2 = x*self.a + y*self.b + z*self.c\n self.position['1B'] = np.dot(self.position_map[1],axes_vector) + self.ion2\n self.position['2B'] = np.dot(self.position_map[2],axes_vector) + self.ion2\n self.position['3B'] = np.dot(self.position_map[3],axes_vector) + self.ion2\n self.position['4B'] = np.dot(self.position_map[4],axes_vector) + self.ion2\n self.position['5B'] = np.dot(self.position_map[5],axes_vector) + self.ion2\n self.position['6B'] = np.dot(self.position_map[6],axes_vector) + self.ion2\n self.position['7B'] = np.dot(self.position_map[7],axes_vector) + self.ion2\n self.position['8B'] = np.dot(self.position_map[8],axes_vector) + self.ion2", "def euler_to_rot3d(psi, theta, phi):\n rphi = np.array([[np.cos(phi), -np.sin(phi), 0],\n [np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]])\n rtheta = np.array([[np.cos(theta), 0, np.sin(theta)],\n [0, 1, 0],\n [-np.sin(theta), 0, np.cos(theta)]])\n rpsi = np.array([[np.cos(psi), -np.sin(psi), 0],\n [np.sin(psi), np.cos(psi), 0],\n [0, 0, 1]])\n return np.dot(rpsi, np.dot(rtheta, rphi))", "def rotation3Dy(theta):\n rmat = np.zeros((3,3))\n rmat[0,0], rmat[0,1], rmat[0,2] = np.cos(theta), 0.0, -np.sin(theta)\n rmat[1,0], rmat[1,1], rmat[1,2] = 0.0, 1.0, 0.0\n rmat[2,0], rmat[2,1], rmat[2,2] = np.sin(theta), 0.0, np.cos(theta)\n\n return rmat", "def decompose(xform, angles=True, shears=False):\n\n # The inline comments in the code below are taken verbatim from\n # the referenced article, [except for notes in square brackets].\n\n # The next step is to extract the translations. This is trivial;\n # we find t_x = M_{4,1}, t_y = M_{4,2}, and t_z = M_{4,3}. At this\n # point we are left with a 3*3 matrix M' = M_{1..3,1..3}.\n xform = np.array(xform).T\n\n if xform.shape == (4, 4):\n translations = xform[ 3, :3]\n xform = xform[:3, :3]\n else:\n translations = np.array([0, 0, 0])\n\n M1 = xform[0]\n M2 = xform[1]\n M3 = xform[2]\n\n # The process of finding the scaling factors and shear parameters\n # is interleaved. First, find s_x = |M'_1|.\n sx = np.sqrt(np.dot(M1, M1))\n M1 = M1 / sx\n\n # Then, compute an initial value for the xy shear factor,\n # s_xy = M'_1 * M'_2. (this is too large by the y scaling factor).\n sxy = np.dot(M1, M2)\n\n # The second row of the matrix is made orthogonal to the first by\n # setting M'_2 = M'_2 - s_xy * M'_1.\n M2 = M2 - sxy * M1\n\n # Then the y scaling factor, s_y, is the length of the modified\n # second row.\n sy = np.sqrt(np.dot(M2, M2))\n\n # The second row is normalized, and s_xy is divided by s_y to\n # get its final value.\n M2 = M2 / sy\n sxy = sxy / sx\n\n # The xz and yz shear factors are computed as in the preceding,\n sxz = np.dot(M1, M3)\n syz = np.dot(M2, M3)\n\n # the third row is made orthogonal to the first two rows,\n M3 = M3 - sxz * M1 - syz * M2\n\n # the z scaling factor is computed,\n sz = np.sqrt(np.dot(M3, M3))\n\n # the third row is normalized, and the xz and yz shear factors are\n # rescaled.\n M3 = M3 / sz\n sxz = sxz / sx\n syz = syz / sy\n\n # The resulting matrix now is a pure rotation matrix, except that it\n # might still include a scale factor of -1. If the determinant of the\n # matrix is -1, negate the matrix and all three scaling factors. 
Call\n # the resulting matrix R.\n #\n # [We do things different here - if the rotation matrix has negative\n # determinant, the flip is encoded in the x scaling factor.]\n R = np.array([M1, M2, M3])\n if linalg.det(R) < 0:\n R[0] = -R[0]\n sx = -sx\n\n # Finally, we need to decompose the rotation matrix into a sequence\n # of rotations about the x, y, and z axes. [This is done in the\n # rotMatToAxisAngles function]\n if angles: rotations = rotMatToAxisAngles(R.T)\n else: rotations = R.T\n\n retval = [np.array([sx, sy, sz]), translations, rotations]\n\n if shears:\n retval.append(np.array((sxy, sxz, syz)))\n\n return tuple(retval)", "def convertParameters(self,mu,sigma=None,theta=None,alpha=None):\n\t\tif sigma is not None:\n\t\t\tr = mu**2 / (sigma**2-mu)\n\t\telif theta is not None:\n\t\t\tr = theta\n\t\telif alpha is not None:\n\t\t\tr = 1./alpha\n\t\tp = r / (r + mu)\n\t\treturn r,p", "def rotation(self, p1, p2, p3):\n return (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0])", "def get_affine_matrix2d(\n translations: torch.Tensor,\n center: torch.Tensor,\n scale: torch.Tensor,\n angle: torch.Tensor,\n sx: Optional[torch.Tensor] = None,\n sy: Optional[torch.Tensor] = None,\n) -> torch.Tensor:\n transform: torch.Tensor = get_rotation_matrix2d(center, -angle, scale)\n transform[..., 2] += translations # tx/ty\n\n # pad transform to get Bx3x3\n transform_h = convert_affinematrix_to_homography(transform)\n\n if any(s is not None for s in [sx, sy]):\n shear_mat = get_shear_matrix2d(center, sx, sy)\n transform_h = transform_h @ shear_mat\n\n return transform_h", "def inverse_transform3(phi, theta=0.0, psi=0.0, tx=0.0, ty=0.0, tz=0.0, mirror = 0, scale=1.0):\n\n\td = Transform({'type': 'spider', 'phi': phi, 'theta': theta, 'psi': psi, 'tx': tx, 'ty': ty, 'tz': tz, \"mirror\":mirror,\"scale\":scale})\n\td = d.inverse()\n\td = d.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],d[\"tx\"],d[\"ty\"],d[\"tz\"],d[\"mirror\"],d[\"scale\"]", "def horn_adjust(x, y):\n debug=False\n #debug=True\n meanX = x.mean(axis=0)\n meanY = y.mean(axis=0)\n translation = meanY - meanX\n x_centered = x - meanX\n y_centered = y - meanY\n if debug:\n print(\"x_centered\")\n print(x_centered)\n print(\"y_centered\")\n print(y_centered)\n # Find how much to rescale the x's. 
Entrywise multiplication.\n x_scale = np.sqrt((x_centered * x_centered).sum())\n y_scale = np.sqrt((y_centered * y_centered).sum())\n scale_factor = y_scale / x_scale\n x_centered_prime = x_centered * scale_factor\n if debug:\n print(\"scale_factor\")\n print(scale_factor)\n print(\"x_centered_prime\")\n print(x_centered_prime)\n # Find angle to rotate the planes\n x_perp = np.cross(x_centered_prime[0], x_centered_prime[1])\n y_perp = np.cross(y_centered[0], y_centered[1])\n # Find rotation matrix to rotate the x plane into the y plane\n # Using https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d\n # https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula\n x_perp_unit = x_perp / np.linalg.norm(x_perp)\n y_perp_unit = y_perp / np.linalg.norm(y_perp)\n v = np.cross(x_perp_unit, y_perp_unit)\n s = np.linalg.norm(v) # sine of angle between the planes\n c = x_perp_unit.dot(y_perp_unit) # cosine of angle between the planes\n v_x = np.array([[0, -v[2], v[1]],\n [v[2], 0, -v[0]],\n [-v[1], v[0], 0]])\n # rotation_p acts on the plane\n rotation_p = np.eye(3) + v_x + v_x.dot(v_x) * (1 - c) / s**2.0\n # Transpose to make each x a column vector, then transpose back for next part\n x_plane = rotation_p.dot(x_centered_prime.T).T\n # Now rotate within the plane, as in Sec. 5 of Horn\n v_y = np.array([[0, -y_perp_unit[2], y_perp_unit[1]],\n [y_perp_unit[2], 0, -y_perp_unit[0]],\n [-y_perp_unit[1], y_perp_unit[0], 0]])\n s_win_tmp = np.sum([np.cross(x_plane[i], y_centered[i]) for i in range(3)],\n axis=0).dot(y_perp_unit)\n c_win_tmp = np.sum([x_plane[i].dot(y_centered[i]) for i in range(3)],\n axis=0)\n sin_theta = s_win_tmp / np.sqrt(np.linalg.norm(s_win_tmp)**2 +\n np.linalg.norm(c_win_tmp)**2)\n cos_theta = c_win_tmp / np.sqrt(np.linalg.norm(s_win_tmp)**2 +\n np.linalg.norm(c_win_tmp)**2)\n rotation_win = np.eye(3) + sin_theta * v_y + (1 - cos_theta) * v_y.dot(v_y)\n # transpose so each column is an x vector, then transpose back at the end\n # x_final = rotation_win.dot(x_final.T).T\n rotation_full = rotation_win.dot(rotation_p)\n # Ignore scale_factor\n # T(x) = Ax + b\n A = rotation_full\n b = meanY - rotation_full.dot(meanX)\n if debug:\n print(\"A\")\n print(rotation_full)\n print(\"b\")\n print(b)\n return(A, b)", "def affine_params(key, o, u, ifactor=1.0):\n keys = random.split(key, 2)\n ifactor = ifactor / np.sqrt(u)\n return {'w' : random.normal(keys[0], (o, u)) * ifactor,\n 'b' : np.zeros((o,))}", "def rotation3Dx(theta):\n rmat = np.zeros((3,3))\n rmat[0,0], rmat[0,1], rmat[0,2] = 1.0, 0.0, 0.0\n rmat[1,0], rmat[1,1], rmat[1,2] = 0.0, np.cos(theta), np.sin(theta)\n rmat[2,0], rmat[2,1], rmat[2,2] = 0.0, -np.sin(theta), np.cos(theta)\n \n return rmat", "def R3(theta):\n\n DCM = np.array([[np.cos(theta), np.sin(theta), 0], \n [-np.sin(theta), np.cos(theta), 0], \n [0, 0, 1]])\n\n return DCM", "def test_3d_tranpose(): \n dic,data = ng.pipe.read_lowmem(\"common_data/3d_pipe/ft/test%03d.ft3\")\n fdic,fdata = ng.pipe.read(\"common_data/3d_pipe/ft/test%03d.ft3\")\n\n assert_array_equal(data.transpose()[0,1,2],fdata.transpose()[0,1,2])\n assert_array_equal(data.transpose((2,0,1))[0,1,2],\n fdata.transpose((2,0,1))[0,1,2])\n assert_array_equal(data.swapaxes(0,1)[0,1,2],fdata.swapaxes(0,1)[0,1,2])\n assert_array_equal(data.swapaxes(2,0)[0,1,2],fdata.swapaxes(2,0)[0,1,2])", "def euler2rot3D(psi, theta, phi):\n Rphi = np.array([[np.cos(phi), np.sin(phi), 0],\n [-np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]])\n Rtheta = np.array([[np.cos(theta), 0, 
-np.sin(theta)],\n [0, 1, 0],\n [np.sin(theta), 0, np.cos(theta)]])\n Rpsi = np.array([[np.cos(psi), np.sin(psi), 0],\n [-np.sin(psi), np.cos(psi), 0],\n [0, 0, 1]])\n return np.dot(Rpsi, np.dot(Rtheta, Rphi))", "def mesh_verts_align(self, smpl_verts, verts, smpl_J, J_3d, eps=1e-8):\n # finding bounding boxes\n bbox_1_x_min, bbox_1_x_max = np.min (smpl_verts[:, 0]), np.max (smpl_verts[:, 0])\n bbox_1_y_min, bbox_1_y_max = np.min (smpl_verts[:, 1]), np.max (smpl_verts[:, 1])\n # bbox_1_z_min, bbox_1_z_max = np.min (smpl_verts[:, 2]), np.max (smpl_verts[:, 2])\n # H1 = bbox_1_z_max - bbox_1_z_min\n W1 = bbox_1_y_max - bbox_1_y_min\n D1 = bbox_1_x_max - bbox_1_x_min\n\n bbox_2_x_min, bbox_2_x_max = np.min (verts[:, 0]), np.max (verts[:, 0])\n bbox_2_y_min, bbox_2_y_max = np.min (verts[:, 1]), np.max (verts[:, 1])\n # bbox_2_z_min, bbox_2_z_max = np.min (verts[:, 2]), np.max (verts[:, 2])\n # H2 = bbox_2_z_max - bbox_2_z_min\n W2 = bbox_2_y_max - bbox_2_y_min\n D2 = bbox_2_x_max - bbox_2_x_min\n\n # get_centers\n # center_1 = 0.5 * np.array ([(bbox_1_x_min + bbox_1_x_max),\n # (bbox_1_y_min + bbox_1_y_max),\n # (bbox_1_z_min + bbox_1_z_max)])\n #\n # center_2 = 0.5 * np.array ([(bbox_2_x_min + bbox_2_x_max),\n # (bbox_2_y_min + bbox_2_y_max),\n # (bbox_2_z_min + bbox_2_z_max)])\n\n verts = verts - J_3d[0]\n J_3d = J_3d - J_3d[0]\n s = ((D1 / D2 + eps) + (W1 / W2 + eps)) / 2.0\n # verts[:, 0] = verts[:, 0] * (D1 / D2 + eps)\n # verts[:, 1] = verts[:, 1] * (W1 / W2 + eps)\n # verts[:, 2] = verts[:, 2] * (H1 / H2 + eps)\n verts = verts * s\n J_3d = J_3d * s\n\n verts = verts + smpl_J[0]\n J_3d = J_3d + smpl_J[0]\n return verts.astype ('float16'), J_3d.astype ('float16')", "def cartesian_To_Center(self, x, y, z):\n\n if x > 0.0 and -self.L_cap <= y <= 0.0:\n s = self.L_cap + y\n xc = x - self.rb\n yc = z\n else:\n theta = full_arctan2(y, x)\n if theta <= self.ang:\n s = theta * self.rb + self.L_cap\n xc = np.sqrt(x ** 2 + y ** 2) - self.rb\n yc = z\n elif self.ang < theta <= 2 * np.pi: # i'm being lazy here and not limiting the real end\n x0, y0 = np.cos(self.ang) * self.rb, np.sin(self.ang) * self.rb\n thetaEndPerp = np.pi - np.arctan(-1 / np.tan(self.ang))\n x, y = x - x0, y - y0\n deltaS, xc = np.cos(thetaEndPerp) * x + np.sin(-thetaEndPerp) * y, np.sin(thetaEndPerp) * x + np.cos(\n thetaEndPerp) * y\n yc = z\n xc = -xc\n s = (self.ang * self.rb + self.L_cap) + deltaS\n else:\n raise ValueError\n return s, xc, yc", "def rotation3Dz(theta):\n rmat = np.zeros((3,3))\n rmat[0,0] = rmat[1,1] = np.cos(theta)\n rmat[0,1] = np.sin(theta)\n rmat[1,0] = -rmat[0,1]\n rmat[2,2] = 1\n return rmat", "def img_map_transforms(ts):\n # XXX TODO: unchecked textures give error of variable referenced before assignment XXX\n # POV-Ray \"scale\" is not a number of repetitions factor, but ,its\n # inverse, a standard scale factor.\n # 0.5 Offset is needed relatively to scale because center of the\n # scale is 0.5,0.5 in blender and 0,0 in POV\n # Strange that the translation factor for scale is not the same as for\n # translate.\n # TODO: verify both matches with other blender renderers / internal in previous versions.\n image_map_transforms = \"\"\n image_map_transforms = \"scale <%.4g,%.4g,%.4g> translate <%.4g,%.4g,%.4g>\" % (\n ts.scale[0],\n ts.scale[1],\n ts.scale[2],\n ts.offset[0],\n ts.offset[1],\n ts.offset[2],\n )\n # image_map_transforms = (\" translate <-0.5,-0.5,0.0> scale <%.4g,%.4g,%.4g> translate <%.4g,%.4g,%.4g>\" % \\\n # ( 1.0 / ts.scale.x,\n # 1.0 / ts.scale.y,\n # 1.0 / ts.scale.z,\n # (0.5 / 
ts.scale.x) + ts.offset.x,\n # (0.5 / ts.scale.y) + ts.offset.y,\n # ts.offset.z))\n # image_map_transforms = (\n # \"translate <-0.5,-0.5,0> \"\n # \"scale <-1,-1,1> * <%.4g,%.4g,%.4g> \"\n # \"translate <0.5,0.5,0> + <%.4g,%.4g,%.4g>\" % \\\n # (1.0 / ts.scale.x,\n # 1.0 / ts.scale.y,\n # 1.0 / ts.scale.z,\n # ts.offset.x,\n # ts.offset.y,\n # ts.offset.z)\n # )\n return image_map_transforms", "def project(points, camera_params, theta):\n \"\"\"\n Function takes input of 3d_points, transformations and Convert 3-D points to 2-D by projecting onto images. \n Input:\n points: 3D points in world frame\n camera_params: parameters of camera corrosponding to the point\n theta: Needed For PAL camera to specify the sub camera index for the points\n Output:\n points_proj: 2D reprojected points for 3D points \n\n \"\"\"\n # Convert the 3D points to Camera Frame by rotaion followes by translation\n points_proj1 = rotate(points[:,0:3], camera_params[:, :3])\n points_proj1 += camera_params[:, 3:6]\n # FOR PAL: Converting into the Sub-camera Frame by respective rotation\n thetas = theta * np.pi / 3 \n points_proj = np.copy(points_proj1)\n points_proj[:,0] = points_proj1[:,0]*np.cos(thetas) - points_proj1[:,2]*np.sin(thetas)\n points_proj[:,2] = points_proj1[:,0]*np.sin(thetas) + points_proj1[:,2]*np.cos(thetas)\n # Avoiding Zero error\n for i in range(len(points_proj)):\n if(points_proj[i,2]==0):\n points_proj[i,0] = 0\n points_proj[i,1] = 0\n points_proj[i,2] = 1\n # 2D projection\n points_proj = points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj**2, axis=1)\n r = 1 + k1 * n + k2 * n**2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def _calculate_parameters(self, thickness: int = 10):\n\n self.thickness = thickness\n\n # set orientation dependent parameters: (different for x, y, z-PML)\n # NOTE: these methods are implemented by the subclasses of PML.\n self._set_locations()\n self._set_shape()\n self._set_sigmaE()\n self._set_sigmaH()\n\n # set the other parameters\n Nx, Ny, Nz = self.shape # is defined by _set_shape()\n self.phi_E = bd.zeros((Nx, Ny, Nz, 3))\n self.phi_H = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ex = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ey = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ez = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hx = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hy = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hz = bd.zeros((Nx, Ny, Nz, 3))\n\n self.bE = bd.exp(-(self.sigmaE / self.k + self.a) * self.grid.courant_number)\n self.cE = (\n (self.bE - 1.0)\n * self.sigmaE # is defined by _set_sigmaE()\n / (self.sigmaE * self.k + self.a * self.k ** 2)\n )\n\n self.bH = bd.exp(-(self.sigmaH / self.k + self.a) * self.grid.courant_number)\n self.cH = (\n (self.bH - 1.0)\n * self.sigmaH # is defined by _set_sigmaH()\n / (self.sigmaH * self.k + self.a * self.k ** 2)\n )", "def get_shear_matrix3d(\n center: torch.Tensor,\n sxy: Optional[torch.Tensor] = None,\n sxz: Optional[torch.Tensor] = None,\n syx: Optional[torch.Tensor] = None,\n syz: Optional[torch.Tensor] = None,\n szx: Optional[torch.Tensor] = None,\n szy: Optional[torch.Tensor] = None,\n):\n sxy = torch.tensor([0.0]).repeat(center.size(0)) if sxy is None else sxy\n sxz = torch.tensor([0.0]).repeat(center.size(0)) if sxz is None else sxz\n syx = torch.tensor([0.0]).repeat(center.size(0)) if syx is None else syx\n syz = torch.tensor([0.0]).repeat(center.size(0)) if syz is None else syz\n szx = 
torch.tensor([0.0]).repeat(center.size(0)) if szx is None else szx\n szy = torch.tensor([0.0]).repeat(center.size(0)) if szy is None else szy\n\n x, y, z = torch.split(center, 1, dim=-1)\n x, y, z = x.view(-1), y.view(-1), z.view(-1)\n # Prepare parameters\n sxy_tan = torch.tan(sxy) # type: ignore\n sxz_tan = torch.tan(sxz) # type: ignore\n syx_tan = torch.tan(syx) # type: ignore\n syz_tan = torch.tan(syz) # type: ignore\n szx_tan = torch.tan(szx) # type: ignore\n szy_tan = torch.tan(szy) # type: ignore\n\n # compute translation matrix\n m00, m10, m20, m01, m11, m21, m02, m12, m22 = _compute_shear_matrix_3d(\n sxy_tan, sxz_tan, syx_tan, syz_tan, szx_tan, szy_tan\n )\n\n m03 = m01 * y + m02 * z\n m13 = m10 * x + m11 * y + m12 * z - y\n m23 = m20 * x + m21 * y + m22 * z - z\n\n # shear matrix is implemented with negative values\n sxy_tan, sxz_tan, syx_tan, syz_tan, szx_tan, szy_tan = -sxy_tan, -sxz_tan, -syx_tan, -syz_tan, -szx_tan, -szy_tan\n m00, m10, m20, m01, m11, m21, m02, m12, m22 = _compute_shear_matrix_3d(\n sxy_tan, sxz_tan, syx_tan, syz_tan, szx_tan, szy_tan\n )\n\n shear_mat = torch.stack([m00, m01, m02, m03, m10, m11, m12, m13, m20, m21, m22, m23], dim=-1).view(-1, 3, 4)\n shear_mat = convert_affinematrix_to_homography3d(shear_mat)\n\n return shear_mat", "def align_c_axis_along_001(structure):\n\n c = structure.lattice._matrix[2]\n z = [0, 0, 1]\n axis = np.cross(c, z)\n if not(axis[0] == 0 and axis[1] == 0):\n theta = (np.arccos(np.dot(c, z) / (np.linalg.norm(c) * np.linalg.norm(z))))\n R = get_rotation_matrix(axis, theta)\n rotation = SymmOp.from_rotation_and_translation(rotation_matrix=R)\n structure.apply_operation(rotation)\n return structure", "def from_parameters(cls, a, b, c, alpha, beta, gamma):\r\n angles_r = np.radians([alpha, beta, gamma])\r\n cos_alpha, cos_beta, cos_gamma = np.cos(angles_r)\r\n sin_alpha, sin_beta, sin_gamma = np.sin(angles_r)\r\n val = cls._abs_cap((cos_alpha * cos_beta - cos_gamma) / (sin_alpha * sin_beta))\r\n va = [a * sin_beta, 0.0, a * cos_beta]\r\n vb = [-b * sin_alpha * np.cos(np.arccos(val)),\r\n b * sin_alpha * np.sin(np.arccos(val)), b * cos_alpha]\r\n vc = [0.0, 0.0, float(c)]\r\n return cls(np.asarray([va, vb, vc]))", "def origami_H2_2cyl(w1,h1,t1,w2,h2,t2):\n assert((w2 < w1) and (t1 < w1) and (t2 < w2))\n\n # v for volumes and z for z\n v1 = h1*w1\n v2 = h2*w2\n z1 = (h1-1)*w1 + 1\n z2 = v1 + (h2-1)*w2 + 1\n\n # the horizontal permutation\n x = [None] + range(2,v1+v2+1) + [1]\n for i in range(h1):\n x[(i+1)*w1] = i*w1 + 1\n for i in range(h2):\n x[v1 + (i+1)*w2] = v1 + i*w2 + 1\n\n # the vertical permutation\n y = ([None] +\n range(w1+1,v1+1) + [None]*w1 +\n range(v1+w2+1,v1+v2+1) + [None]*w2)\n\n for i in range(w2):\n # up-left of the first cylinder\n # print \"U1L) z1 + (t1+i)%w1 -> 1+v1+i: \", z1+(t1+i)%w1, 1+v1+i\n y[z1+(t1+i)%w1] = 1+v1+i\n for i in range(w2):\n # up of the second cylinder\n # print \"U2) z2+(t2+i)%w2 -> 1 + (t1+i)%w1: \", z2+(t2+i)%w2, 1+(t1+i)%w1\n y[z2+(t2+i)%w2] = 1+i\n for i in range(w1-w2):\n # up-right of the first cylinder\n # print \"U1R) z1+w2+(t1+i) -> 1+i: \", z1+(w2+t1+i)%w1, 1+w2+i\n y[z1+(w2+t1+i)%w1] = 1+w2+i\n\n return Origami(x[1:],y[1:])", "def tf_box_3d_to_anchor(boxes_3d):\n\n boxes_3d = tf.reshape(boxes_3d, [-1, 7])\n\n anchors_x = boxes_3d[:, 0]\n anchors_y = boxes_3d[:, 1]\n anchors_z = boxes_3d[:, 2]\n\n # Dimensions along x, y, z\n box_l = boxes_3d[:, 3]\n box_w = boxes_3d[:, 4]\n box_h = boxes_3d[:, 5]\n box_ry = boxes_3d[:, 6]\n\n # Ortho rotate\n half_pi = np.pi / 2\n box_ry = 
tf.round(box_ry / half_pi) * half_pi\n cos_ry = tf.abs(tf.cos(box_ry))\n sin_ry = tf.abs(tf.sin(box_ry))\n\n anchors_dimx = box_l * cos_ry + box_w * sin_ry\n anchors_dimy = box_h\n anchors_dimz = box_w * cos_ry + box_l * sin_ry\n\n anchors = tf.stack([anchors_x, anchors_y, anchors_z,\n anchors_dimx, anchors_dimy, anchors_dimz],\n axis=1)\n\n return anchors", "def align_z_along_fixed_ends(xyz_file_parts, fixed_beginning, fixed_end):\n\n\t\tmolecule_axis = [xyz_file_parts[-1][1,fixed_end],xyz_file_parts[-1][2,fixed_end],xyz_file_parts[-1][3,fixed_end]]\n\n\n\t\tangle = np.arccos(molecule_axis[2]/np.linalg.norm(molecule_axis))\n\t\ttheta = angle\n\n\t\tif(angle != 0):\n\t\t\t#calculate rotation axis\n\t\t\trotation_axis = np.cross(molecule_axis, [0.0,0.0,1.0])\n\t\t\trotation_axis = 1.0/np.linalg.norm(rotation_axis)*rotation_axis\n\t\t\tu = rotation_axis\n\n\t\t\t#calculate rotation_matrix\n\t\t\trotation_matrix = [[np.cos(theta) + u[0]**2 * (1-np.cos(theta)), u[0] * u[1] * (1-np.cos(theta)) - u[2] * np.sin(theta), u[0] * u[2] * (1 - np.cos(theta)) + u[1] * np.sin(theta)],\n\t [u[0] * u[1] * (1-np.cos(theta)) + u[2] * np.sin(theta), np.cos(theta) + u[1]**2 * (1-np.cos(theta)), u[1] * u[2] * (1 - np.cos(theta)) - u[0] * np.sin(theta)],\n\t [u[0] * u[2] * (1-np.cos(theta)) - u[1] * np.sin(theta), u[1] * u[2] * (1-np.cos(theta)) + u[0] * np.sin(theta), np.cos(theta) + u[2]**2 * (1-np.cos(theta))]]\n\n\t\t\tfor j in range(0, len(xyz_file_parts)):\n\t\t\t\tfor i in range(0, len(xyz_file_parts[j][1,:])):\n\t\t\t\t\t \n\t\t\t\t\tvector_to_rotate = [round(float(xyz_file_parts[j][1,i]),5),round(float(xyz_file_parts[j][2,i]),5),round(float(xyz_file_parts[j][3,i]),5)]\n\t\t\t\t\trotated_vector = np.asmatrix(rotation_matrix)*np.asmatrix(vector_to_rotate).T\n\t\t\t\t\txyz_file_parts[j][1,i] = round(rotated_vector[0,0],5)\n\t\t\t\t\txyz_file_parts[j][2,i] = round(rotated_vector[1,0],5)\n\t\t\t\t\txyz_file_parts[j][3,i] = round(rotated_vector[2,0],5)\n\t\t\treturn xyz_file_parts\n\t\telse:\n\t\t\treturn xyz_file_parts", "def get_M(self, theta, phi, gamma, dx, dy, dz):\n w = self.width\n h = self.height\n f = self.focal\n # Projection 2D -> 3D matrix\n A1 = np.array([[1, 0, -w / 2],\n [0, 1, -h / 2],\n [0, 0, 1],\n [0, 0, 1]])\n # Rotation matrices around the X, Y, and Z axis\n RX = np.array([[1, 0, 0, 0],\n [0, np.cos(theta), -np.sin(theta), 0],\n [0, np.sin(theta), np.cos(theta), 0],\n [0, 0, 0, 1]])\n RY = np.array([[np.cos(phi), 0, -np.sin(phi), 0],\n [0, 1, 0, 0],\n [np.sin(phi), 0, np.cos(phi), 0],\n [0, 0, 0, 1]])\n RZ = np.array([[np.cos(gamma), -np.sin(gamma), 0, 0],\n [np.sin(gamma), np.cos(gamma), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n # Composed rotation matrix with (RX, RY, RZ)\n R = np.dot(np.dot(RX, RY), RZ)\n # Translation matrix\n T = np.array([[1, 0, 0, dx],\n [0, 1, 0, dy],\n [0, 0, 1, dz],\n [0, 0, 0, 1]])\n # Projection 3D -> 2D matrix\n A2 = np.array([[f, 0, w / 2, 0],\n [0, f, h / 2, 0],\n [0, 0, 1, 0]])\n # Final transformation matrix\n return np.dot(A2, np.dot(T, np.dot(R, A1)))", "def translation(self, x, y, z) -> None:\n ...", "def vs4_func_2(ns, traj, vs_def_beads_ids, vs_params):\n i, j, k, l = vs_def_beads_ids\n a, b, c = vs_params # weight, weight, nm\n c = c * 10 # retrieve amgstrom for MDA\n\n for ts in ns.aa2cg_universe.trajectory:\n pos_i = ns.aa2cg_universe.atoms[i].position\n pos_j = ns.aa2cg_universe.atoms[j].position\n pos_k = ns.aa2cg_universe.atoms[k].position\n pos_l = ns.aa2cg_universe.atoms[l].position\n r_ij = pos_j - pos_i\n r_ik = pos_k - pos_i\n r_il = pos_l 
- pos_i\n r_ja = a * r_ik - r_ij\n r_jb = b * r_il - r_ij\n r_m = np.cross(r_ja, r_jb)\n traj[ts.frame] = pos_i - c * (r_m / mda.lib.mdamath.norm(r_m))", "def spherical_parameters(self):\n phi_mu_list = []\n theta_mu_list = []\n \n for mu in self.mu_list:\n r, phi, theta = T_cartesian_to_spherical(x=mu[0], y=mu[1], z=mu[2])\n phi_mu_list.append(phi)\n theta_mu_list.append(theta)\n \n return phi_mu_list, theta_mu_list", "def task_three():\n # Formula to calculate:\n # q2 = (z2 / z1) * (R + T * nt / d) * q1\n # where R - rotation\n # T - translation\n # nt - normal vertex of common plane of the 3d points\n # d - shift of the common plane\n # and (R + T * nt / d) required homography transform\n # defined up to constant\n # But in our case T == 0\n tetta = 30 * np.pi / 180\n H = np.array([[1, 0, 0],\n [0, np.cos(tetta), -np.sin(tetta)],\n [0, np.sin(tetta), np.cos(tetta)],\n ])\n print(\"Homography transformation:\\n\", H)", "def reflection ((x,y),(w,z)):\n twodot = 2*dotprod((x,y),(w,z))\n a, b = x - twodot* w, y - twodot*z\n return (a,b)", "def rotation3D_y(angle: float) -> np.array:\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c, 0.0, s], [0.0, 1.0, 0.0], [-s, 0.0, c]])", "def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right rotation\n if octantA2 in [2, 3, 5, 8]: \n theta = -theta \n print \"theta =\" , np.rad2deg(theta)\n RotY = np.matrix( [ [ np.cos(theta), 0.0, np.sin(theta) ],\n [ 0.0 , 1.0, 0.0 ],\n [ -np.sin(theta), 0.0, np.cos(theta) ]\n ]) \n \n psi = np.arctan( abs( adir[1] / adir[0] ) )\n #[2, 4, 6, 8 ] => left rotation\n #[1, 3, 5, 7 ] => right rotation\n if octantA2 in [1, 3, 5, 7]:\n psi = -psi\n print \"psi =\" , np.rad2deg(psi)\n RotZ = np.matrix( [ [ np.cos(psi), -np.sin(psi), 0.0 ],\n [ np.sin(psi), np.cos(psi), 0.0 ],\n [ 0.0 , 0.0 , 1.0 ]\n ])\n return np.asarray( RotY * RotZ )", "def get_A3():\n\n return array([[0.68557183+0.46550108j, 0.12934765-0.1622676j,\n 0.24409518+0.25335939j],\n [0.1531015 + 0.66678983j, 0.45112492+0.18206976j,\n -0.02633966+0.43477693j],\n [-0.10817164-1.16879196j, -0.18446849+0.03755672j,\n 0.06430325-0.44757084j]])", "def _correct_image3D_by_microscope_param(image3D:np.ndarray, microscope_params:dict):\n _image = copy.copy(image3D)\n if not isinstance(microscope_params, dict):\n raise TypeError(f\"Wrong inputt ype for microscope_params, should be a dict\")\n # transpose\n if 'transpose' in microscope_params and microscope_params['transpose']:\n _image = _image.transpose((0,2,1))\n if 'flip_horizontal' in microscope_params and microscope_params['flip_horizontal']:\n _image = np.flip(_image, 2)\n if 'flip_vertical' in microscope_params and microscope_params['flip_vertical']:\n _image = np.flip(_image, 1)\n return _image", "def grid_pos_to_params(grid_data, params):\n func_kwargs = {}\n for j,k in enumerate(params):\n func_kwargs[k] = grid_data[j] \n return func_kwargs", "def text_alignment(x, y):\n if x == 0:\n ha = \"center\"\n elif x > 0:\n ha = \"left\"\n else:\n ha = \"right\"\n if y == 0:\n va = \"center\"\n elif y > 0:\n va = \"bottom\"\n else:\n va = \"top\"\n\n return ha, va", "def scale3d(p, a=1, b=1, c=1):\n translation_mat = np.matrix([\n [a,0,0,0],\n [0,b,0,0],\n [0,0,c,0],\n [0,0,0,1],\n ], dtype=\"float32\")\n\n new_p = p @ translation_mat\n\n return new_p", "def _beam_fit_fn_3(z, z0, Theta):\n return 
(Theta*(z-z0))**2" ]
[ "0.75017375", "0.6478277", "0.647607", "0.63587636", "0.60074914", "0.5803451", "0.5758167", "0.5752519", "0.5700443", "0.5646637", "0.56198543", "0.5572641", "0.5455936", "0.54369533", "0.5418458", "0.5403827", "0.539565", "0.5376442", "0.53718555", "0.53706646", "0.53607404", "0.5297691", "0.5282178", "0.5258941", "0.525507", "0.5244696", "0.5231194", "0.5214326", "0.5208538", "0.52048063", "0.5201825", "0.5186167", "0.51787484", "0.51566374", "0.5152582", "0.5121605", "0.51027685", "0.5095632", "0.5092874", "0.50914603", "0.50852305", "0.5083552", "0.50752527", "0.50694174", "0.5056798", "0.503431", "0.50226074", "0.5011666", "0.50090784", "0.50083566", "0.49974698", "0.49955487", "0.49924698", "0.49890563", "0.49861413", "0.49858057", "0.49811044", "0.4973293", "0.49615544", "0.49600074", "0.49541774", "0.4945725", "0.4941042", "0.49371648", "0.4932653", "0.49311274", "0.49083522", "0.49071565", "0.49061123", "0.4905078", "0.49050066", "0.49025732", "0.48999578", "0.48933414", "0.48874316", "0.4881988", "0.4878496", "0.48709932", "0.48645684", "0.48622984", "0.48597327", "0.48509225", "0.48485216", "0.48407835", "0.4839882", "0.48301986", "0.48298365", "0.48292118", "0.48239163", "0.48201993", "0.4817458", "0.4809181", "0.48025075", "0.48019144", "0.48015556", "0.47982955", "0.47951725", "0.47889695", "0.47889403", "0.4784967" ]
0.78717786
0
Convert 3D alignment parameters (phi, theta, psi, s2x, s2y) into 2D alignment parameters (alpha, sx, sy, mirror); note that there is no mirror in 3D.
def params_3D_2D(phi, theta, psi, s2x, s2y): if theta > 90.0: mirror = 1 alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0) else: mirror = 0 alpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0) return alpha, sx, sy, mirror
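Editorial note: compose_transform2 used in the document above comes from the EMAN2/SPARX alignment toolkit and is not defined in this record. The stand-alone sketch below is an assumption, not the library routine: it composes the same shift-then-rotate 2D transforms with plain NumPy to show how the theta > 90 branch produces the mirror flag and the 540 - psi in-plane angle. The name compose_transform2_sketch, the rotation convention, and the sample Euler angles are all made up for illustration.

import numpy as np

def compose_transform2_sketch(alpha1, sx1, sy1, scale1, alpha2, sx2, sy2, scale2):
    # Build each 2D transform as a 3x3 homogeneous matrix: scaled rotation
    # by alpha (degrees, clockwise convention assumed) followed by a shift.
    def as_matrix(alpha, sx, sy, scale):
        a = np.deg2rad(alpha)
        m = np.eye(3)
        m[:2, :2] = scale * np.array([[np.cos(a), np.sin(a)],
                                      [-np.sin(a), np.cos(a)]])
        m[:2, 2] = [sx, sy]
        return m
    # Apply transform 1 first, then transform 2, and read the combined
    # rotation angle, shifts and scale back out of the product matrix.
    m = as_matrix(alpha2, sx2, sy2, scale2) @ as_matrix(alpha1, sx1, sy1, scale1)
    alpha = np.rad2deg(np.arctan2(m[0, 1], m[0, 0])) % 360.0
    scale = float(np.hypot(m[0, 0], m[0, 1]))
    return alpha, float(m[0, 2]), float(m[1, 2]), scale

# Mirrors the branch logic of params_3D_2D above: theta > 90 sets the 2D
# mirror flag and uses 540 - psi, otherwise 360 - psi (angles in degrees).
phi, theta, psi, s2x, s2y = 10.0, 120.0, 30.0, 1.5, -2.0   # sample values only
mirror = 1 if theta > 90.0 else 0
base = 540.0 if mirror else 360.0
alpha, sx, sy, _ = compose_transform2_sketch(0.0, s2x, s2y, 1.0, base - psi, 0.0, 0.0, 1.0)
print(alpha, sx, sy, mirror)

With these sample values the sketch prints alpha = 150.0 with mirror = 1, i.e. the 540 - psi branch reduced modulo 360; the shifts sx, sy are the input shifts carried through the composed rotation.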
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)%360.0\n\treturn phi, theta, psi, s2x, s2y", "def get_alignment_params(self, s, w):\n\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def get_alignment_params(self, s, w):\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def combine_params2(alpha1, sx1, sy1, mirror1, alpha2, sx2, sy2, mirror2):\n\n\tt1 = Transform({\"type\":\"2D\",\"alpha\":alpha1,\"tx\":sx1,\"ty\":sy1,\"mirror\":mirror1,\"scale\":1.0})\n\tt2 = Transform({\"type\":\"2D\",\"alpha\":alpha2,\"tx\":sx2,\"ty\":sy2,\"mirror\":mirror2,\"scale\":1.0})\n\ttt = t2*t1\n\td = tt.get_params(\"2D\")\n\treturn d[ \"alpha\" ], d[ \"tx\" ], d[ \"ty\" ], d[ \"mirror\" ]", "def coord_space(\n a0: numpy.ndarray, a1: numpy.ndarray, a2: numpy.ndarray, rev: bool = False\n) -> Tuple[numpy.ndarray, Optional[numpy.ndarray]]:\n # dbg = False\n # if dbg:\n # print(a0.transpose())\n # print(a1.transpose())\n # print(a2.transpose())\n\n # a0 = acs[0]\n # a1 = acs[1]\n # a2 = acs[2]\n\n global gtm\n global gmry\n global gmrz, gmrz2\n\n tm = gtm\n mry = gmry\n mrz = gmrz\n mrz2 = gmrz2\n\n # tx acs[1] to origin\n # tm = homog_trans_mtx(-a1[0][0], -a1[1][0], -a1[2][0])\n set_homog_trans_mtx(-a1[0], -a1[1], -a1[2], tm)\n\n # directly translate a2 using a1\n p = a2 - a1\n sc = get_spherical_coordinates(p)\n\n # if dbg:\n # print(\"p\", p.transpose())\n # print(\"sc\", sc)\n\n # mrz = homog_rot_mtx(-sc[1], \"z\") # rotate translated a2 -azimuth about Z\n set_Z_homog_rot_mtx(-sc[1], mrz)\n # mry = homog_rot_mtx(-sc[2], \"y\") # rotate translated a2 -polar_angle about Y\n set_Y_homog_rot_mtx(-sc[2], mry)\n\n # mt completes a1-a2 on Z-axis, still need to align a0 with XZ plane\n # mt = mry @ mrz @ tm # python 3.5 and later\n mt = gmry.dot(gmrz.dot(gtm))\n\n # if dbg:\n # print(\"tm:\\n\", tm)\n # print(\"mrz:\\n\", mrz)\n # print(\"mry:\\n\", mry)\n # # print(\"mt \", mt)\n\n p = mt.dot(a0)\n\n # if dbg:\n # print(\"mt:\\n\", mt, \"\\na0:\\n\", a0, \"\\np:\\n\", p)\n\n # need azimuth of translated a0\n # sc2 = get_spherical_coordinates(p)\n # print(sc2)\n azimuth2 = _get_azimuth(p[0], p[1])\n\n # rotate a0 -azimuth2 about Z to align with X\n # mrz2 = homog_rot_mtx(-azimuth2, \"z\")\n set_Z_homog_rot_mtx(-azimuth2, mrz2)\n\n # mt = mrz2 @ mt\n mt = gmrz2.dot(mt)\n\n # if dbg:\n # print(\"mt:\", mt, \"\\na0:\", a0, \"\\np:\", p)\n # # print(p, \"\\n\", azimuth2, \"\\n\", mrz2, \"\\n\", mt)\n\n # if dbg:\n # print(\"mt:\\n\", mt)\n # print(\"<<<<<<==============================\")\n\n if not rev:\n return mt, None\n\n # rev=True, so generate the reverse transformation\n\n # rotate a0 
theta about Z, reversing alignment with X\n # mrz2 = homog_rot_mtx(azimuth2, \"z\")\n set_Z_homog_rot_mtx(azimuth2, mrz2)\n # rotate a2 phi about Y\n # mry = homog_rot_mtx(sc[2], \"y\")\n set_Y_homog_rot_mtx(sc[2], mry)\n # rotate a2 theta about Z\n # mrz = homog_rot_mtx(sc[1], \"z\")\n set_Z_homog_rot_mtx(sc[1], mrz)\n # translation matrix origin to a1\n # tm = homog_trans_mtx(a1[0][0], a1[1][0], a1[2][0])\n set_homog_trans_mtx(a1[0], a1[1], a1[2], tm)\n\n # mr = tm @ mrz @ mry @ mrz2\n mr = gtm.dot(gmrz.dot(gmry.dot(gmrz2)))\n # mr = numpy.dot(tm, numpy.dot(mrz, numpy.dot(mry, mrz2)))\n\n return mt, mr", "def get_params3D(ima, xform = \"xform.align3d\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],d[\"tx\"],d[\"ty\"],d[\"tz\"],d[\"mirror\"],d[\"scale\"]", "def get_params2D(ima, xform = \"xform.align2d\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"2D\")\n\treturn d[\"alpha\"],d[\"tx\"],d[\"ty\"],d[\"mirror\"],d[\"scale\"]", "def sat_3d_position(sat_2d_position):\n return np.dot(transformation_parameter, xi_eta(sat_2d_position))", "def to_alignment(self):\n alignment = dict()\n alignment[\"x\"] = self.x\n alignment[\"w\"] = self.w\n alignment[\"y\"] = self.y\n alignment[\"h\"] = self.h\n alignment[\"frame_dims\"] = self.frame_dims\n alignment[\"landmarksXY\"] = self.landmarksXY\n return alignment", "def set_params2D(ima, p, xform = \"xform.align2d\"):\n\tt = Transform({\"type\":\"2D\",\"alpha\":p[0],\"tx\":p[1],\"ty\":p[2],\"mirror\":p[3],\"scale\":p[4]})\n\tima.set_attr(xform, t)", "def combine_trans_projection(tx: float, ty: float, rot: float, mag: float, x: float, y: float, z: float, phi: float, the: float, psi: float, tiltangle: float, dim: float, binning: int, particle_dim: int = 200):\n from numpy import cos, sin, pi\n\n # Calculates the inverse transformation matrix of the projection alignment transformations\n alpha = -rot * pi/180\n c = cos(alpha)\n s = sin(alpha)\n\n rotate = np.matrix([[c, s, 0], [-s, c, 0], [0, 0, 1]])\n magnify = np.matrix([[mag, 0, 0], [0, mag, 0], [0, 0, 1]])\n translate = np.matrix([[1, 0, tx], [0, 1, ty], [0, 0, 1]])\n\n align_transformations = np.linalg.inv(rotate * magnify * translate)\n\n # Map the 3D position to a 2D position on the projection of the tiltangle\n x = x * binning\n y = y * binning\n z = z * binning\n\n aligned_y = y # assume the rotation axis is around y\n aligned_x = (cos(tiltangle * pi / 180) * (x - dim / 2) -\n sin(tiltangle * pi / 180) * (z - dim / 2)) + dim / 2\n\n # Use the projection alignment transformations to map this 2D position to a 2D position on the raw projections\n aligned_pos = np.matrix([[aligned_x - dim/2], [aligned_y - dim/2], [1]])\n raw_pos = align_transformations * aligned_pos\n\n # Calculate the rotation matrix for the template, a combination of the particle rotation and the tilt angle\n template_3d_rotation = generate_rotation_matrix(0, tiltangle, 0) * generate_rotation_matrix(\n phi, the, psi) * matrix_rotate_3d_z(rot) * matrix_magnify_3d(mag)\n\n # Merge this matrix with the projection transformations\n merged_matrix = template_3d_rotation\n\n return (align_transformations, (raw_pos.item(0, 0) + dim/2, raw_pos.item(1, 0) + dim/2), (aligned_x, aligned_y), merged_matrix)", "def rotateAlignXform(self):\n r = np.concatenate(([self.x], [self.y], [self.z]), 0)\n r = np.concatenate((r, np.array([[0,0,0]])), 0)\n r = np.concatenate((r, np.array([0,0,0,1]).reshape(-1,1)), 1)\n return r", "def set_params3D(ima, p, xform = \"xform.align3d\"):\n\tt = 
Transform({\"type\":\"spider\",\"phi\":p[0],\"theta\":p[1],\"psi\":p[2],\"tx\":p[3],\"ty\":p[4],\"tz\":p[5],\"mirror\":p[6],\"scale\":p[7]})\n\tima.set_attr(xform, t)", "def test_align_invert(self):\n al = align(self.amp1, self.amp2, inverse=False)\n\n al_inv = align(self.amp2, self.amp1, inverse=True)\n\n print(al.R)\n print(al_inv.R)\n\n print(al.T)\n print(al_inv.T)", "def vs3_func_4(ns, traj, vs_def_beads_ids, vs_params):\n i, j, k = vs_def_beads_ids\n a, b, c = vs_params # weight, weight, nm**(-1)\n c = c / 10 # retrieve amgstrom**(-1) for MDA\n\n for ts in ns.aa2cg_universe.trajectory:\n pos_i = ns.aa2cg_universe.atoms[i].position\n pos_j = ns.aa2cg_universe.atoms[j].position\n pos_k = ns.aa2cg_universe.atoms[k].position\n r_ij = pos_j - pos_i\n r_ik = pos_k - pos_i\n traj[ts.frame] = pos_i + a * r_ij + b * r_ik - c * (\n r_ij / mda.lib.mdamath.norm(r_ij) * r_ik / mda.lib.mdamath.norm(r_ik))", "def convert_coordinate_system_3d(x, y, z):\n\n return x, -z, y", "def vs3_func_1(ns, traj, vs_def_beads_ids, vs_params):\n\n i, j, k = vs_def_beads_ids\n a, b = vs_params # nm, nm\n a, b = a * 10, b * 10 # retrieve amgstrom for MDA\n\n for ts in ns.aa2cg_universe.trajectory:\n pos_i = ns.aa2cg_universe.atoms[i].position\n pos_j = ns.aa2cg_universe.atoms[j].position\n pos_k = ns.aa2cg_universe.atoms[k].position\n r_ij = pos_j - pos_i\n r_ik = pos_k - pos_i\n traj[ts.frame] = pos_i + a * r_ij / mda.lib.mdamath.norm(r_ij) / 2 + b * r_ik / mda.lib.mdamath.norm(r_ik) / 2", "def vs3_func_2(ns, traj, vs_def_beads_ids, vs_params):\n i, j, k = vs_def_beads_ids\n a, b = vs_params # weight, nm\n b = b * 10 # retrieve amgstrom for MDA\n\n for ts in ns.aa2cg_universe.trajectory:\n pos_i = ns.aa2cg_universe.atoms[i].position\n pos_j = ns.aa2cg_universe.atoms[j].position\n pos_k = ns.aa2cg_universe.atoms[k].position\n r_ij = pos_j - pos_i\n r_jk = pos_k - pos_j\n comb_ijk = (1 - a) * r_ij + a * r_jk\n traj[ts.frame] = pos_i + b * (comb_ijk / mda.lib.mdamath.norm(comb_ijk))", "def nfw_physical2angle_fromNFWparams(self, rhos, rs, z):\n\n D_d = self.cosmo.D_A_z(z)\n Rs_angle = rs / D_d / self.cosmo.arcsec # Rs in arcsec\n theta_Rs = rhos * (4 * rs ** 2 * (1 + numpy.log(1. 
/ 2.)))\n eps_crit = self.get_sigma_crit_lensing(z, self.z_source)\n\n return Rs_angle, theta_Rs / eps_crit / D_d / self.cosmo.arcsec", "def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0", "def horn_adjust(x, y):\n debug=False\n #debug=True\n meanX = x.mean(axis=0)\n meanY = y.mean(axis=0)\n translation = meanY - meanX\n x_centered = x - meanX\n y_centered = y - meanY\n if debug:\n print(\"x_centered\")\n print(x_centered)\n print(\"y_centered\")\n print(y_centered)\n # Find how much to rescale the x's. Entrywise multiplication.\n x_scale = np.sqrt((x_centered * x_centered).sum())\n y_scale = np.sqrt((y_centered * y_centered).sum())\n scale_factor = y_scale / x_scale\n x_centered_prime = x_centered * scale_factor\n if debug:\n print(\"scale_factor\")\n print(scale_factor)\n print(\"x_centered_prime\")\n print(x_centered_prime)\n # Find angle to rotate the planes\n x_perp = np.cross(x_centered_prime[0], x_centered_prime[1])\n y_perp = np.cross(y_centered[0], y_centered[1])\n # Find rotation matrix to rotate the x plane into the y plane\n # Using https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d\n # https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula\n x_perp_unit = x_perp / np.linalg.norm(x_perp)\n y_perp_unit = y_perp / np.linalg.norm(y_perp)\n v = np.cross(x_perp_unit, y_perp_unit)\n s = np.linalg.norm(v) # sine of angle between the planes\n c = x_perp_unit.dot(y_perp_unit) # cosine of angle between the planes\n v_x = np.array([[0, -v[2], v[1]],\n [v[2], 0, -v[0]],\n [-v[1], v[0], 0]])\n # rotation_p acts on the plane\n rotation_p = np.eye(3) + v_x + v_x.dot(v_x) * (1 - c) / s**2.0\n # Transpose to make each x a column vector, then transpose back for next part\n x_plane = rotation_p.dot(x_centered_prime.T).T\n # Now rotate within the plane, as in Sec. 
5 of Horn\n v_y = np.array([[0, -y_perp_unit[2], y_perp_unit[1]],\n [y_perp_unit[2], 0, -y_perp_unit[0]],\n [-y_perp_unit[1], y_perp_unit[0], 0]])\n s_win_tmp = np.sum([np.cross(x_plane[i], y_centered[i]) for i in range(3)],\n axis=0).dot(y_perp_unit)\n c_win_tmp = np.sum([x_plane[i].dot(y_centered[i]) for i in range(3)],\n axis=0)\n sin_theta = s_win_tmp / np.sqrt(np.linalg.norm(s_win_tmp)**2 +\n np.linalg.norm(c_win_tmp)**2)\n cos_theta = c_win_tmp / np.sqrt(np.linalg.norm(s_win_tmp)**2 +\n np.linalg.norm(c_win_tmp)**2)\n rotation_win = np.eye(3) + sin_theta * v_y + (1 - cos_theta) * v_y.dot(v_y)\n # transpose so each column is an x vector, then transpose back at the end\n # x_final = rotation_win.dot(x_final.T).T\n rotation_full = rotation_win.dot(rotation_p)\n # Ignore scale_factor\n # T(x) = Ax + b\n A = rotation_full\n b = meanY - rotation_full.dot(meanX)\n if debug:\n print(\"A\")\n print(rotation_full)\n print(\"b\")\n print(b)\n return(A, b)", "def compose_transform3(phi1,theta1,psi1,sx1,sy1,sz1,scale1,phi2,theta2,psi2,sx2,sy2,sz2,scale2):\n\n\tR1 = Transform({\"type\":\"spider\",\"phi\":float(phi1),\"theta\":float(theta1),\"psi\":float(psi1),\"tx\":float(sx1),\"ty\":float(sy1),\"tz\":float(sz1),\"mirror\":0,\"scale\":float(scale1)})\n\tR2 = Transform({\"type\":\"spider\",\"phi\":float(phi2),\"theta\":float(theta2),\"psi\":float(psi2),\"tx\":float(sx2),\"ty\":float(sy2),\"tz\":float(sz2),\"mirror\":0,\"scale\":float(scale2)})\n\tRcomp=R2*R1\n\td = Rcomp.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],d[\"tx\"],d[\"ty\"],d[\"tz\"],d[\"scale\"]", "def getTranslationsFromTiltSeries(self, TiltSeries_):\n # initialize alignment in separate array - easier for optimization\n nprojs = len(TiltSeries_._ProjectionList._list)\n self._alignmentTransX = nprojs * [0.]\n self._alignmentTransY = nprojs * [0.]\n for (kk, proj) in enumerate(TiltSeries_._ProjectionList._list):\n self._alignmentTransX[kk] = proj.getAlignmentTransX()\n self._alignmentTransY[kk] = proj.getAlignmentTransY()\n return self._alignmentTransX, self._alignmentTransY", "def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()/self.pbaspect[0]\n ymin, ymax = self.get_ylim3d()/self.pbaspect[1]\n zmin, zmax = self.get_zlim3d()/self.pbaspect[2]\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0 \n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates \n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down \n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M", "def xyz2facestereo(x,y,z):\n ax = np.abs(x)\n ay = np.abs(y)\n az = np.abs(z)\n mskx = (y != x) & (z != x)\n mskyz = z != y\n msk0 = ( x >= ay) & ( x >= az) & mskx\n msk3 = (-x >= ay) & (-x >= az) & mskx\n msk1 = ( y >= az) & mskyz\n msk4 = (-y >= az) & mskyz\n msk2 = z > 0\n f = (1-msk0)*(msk3*3 + (1-msk3)*(msk1 + (1-msk1)*(msk4*4 + (1-msk4)*(msk2*2 + (1-msk2)*5))))\n xnew = np.choose(f, ( y, -x, -x, -z, -z, y))\n 
ynew = np.choose(f, ( z, z, -y, -y, x, x))\n znew = np.choose(f, ( x, y, z, -x, -y, -z))\n X,Y = xyz2stereo(xnew, ynew, znew)\n\n return f,X,Y", "def get_affine_matrix3d(\n translations: Tensor,\n center: Tensor,\n scale: Tensor,\n angles: Tensor,\n sxy: Tensor | None = None,\n sxz: Tensor | None = None,\n syx: Tensor | None = None,\n syz: Tensor | None = None,\n szx: Tensor | None = None,\n szy: Tensor | None = None,\n) -> Tensor:\n transform: Tensor = get_projective_transform(center, -angles, scale)\n transform[..., 3] += translations # tx/ty/tz\n\n # pad transform to get Bx3x3\n transform_h = convert_affinematrix_to_homography3d(transform)\n if any(s is not None for s in [sxy, sxz, syx, syz, szx, szy]):\n shear_mat = get_shear_matrix3d(center, sxy, sxz, syx, syz, szx, szy)\n transform_h = transform_h @ shear_mat\n\n return transform_h", "def xyz_to_zyx(xyz1, xyz2, xyz3):\n\n # Converto gli angoli in ingresso in gradi\n xyz1_r = xyz1 / 180.0 * Kinematic.M_PI\n xyz2_r = xyz2 / 180.0 * Kinematic.M_PI\n xyz3_r = xyz3 / 180.0 * Kinematic.M_PI\n\n # Calcolo l'elemento 3:1 della prima matrice (s1s3 - c1c3s2)\n minus_s2_xyz = (math.sin(xyz1_r) * math.sin(xyz3_r)) - (math.cos(xyz1_r) * math.cos(xyz3_r) * math.sin(xyz2_r))\n\n # Calcolo l'elemento 2:1 della prima matrice (c1s3 + c3s1s2)\n c2s1_xyz = (math.cos(xyz1_r) * math.sin(xyz3_r)) + (math.cos(xyz3_r) * math.sin(xyz1_r) * math.sin(xyz2_r))\n\n # Calcolo l'elemento 2:3 della prima matrice (c3s1 + c1s2s3)\n c2s3_xyz = (math.cos(xyz3_r) * math.sin(xyz1_r)) + (math.cos(xyz1_r)) - (math.sin(xyz2_r) * math.sin(xyz3_r))\n\n # Ora trovo gli angoli\n zyx2_r = math.asin(-minus_s2_xyz)\n c2_xyz = math.cos(zyx2_r)\n zyx1_r = math.asin(c2s1_xyz / c2_xyz)\n zyx3_r = math.asin(c2s3_xyz / c2_xyz)\n zyx3 = zyx3_r / Kinematic.M_PI * 180.0\n zyx2 = zyx2_r / Kinematic.M_PI * 180.0\n zyx1 = zyx1_r / Kinematic.M_PI * 180.0\n\n return [zyx3, zyx2, zyx1, zyx3_r, zyx2_r, zyx1_r]", "def matrix_translate_3d(tx: float, ty: float, tz: float) -> np.matrix:\n return np.matrix([[1, 0, 0, tx], [0, 1, 0, ty], [0, 0, 1, tz], [0, 0, 0, 1]])", "def affine_transform_3d(v, mapping, alpha = 1):\r\n p_wgt = vec3(0, 0, 0)\r\n q_wgt = vec3(0, 0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n z = mp[0].z - v.z\r\n if (x == 0 and y == 0 and z == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y + z*z) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n M1 = mat3(0)\r\n M2 = mat3(0)\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n M1 += p_adj.transpose_multiply(p_adj)*w[i]\r\n M2 += p_adj.transpose_multiply(q_adj)*w[i]\r\n M1 = M1.inverse()\r\n M = M1*M2\r\n M = M.transpose()\r\n v_out = M*(v - p_wgt) + q_wgt\r\n return v_out", "def vs3_func_3(ns, traj, vs_def_beads_ids, vs_params):\n i, j, k = vs_def_beads_ids\n ang_deg, d = vs_params # degrees, nm\n ang_rad = np.deg2rad(ang_deg) # retrieve radians\n d = d * 10 # retrieve amgstrom for MDA\n\n for ts in ns.aa2cg_universe.trajectory:\n pos_i = ns.aa2cg_universe.atoms[i].position\n pos_j = ns.aa2cg_universe.atoms[j].position\n pos_k = ns.aa2cg_universe.atoms[k].position\n r_ij = pos_j - pos_i\n r_jk = pos_k - pos_j\n comb_ijk = r_jk - (np.dot(r_ij, r_jk) / np.dot(r_ij, r_ij)) * r_ij\n traj[ts.frame] = pos_i + d * np.cos(ang_rad) * (r_ij / mda.lib.mdamath.norm(r_ij)) + d * np.sin(ang_rad) * (\n comb_ijk / 
mda.lib.mdamath.norm(comb_ijk))", "def ancmig_adj_3(params, ns):\n #8 parameters \n nu1, nuA, nu2, nu3, m1_1, T1a, T1b, T2 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T1a\n nu_T1 = [nu1, nuA]\n mig1 = numpy.array([[0, m1_1],[m1_1, 0]])\n fs.integrate(nu_T1, T1a, m=mig1)\n fs.integrate(nu_T1, T1b) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T2\n nu_T2 = [nu1, nu2, nu3]\n fs.integrate(nu_T2, T2)\n return fs", "def to_se3(self, state: Vector) -> RigidTransform:\n return (state[:9],state[9:12])", "def _update_params(self):\n with self.sphere.sphere_lock:\n self._args_to_params(self.sphere.bai_1d_args, self.bai_1d_pars)\n self._args_to_params(self.sphere.bai_2d_args, self.bai_2d_pars)\n #self._args_to_params(self.sphere.mg_args, self.mg_pars)", "def get_affine_matrix3d(\n translations: torch.Tensor,\n center: torch.Tensor,\n scale: torch.Tensor,\n angles: torch.Tensor,\n sxy: Optional[torch.Tensor] = None,\n sxz: Optional[torch.Tensor] = None,\n syx: Optional[torch.Tensor] = None,\n syz: Optional[torch.Tensor] = None,\n szx: Optional[torch.Tensor] = None,\n szy: Optional[torch.Tensor] = None,\n) -> torch.Tensor:\n transform: torch.Tensor = get_projective_transform(center, -angles, scale)\n transform[..., 3] += translations # tx/ty/tz\n\n # pad transform to get Bx3x3\n transform_h = convert_affinematrix_to_homography3d(transform)\n if any(s is not None for s in [sxy, sxz, syx, syz, szx, szy]):\n shear_mat = get_shear_matrix3d(center, sxy, sxz, syx, syz, szx, szy)\n transform_h = transform_h @ shear_mat\n\n return transform_h", "def _transform_warp_impl3d(\n src: Tensor,\n dst_pix_trans_src_pix: Tensor,\n dsize_src: tuple[int, int, int],\n dsize_dst: tuple[int, int, int],\n grid_mode: str,\n padding_mode: str,\n align_corners: bool,\n) -> Tensor:\n dst_norm_trans_src_norm: Tensor = normalize_homography3d(dst_pix_trans_src_pix, dsize_src, dsize_dst)\n\n src_norm_trans_dst_norm = torch.inverse(dst_norm_trans_src_norm)\n return homography_warp3d(src, src_norm_trans_dst_norm, dsize_dst, grid_mode, padding_mode, align_corners, True)", "def rigid_transform_3d(xs,ys):\n assert xs.shape == ys.shape\n assert xs.shape[0] == 3, 'The points must be of dimmensionality 3'\n\n # find centroids and H\n x_centroid = np.mean(xs, axis=1)[:, np.newaxis]\n y_centroid = np.mean(ys, axis=1)[:, np.newaxis]\n \n H = (xs - x_centroid)@(ys - y_centroid).T\n\n # find rotation\n U, S, Vt = np.linalg.svd(H)\n rotation = [email protected]\n\n # handling reflection\n if np.linalg.det(rotation) < 0:\n Vt[2, :] *= -1\n rotation = np.dot(Vt.T, U.T)\n \n # find translation\n translation = y_centroid - rotation@x_centroid\n \n return translation, rotation", "def to_se3(self, state: Vector) -> RigidTransform:\n return (state[:9],state[9:])", "def common_line_in3D(phiA,thetaA,phiB,thetaB):\n\n\tfrom math import pi, sqrt, cos, sin, asin, atan2\n\n\tpiOver=pi/180.0;\n\tph1 = phiA*piOver; \n\tth1 = thetaA*piOver; \n\tph2 = phiB*piOver; \n\tth2 = thetaB*piOver;\n\t\n \t#nx = cos(thetaBR)*sin(thetaAR)*sin(phiAR) - cos(thetaAR)*sin(thetaBR)*sin(phiBR) ;\n\t#ny = cos(thetaAR)*sin(thetaBR)*cos(phiBR) - cos(thetaBR)*sin(thetaAR)*cos(phiAR) ;\n\t#nz = sin(thetaAR)*sin(thetaBR)*sin(phiAR-phiBR);\n\n\n\tnx = sin(th1)*cos(ph1)*sin(ph2)-sin(th2)*sin(ph1)*cos(ph2)\n\tny = 
sin(th1)*cos(th2)*cos(ph1)*cos(ph2)-cos(th1)*sin(th2)*cos(ph1)*cos(ph2)\n\tnz = cos(th2)*sin(ph1)*cos(ph2)-cos(th1)*cos(ph1)*sin(ph2)\n\n\tnorm = nx*nx + ny*ny + nz*nz\n \n\tif norm < 1e-5:\n\t\t#print 'phiA,thetaA,phiB,thetaB:', phiA, thetaA, phiB, thetaB\n\t\treturn 0.0, 0.0\n\n\tif nz<0: nx=-nx; ny=-ny; nz=-nz;\n\n\t#thetaCom = asin(nz/sqrt(norm))\n\tphiCom = asin(nz/sqrt(norm))\n\t#phiCom = atan2(ny,nx)\n\tthetaCom = atan2(ny, nx)\n\t\n\treturn phiCom*180.0/pi , thetaCom*180.0/pi", "def trans_setup():\n # slot7 slot6 slot5 slot4 slot3 slot2 slot1 <------ beam direction (slot 8 is currently B-fiber only)\n # Be Be Be Be Be Be Be lens material\n # 1.5 1.5 0.5 0.5 0.5 0.5 0.5 lens radius [mm]\n # 1 1 5 8 4 2 1 number of lenses\n lens_R=[0.5,0.5,0.5,0.5,0.5,1.5,1.5]\n lens_mat=['Be','Be','Be','Be','Be','Be','Be']\n lens_N=[1,2,4,8,5,1,1]\n trans_pos=[35.2,35.8]\n return {'lens_material':lens_mat,'lens_number':lens_N,'lens_radius':lens_R,'trans_position':trans_pos}", "def derive(params):\n x, y, dx, dy = params\n r = (x ** 2 + y ** 2) ** 0.5\n return np.array([dx, dy, -G * M * x / (r ** 3), -G * M * y / (r ** 3)])", "def inverse_transform2(alpha, tx = 0.0, ty = 0.0, mirror = 0):\n\n\tt = Transform({\"type\":\"2D\",\"alpha\":alpha,\"tx\":tx,\"ty\":ty,\"mirror\":mirror,\"scale\":1.0})\n\tt = t.inverse()\n\tt = t.get_params(\"2D\")\n\treturn t[ \"alpha\" ], t[ \"tx\" ], t[ \"ty\" ], t[ \"mirror\" ]", "def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right rotation\n if octantA2 in [2, 3, 5, 8]: \n theta = -theta \n print \"theta =\" , np.rad2deg(theta)\n RotY = np.matrix( [ [ np.cos(theta), 0.0, np.sin(theta) ],\n [ 0.0 , 1.0, 0.0 ],\n [ -np.sin(theta), 0.0, np.cos(theta) ]\n ]) \n \n psi = np.arctan( abs( adir[1] / adir[0] ) )\n #[2, 4, 6, 8 ] => left rotation\n #[1, 3, 5, 7 ] => right rotation\n if octantA2 in [1, 3, 5, 7]:\n psi = -psi\n print \"psi =\" , np.rad2deg(psi)\n RotZ = np.matrix( [ [ np.cos(psi), -np.sin(psi), 0.0 ],\n [ np.sin(psi), np.cos(psi), 0.0 ],\n [ 0.0 , 0.0 , 1.0 ]\n ])\n return np.asarray( RotY * RotZ )", "def rotate_3D(atom, source_atom):\n from lauescript.cryst.match import get_transform\n\n lst2 = [np.array([0, 0, 0]), source_atom.orientation[0], source_atom.orientation[1]]\n lst1 = [np.array([0, 0, 0]), atom.orientation[0], atom.orientation[1]]\n\n matrix = get_transform(lst1, lst2, matrix=True)\n\n adp = source_atom.adp['cart_int']\n\n atom.adp['cart_int'] = rotate_adp(adp, matrix)", "def ik3(xyz_array):\n # Eqn 1\n theta_1 = np.arctan2(xyz_array[1], xyz_array[0])\n # Eqn 2\n r1 = np.hypot(xyz_array[0], xyz_array[1])\n # Eqn 3\n r2 = xyz_array[2] - link_lengths[0]\n # Eqn 4\n phi2 = np.arctan2(r2, r1)\n # Eqn 5\n r3 = np.hypot(r1, r2)\n # Eqn 6\n num6 = np.power(link_lengths[2], 2) - \\\n np.power(link_lengths[1], 2) - np.power(r3, 2)\n den6 = -2 * link_lengths[1] * r3\n phi1 = np.arccos(num6 / den6)\n # Eqn 7\n # theta_2 = phi2 - phi1 # elbow down\n theta_2 = phi2 + phi1\n # Eqn 8\n num8 = np.power(r3, 2) - \\\n np.power(link_lengths[1], 2) - np.power(link_lengths[2], 2)\n den8 = -2 * link_lengths[1] * link_lengths[2]\n phi3 = np.arccos(num8 / den8)\n # Eqn 9\n # theta_3 = pi - phi3 # elbow down\n theta_3 = -(np.pi - phi3)\n # Output Joint Angles\n theta_1 = np.rad2deg(theta_1)\n theta_2 = np.rad2deg(theta_2)\n theta_3 = np.rad2deg(theta_3)\n 
joint_rotations = np.array([theta_1, theta_2, theta_3])\n return joint_rotations", "def test_translate_to_center_of_mass(self):\n xyz = \"\"\"O 1.28706525 0.52121353 0.04219198\nC 0.39745682 -0.35265044 -0.63649234\nC 0.36441173 -1.68197093 0.08682400\nH -0.59818222 0.10068325 -0.65235399\nH 0.74799641 -0.48357798 -1.66461710\nH 0.03647269 -1.54932006 1.12314420\nH -0.31340646 -2.38081353 -0.41122551\nH 1.36475837 -2.12581592 0.12433596\nH 2.16336803 0.09985803 0.03295192\n\"\"\"\n translated_xyz = converter.translate_to_center_of_mass(converter.str_to_xyz(xyz))\n cm_x, cm_y, cm_z = converter.get_center_of_mass(xyz=translated_xyz)\n self.assertAlmostEqual(cm_x, 0.0000, 3)\n self.assertAlmostEqual(cm_y, 0.0000, 3)\n self.assertAlmostEqual(cm_z, 0.0000, 3)\n\n xyz = {'coords': ((0.0, 0.0, 0.113488),\n (0.0, 0.93867, -0.264806),\n (0.812912, -0.469335, -0.264806),\n (-0.812912, -0.469335, -0.264806)),\n 'symbols': ('N', 'H', 'H', 'H')}\n translated_xyz = converter.translate_to_center_of_mass(converter.check_xyz_dict(xyz))\n expected_xyz = \"\"\"N 0.00000000 0.00000000 0.06717524\nH 0.00000000 0.93867000 -0.31111876\nH 0.81291200 -0.46933500 -0.31111876\nH -0.81291200 -0.46933500 -0.31111876\"\"\"\n self.assertEqual(converter.xyz_to_str(translated_xyz), expected_xyz)\n cm_x, cm_y, cm_z = converter.get_center_of_mass(translated_xyz)\n self.assertAlmostEqual(cm_x, 0.0000, 3)\n self.assertAlmostEqual(cm_y, 0.0000, 3)\n self.assertAlmostEqual(cm_z, 0.0000, 3)", "def _mat3(self):\n if self.frame.orientation == HillFrame.DEFAULT_ORIENTATION:\n return np.identity(3)\n else:\n return self.QSW2TNW", "def inverse_transform3(phi, theta=0.0, psi=0.0, tx=0.0, ty=0.0, tz=0.0, mirror = 0, scale=1.0):\n\n\td = Transform({'type': 'spider', 'phi': phi, 'theta': theta, 'psi': psi, 'tx': tx, 'ty': ty, 'tz': tz, \"mirror\":mirror,\"scale\":scale})\n\td = d.inverse()\n\td = d.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],d[\"tx\"],d[\"ty\"],d[\"tz\"],d[\"mirror\"],d[\"scale\"]", "def vrrotvec2mat(ax_ang):\n\n #file_dir = os.path.dirname(os.path.realpath(__file__))\n #path_dir2 = file_dir + '/../geometry/'\n #sys.path.append(path_dir2)\n\n if ax_ang.ndim == 1:\n if np.size(ax_ang) == 5:\n ax_ang = np.reshape(ax_ang, (5, 1))\n msz = 1\n elif np.size(ax_ang) == 4:\n ax_ang = np.reshape(np.hstack((ax_ang, np.array([1]))), (5, 1))\n msz = 1\n else:\n raise Exception('Wrong Input Type')\n elif ax_ang.ndim == 2:\n if np.shape(ax_ang)[0] == 5:\n msz = np.shape(ax_ang)[1]\n elif np.shape(ax_ang)[1] == 5:\n ax_ang = ax_ang.transpose()\n msz = np.shape(ax_ang)[1]\n else:\n raise Exception('Wrong Input Type')\n else:\n raise Exception('Wrong Input Type')\n\n direction = ax_ang[0:3, :]\n angle = ax_ang[3, :]\n\n d = np.array(direction, dtype=np.float64)\n d /= np.linalg.norm(d, axis=0)\n x = d[0, :]\n y = d[1, :]\n z = d[2, :]\n c = np.cos(angle)\n s = np.sin(angle)\n tc = 1 - c\n\n mt11 = tc*x*x + c\n mt12 = tc*x*y - s*z\n mt13 = tc*x*z + s*y\n\n mt21 = tc*x*y + s*z\n mt22 = tc*y*y + c\n mt23 = tc*y*z - s*x\n\n mt31 = tc*x*z - s*y\n mt32 = tc*y*z + s*x\n mt33 = tc*z*z + c\n\n mtx = np.column_stack((mt11, mt12, mt13, mt21, mt22, mt23, mt31, mt32, mt33))\n\n inds1 = np.where(ax_ang[4, :] == -1)\n mtx[inds1, :] = -mtx[inds1, :]\n\n if msz == 1:\n mtx = mtx.reshape(3, 3)\n else:\n mtx = mtx.reshape(msz, 3, 3)\n\n return mtx", "def decompose(xform, angles=True, shears=False):\n\n # The inline comments in the code below are taken verbatim from\n # the referenced article, [except for notes in square 
brackets].\n\n # The next step is to extract the translations. This is trivial;\n # we find t_x = M_{4,1}, t_y = M_{4,2}, and t_z = M_{4,3}. At this\n # point we are left with a 3*3 matrix M' = M_{1..3,1..3}.\n xform = np.array(xform).T\n\n if xform.shape == (4, 4):\n translations = xform[ 3, :3]\n xform = xform[:3, :3]\n else:\n translations = np.array([0, 0, 0])\n\n M1 = xform[0]\n M2 = xform[1]\n M3 = xform[2]\n\n # The process of finding the scaling factors and shear parameters\n # is interleaved. First, find s_x = |M'_1|.\n sx = np.sqrt(np.dot(M1, M1))\n M1 = M1 / sx\n\n # Then, compute an initial value for the xy shear factor,\n # s_xy = M'_1 * M'_2. (this is too large by the y scaling factor).\n sxy = np.dot(M1, M2)\n\n # The second row of the matrix is made orthogonal to the first by\n # setting M'_2 = M'_2 - s_xy * M'_1.\n M2 = M2 - sxy * M1\n\n # Then the y scaling factor, s_y, is the length of the modified\n # second row.\n sy = np.sqrt(np.dot(M2, M2))\n\n # The second row is normalized, and s_xy is divided by s_y to\n # get its final value.\n M2 = M2 / sy\n sxy = sxy / sx\n\n # The xz and yz shear factors are computed as in the preceding,\n sxz = np.dot(M1, M3)\n syz = np.dot(M2, M3)\n\n # the third row is made orthogonal to the first two rows,\n M3 = M3 - sxz * M1 - syz * M2\n\n # the z scaling factor is computed,\n sz = np.sqrt(np.dot(M3, M3))\n\n # the third row is normalized, and the xz and yz shear factors are\n # rescaled.\n M3 = M3 / sz\n sxz = sxz / sx\n syz = syz / sy\n\n # The resulting matrix now is a pure rotation matrix, except that it\n # might still include a scale factor of -1. If the determinant of the\n # matrix is -1, negate the matrix and all three scaling factors. Call\n # the resulting matrix R.\n #\n # [We do things different here - if the rotation matrix has negative\n # determinant, the flip is encoded in the x scaling factor.]\n R = np.array([M1, M2, M3])\n if linalg.det(R) < 0:\n R[0] = -R[0]\n sx = -sx\n\n # Finally, we need to decompose the rotation matrix into a sequence\n # of rotations about the x, y, and z axes. 
[This is done in the\n # rotMatToAxisAngles function]\n if angles: rotations = rotMatToAxisAngles(R.T)\n else: rotations = R.T\n\n retval = [np.array([sx, sy, sz]), translations, rotations]\n\n if shears:\n retval.append(np.array((sxy, sxz, syz)))\n\n return tuple(retval)", "def transform_params(cls, orion_params, space):\n ax_params = {}\n for dim in space.values():\n if dim.type == \"fidelity\":\n continue\n\n ax_params[dim.name] = orion_params[dim.name]\n\n return ax_params", "def similarity_transform_3d(v, mapping, alpha = 1):\r\n p_wgt = vec3(0, 0, 0)\r\n q_wgt = vec3(0, 0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n z = mp[0].z - v.z\r\n if (x == 0 and y == 0 and z == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y + z*z) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n A = mat3(0)\r\n k = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n A += w[i]*p_adj.transpose_multiply(q_adj)\r\n k += w[i]*p_adj.dot(p_adj)\r\n A_arr = np.array(A.matrix).reshape(3, 3)\r\n U, S, V = np.linalg.svd(A_arr)\r\n M_arr = np.matmul(np.transpose(V), np.transpose(U))\r\n M = mat3(M_arr.ravel().tolist())\r\n k = np.sum(S)/k\r\n v_out = k*M*(v - p_wgt) + q_wgt\r\n return v_out", "def rigid_transform_3d(v, mapping, alpha = 1):\r\n p_wgt = vec3(0, 0, 0)\r\n q_wgt = vec3(0, 0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n z = mp[0].z - v.z\r\n if (x == 0 and y == 0 and z == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y + z*z) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n A = mat3(0)\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n A += w[i]*p_adj.transpose_multiply(q_adj)\r\n A_arr = np.array(A.matrix).reshape(3, 3)\r\n U, S, V = np.linalg.svd(A_arr)\r\n M_arr = np.matmul(np.transpose(V), np.transpose(U))\r\n M = mat3(M_arr.ravel().tolist())\r\n v_out = M*(v - p_wgt) + q_wgt\r\n return v_out", "def img_map_transforms(ts):\n # XXX TODO: unchecked textures give error of variable referenced before assignment XXX\n # POV-Ray \"scale\" is not a number of repetitions factor, but ,its\n # inverse, a standard scale factor.\n # 0.5 Offset is needed relatively to scale because center of the\n # scale is 0.5,0.5 in blender and 0,0 in POV\n # Strange that the translation factor for scale is not the same as for\n # translate.\n # TODO: verify both matches with other blender renderers / internal in previous versions.\n image_map_transforms = \"\"\n image_map_transforms = \"scale <%.4g,%.4g,%.4g> translate <%.4g,%.4g,%.4g>\" % (\n ts.scale[0],\n ts.scale[1],\n ts.scale[2],\n ts.offset[0],\n ts.offset[1],\n ts.offset[2],\n )\n # image_map_transforms = (\" translate <-0.5,-0.5,0.0> scale <%.4g,%.4g,%.4g> translate <%.4g,%.4g,%.4g>\" % \\\n # ( 1.0 / ts.scale.x,\n # 1.0 / ts.scale.y,\n # 1.0 / ts.scale.z,\n # (0.5 / ts.scale.x) + ts.offset.x,\n # (0.5 / ts.scale.y) + ts.offset.y,\n # ts.offset.z))\n # image_map_transforms = (\n # \"translate <-0.5,-0.5,0> \"\n # \"scale <-1,-1,1> * <%.4g,%.4g,%.4g> \"\n # \"translate <0.5,0.5,0> + <%.4g,%.4g,%.4g>\" % \\\n # (1.0 / ts.scale.x,\n # 1.0 / ts.scale.y,\n # 1.0 / ts.scale.z,\n # ts.offset.x,\n # ts.offset.y,\n # ts.offset.z)\n # )\n 
return image_map_transforms", "def ComputeGeometricParameters(self):\n # extracting inner orientation params\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n # computing algebric params\n tx = a0;\n ty = b0\n theta = np.arctan(b1 / b2)\n gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))\n sx = a1 * np.cos(theta) - a2 * np.sin(theta)\n sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)\n\n return {\"translationX\": tx, \"translationY\": ty, \"rotationAngle\": np.rad2deg(theta), \"scaleFactorX\": sx,\n \"scaleFactorY\": sy, \"shearAngle\": np.rad2deg(gamma)}", "def alignPairShapes(s1,s2,weights):\n\n\n s1=np.asarray(s1)\n s2=np.asarray(s2)\n \n x1k=s1[:,0]\n y1k=s1[:,1]\n x2k=s2[:,0]\n y2k=s2[:,1]\n\n X1=sum(x1k*weights) \n X2=sum(x2k*weights)\n\n Y1=sum(y1k*weights)\n Y2=sum(y2k*weights)\n\n Z=sum(weights*(pow(x2k,2)+pow(y2k,2)))\n\n W=sum(weights)\n\n C1=sum(weights*(x1k*x2k+y1k*y2k))\n\n C2=sum(weights*(y1k*x2k-x1k*y2k))\n \n a=np.asarray([[X2,-Y2,W,0],[Y2,X2,0,W],[Z,0,X2,Y2],[0,Z,-Y2,X2]])\n b=np.asarray([X1,Y1,C1,C2])\n\n x=np.linalg.solve(a,b)\n\n ax=x[0]\n ay=x[1]\n tx=x[2]\n ty=x[3]\n return ax,ay,tx,ty", "def align(): # open EH and fast shutter\n\t#marAuxiliary.closeMarShield()\n\td2in()\n\td3in()\n\tsh('o')", "def project(points, camera_params, theta):\n \"\"\"\n Function takes input of 3d_points, transformations and Convert 3-D points to 2-D by projecting onto images. \n Input:\n points: 3D points in world frame\n camera_params: parameters of camera corrosponding to the point\n theta: Needed For PAL camera to specify the sub camera index for the points\n Output:\n points_proj: 2D reprojected points for 3D points \n\n \"\"\"\n # Convert the 3D points to Camera Frame by rotaion followes by translation\n points_proj1 = rotate(points[:,0:3], camera_params[:, :3])\n points_proj1 += camera_params[:, 3:6]\n # FOR PAL: Converting into the Sub-camera Frame by respective rotation\n thetas = theta * np.pi / 3 \n points_proj = np.copy(points_proj1)\n points_proj[:,0] = points_proj1[:,0]*np.cos(thetas) - points_proj1[:,2]*np.sin(thetas)\n points_proj[:,2] = points_proj1[:,0]*np.sin(thetas) + points_proj1[:,2]*np.cos(thetas)\n # Avoiding Zero error\n for i in range(len(points_proj)):\n if(points_proj[i,2]==0):\n points_proj[i,0] = 0\n points_proj[i,1] = 0\n points_proj[i,2] = 1\n # 2D projection\n points_proj = points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj**2, axis=1)\n r = 1 + k1 * n + k2 * n**2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def vs4_func_2(ns, traj, vs_def_beads_ids, vs_params):\n i, j, k, l = vs_def_beads_ids\n a, b, c = vs_params # weight, weight, nm\n c = c * 10 # retrieve amgstrom for MDA\n\n for ts in ns.aa2cg_universe.trajectory:\n pos_i = ns.aa2cg_universe.atoms[i].position\n pos_j = ns.aa2cg_universe.atoms[j].position\n pos_k = ns.aa2cg_universe.atoms[k].position\n pos_l = ns.aa2cg_universe.atoms[l].position\n r_ij = pos_j - pos_i\n r_ik = pos_k - pos_i\n r_il = pos_l - pos_i\n r_ja = a * r_ik - r_ij\n r_jb = b * r_il - r_ij\n r_m = np.cross(r_ja, r_jb)\n traj[ts.frame] = pos_i - c * (r_m / mda.lib.mdamath.norm(r_m))", "def origami_H2_2cyl(w1,h1,t1,w2,h2,t2):\n assert((w2 < 
w1) and (t1 < w1) and (t2 < w2))\n\n # v for volumes and z for z\n v1 = h1*w1\n v2 = h2*w2\n z1 = (h1-1)*w1 + 1\n z2 = v1 + (h2-1)*w2 + 1\n\n # the horizontal permutation\n x = [None] + range(2,v1+v2+1) + [1]\n for i in range(h1):\n x[(i+1)*w1] = i*w1 + 1\n for i in range(h2):\n x[v1 + (i+1)*w2] = v1 + i*w2 + 1\n\n # the vertical permutation\n y = ([None] +\n range(w1+1,v1+1) + [None]*w1 +\n range(v1+w2+1,v1+v2+1) + [None]*w2)\n\n for i in range(w2):\n # up-left of the first cylinder\n # print \"U1L) z1 + (t1+i)%w1 -> 1+v1+i: \", z1+(t1+i)%w1, 1+v1+i\n y[z1+(t1+i)%w1] = 1+v1+i\n for i in range(w2):\n # up of the second cylinder\n # print \"U2) z2+(t2+i)%w2 -> 1 + (t1+i)%w1: \", z2+(t2+i)%w2, 1+(t1+i)%w1\n y[z2+(t2+i)%w2] = 1+i\n for i in range(w1-w2):\n # up-right of the first cylinder\n # print \"U1R) z1+w2+(t1+i) -> 1+i: \", z1+(w2+t1+i)%w1, 1+w2+i\n y[z1+(w2+t1+i)%w1] = 1+w2+i\n\n return Origami(x[1:],y[1:])", "def cart2polar3d(cartesian):\n radius = np.linalg.norm(cartesian)\n theta = np.cos", "def get_affine_matrix2d(\n translations: Tensor,\n center: Tensor,\n scale: Tensor,\n angle: Tensor,\n sx: Tensor | None = None,\n sy: Tensor | None = None,\n) -> Tensor:\n transform: Tensor = get_rotation_matrix2d(center, -angle, scale)\n transform[..., 2] += translations # tx/ty\n\n # pad transform to get Bx3x3\n transform_h = convert_affinematrix_to_homography(transform)\n\n if any(s is not None for s in [sx, sy]):\n shear_mat = get_shear_matrix2d(center, sx, sy)\n transform_h = transform_h @ shear_mat\n\n return transform_h", "def update_parameters(params, grads, alpha):\n n_layers = len(params) // 2\n for i in range(n_layers):\n params['w%s' % (i+1)] = (\n params['w%s' % (i+1)] - alpha * grads['dw%s' % (i+1)])\n params['b%s' % (i+1)] = (\n params['b%s' % (i+1)] - alpha * grads['db%s' % (i+1)])\n return params", "def eq_to_3d(ra, dec):\r\n x = np.cos(ra) * np.cos(dec)\r\n y = np.sin(ra) * np.cos(dec)\r\n z = np.sin(dec)\r\n return x, y, z", "def EncodeMorton3D(x, y, z):\r\n return Expand3D(x) + (Expand3D(y) << 1) + (Expand3D(z) << 2)", "def transverse_location(n1, theta1, Delta, a, z):\n beta = n1 * np.cos(theta1)\n Gamma = n1 * np.sqrt(2 * Delta) / beta / a\n A = a * np.sin(theta1) / np.sqrt(2 * Delta)\n return A * np.sin(Gamma * z)", "def angle_maps(gamma, delta, ci, cj, w, h, SDD, pxl_size, ph):\n gamma_map = np.empty((h,w)) # initialize detector gamma map\n delta_map = np.empty((h,w)) # initialize detector delta map\n d = np.empty((h,w)) # initialize detector distance map\n corr_i = np.empty((h,w)) # initialize flat detector correction map\n g_offset = (-1.08435537e-6*gamma**2 - \n 0.00084077357*gamma - \n 0.0128920777) # gamma offset (calibration)\n gamma += g_offset # correct gamma position\n d_offset = (1.7280529238e-6*delta**2 - \n 0.000700361461*delta - \n 0.00367551936) # delta offset (calibration)\n delta += d_offset # correct delta position\n nom_gamma = np.deg2rad(gamma) # convert nominal det angles to [rad]\n nom_delta = np.deg2rad(delta) # convert nominal det angles to [rad]\n GAM = np.array([[np.cos(nom_gamma),np.sin(nom_gamma),0], \n [-np.sin(nom_gamma), np.cos(nom_gamma),0], \n [0,0,1]]) # \\Gamma rotational matrix\n DEL = np.array([[1,0,0], # \\Delta rotational matrix\n [0,np.cos(nom_delta),-np.sin(nom_delta)], \n [0,np.sin(nom_delta),np.cos(nom_delta)]])\n rot_mat = np.matmul(GAM,DEL) # multiply rotational matrices\n for j in range(h):\n dz = (cj - j)*pxl_size # delta z (z-distance from det. 
center)\n for i in range(w):\n dx = (ci - i)*pxl_size # delta x (x-distance from det. center)\n di = np.sqrt(dx**2 + SDD**2 + dz**2) # sample-to-pixel distance\n dr = np.sqrt(dx**2 + dz**2) # center-to-pixel distance\n p = np.array([dx, SDD, dz]) # central pixel position at\n # zero angles in the lab coordinates\n (xp, yp, zp) = np.matmul(rot_mat, p) # central pixel position at\n # nominal detector angle\n gamma_map[j][i] = np.arctan(xp/yp) # map of gamma pixel values\n delta_map[j][i] = np.arcsin(zp/di) # map of delta pixel values\n d[j][i] = di # map of SDD distances\n corr_i[j][i] = 1/(np.cos(np.arctan(dr/SDD))) # flat det. corr.\n corr_d = np.power(d,2)/np.power(SDD,2) # flat det. corr.\n chi = np.arctan(np.tan(delta_map)/np.tan(gamma_map)) # map of chi\n Phor = (1 - \n np.power(np.sin(gamma_map),2)) # horizontal component of \n # polarization correction\n Pver = (1 - \n np.power(np.sin(delta_map)*np.cos(gamma_map),2)) # vertical comp.\n # of polarization correction\n P = ph*Phor + (1-ph)*Pver # polarization correction\n tth = np.arccos(np.cos(delta_map)*np.cos(gamma_map)) # 2th map\n L = 1/(np.sin(tth/2)*np.sin(tth)) # Lorentz correction\n flat = corr_i * corr_d # flat det. correction\n PL = P * L * flat # multiply corrrections\n return tth, chi, PL", "def angleAxis2rot3D(axis, theta):\n if len(axis) is not 3:\n raise ValueError('Number of axis element must be 3!')\n axis = axis.astype(float)\n axis /= np.linalg.norm(axis)\n a = axis[0]\n b = axis[1]\n c = axis[2]\n cosTheta = np.cos(theta)\n bracket = 1 - cosTheta\n aBracket = a * bracket\n bBracket = b * bracket\n cBracket = c * bracket\n sinTheta = np.sin(theta)\n aSinTheta = a * sinTheta\n bSinTheta = b * sinTheta\n cSinTheta = c * sinTheta\n rot3D = np.array([[a*aBracket+cosTheta, a*bBracket-cSinTheta, a*cBracket+bSinTheta],\n [b*aBracket+cSinTheta, b*bBracket+cosTheta, b*cBracket-aSinTheta],\n [c*aBracket-bSinTheta, c*bBracket+aSinTheta, c*cBracket+cosTheta]])\n return rot3D", "def vs2_func_1(ns, traj, vs_def_beads_ids, vs_params):\n i, j = vs_def_beads_ids\n a = vs_params # weight\n weights = np.array([1 - a, a])\n\n for ts in ns.aa2cg_universe.trajectory:\n traj[ts.frame] = ns.aa2cg_universe.atoms[[i, j]].center(weights)", "def mesh_verts_align(self, smpl_verts, verts, smpl_J, J_3d, eps=1e-8):\n # finding bounding boxes\n bbox_1_x_min, bbox_1_x_max = np.min (smpl_verts[:, 0]), np.max (smpl_verts[:, 0])\n bbox_1_y_min, bbox_1_y_max = np.min (smpl_verts[:, 1]), np.max (smpl_verts[:, 1])\n # bbox_1_z_min, bbox_1_z_max = np.min (smpl_verts[:, 2]), np.max (smpl_verts[:, 2])\n # H1 = bbox_1_z_max - bbox_1_z_min\n W1 = bbox_1_y_max - bbox_1_y_min\n D1 = bbox_1_x_max - bbox_1_x_min\n\n bbox_2_x_min, bbox_2_x_max = np.min (verts[:, 0]), np.max (verts[:, 0])\n bbox_2_y_min, bbox_2_y_max = np.min (verts[:, 1]), np.max (verts[:, 1])\n # bbox_2_z_min, bbox_2_z_max = np.min (verts[:, 2]), np.max (verts[:, 2])\n # H2 = bbox_2_z_max - bbox_2_z_min\n W2 = bbox_2_y_max - bbox_2_y_min\n D2 = bbox_2_x_max - bbox_2_x_min\n\n # get_centers\n # center_1 = 0.5 * np.array ([(bbox_1_x_min + bbox_1_x_max),\n # (bbox_1_y_min + bbox_1_y_max),\n # (bbox_1_z_min + bbox_1_z_max)])\n #\n # center_2 = 0.5 * np.array ([(bbox_2_x_min + bbox_2_x_max),\n # (bbox_2_y_min + bbox_2_y_max),\n # (bbox_2_z_min + bbox_2_z_max)])\n\n verts = verts - J_3d[0]\n J_3d = J_3d - J_3d[0]\n s = ((D1 / D2 + eps) + (W1 / W2 + eps)) / 2.0\n # verts[:, 0] = verts[:, 0] * (D1 / D2 + eps)\n # verts[:, 1] = verts[:, 1] * (W1 / W2 + eps)\n # verts[:, 2] = verts[:, 2] * (H1 / H2 + eps)\n 
verts = verts * s\n J_3d = J_3d * s\n\n verts = verts + smpl_J[0]\n J_3d = J_3d + smpl_J[0]\n return verts.astype ('float16'), J_3d.astype ('float16')", "def azalt(ra, dec):\n\tx = rectanglize(ra, dec)\n\ty = np.dot(R_1, x)\n\tz = np.dot(R_2, y)\n\treturn sphericalize(z)", "def alignSurface(*args, caching: bool=True, curvatureContinuity: bool=False, curvatureScale1:\n Union[float, bool]=0.0, curvatureScale2: Union[float, bool]=0.0, directionU:\n bool=True, joinParameter: Union[float, bool]=123456.0, nodeState: Union[int,\n bool]=0, positionalContinuity: bool=True, positionalContinuityType: Union[int,\n bool]=1, reverse1: bool=False, reverse2: bool=False, swap1: bool=False, swap2:\n bool=False, tangentContinuity: bool=True, tangentContinuityType: Union[int,\n bool]=1, tangentScale1: Union[float, bool]=1.0, tangentScale2: Union[float,\n bool]=1.0, twist: bool=False, attach: bool=True, constructionHistory:\n bool=True, keepMultipleKnots: bool=True, name: AnyStr=\"\", object: bool=True,\n replaceOriginal: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[List[AnyStr], Any]:\n pass", "def angle3pt(\n ax: float, ay: float, bx: float, by: float, cx: float, cy: float\n ) -> float:\n ang = math.degrees(math.atan2(cy - by, cx - bx) - math.atan2(ay - by, ax - bx))\n return ang + 360 if ang < 0 else ang", "def align(model, data):\n np.set_printoptions(precision=3,suppress=True)\n model_zerocentered = model - model.mean(1)\n data_zerocentered = data - data.mean(1)\n \n W = np.zeros( (3,3) )\n for column in range(model.shape[1]):\n W += np.outer(model_zerocentered[:,column],data_zerocentered[:,column])\n U,d,Vh = np.linalg.linalg.svd(W.transpose())\n S = np.matrix(np.identity( 3 ))\n if(np.linalg.det(U) * np.linalg.det(Vh)<0):\n S[2,2] = -1\n rot = U*S*Vh\n trans = data.mean(1) - rot * model.mean(1)\n \n model_aligned = rot * model + trans\n alignment_error = model_aligned - data\n \n trans_error = np.sqrt(np.sum(np.multiply(alignment_error,alignment_error),0)).A[0]\n \n return rot,trans,trans_error", "def test_3d_tranpose(): \n dic,data = ng.pipe.read_lowmem(\"common_data/3d_pipe/ft/test%03d.ft3\")\n fdic,fdata = ng.pipe.read(\"common_data/3d_pipe/ft/test%03d.ft3\")\n\n assert_array_equal(data.transpose()[0,1,2],fdata.transpose()[0,1,2])\n assert_array_equal(data.transpose((2,0,1))[0,1,2],\n fdata.transpose((2,0,1))[0,1,2])\n assert_array_equal(data.swapaxes(0,1)[0,1,2],fdata.swapaxes(0,1)[0,1,2])\n assert_array_equal(data.swapaxes(2,0)[0,1,2],fdata.swapaxes(2,0)[0,1,2])", "def comp_alphas(self):\n Rbo = self.get_Rbo()\n\n # alpha_Tt is the angle of the tooth to have the correct top width\n alpha_Tt = 2 * float(arcsin(self.W3 / (2 * Rbo)))\n\n # alpha_0 + alpha_Tt = slot_ptich\n # Zs * (alpha_0+alpha_Tt) = 2 pi\n alpha_0 = 2 * pi / self.Zs - alpha_Tt\n\n if self.is_outwards():\n # alpha_Tb is the angle of the tooth to have the correct bottom width\n alpha_Tb = 2 * float(arcsin(self.W3 / (2 * (Rbo + self.H2))))\n else:\n alpha_Tb = 2 * float(arcsin(self.W3 / (2 * (Rbo - self.H2))))\n\n # Zs * (alpha_2+alpha_Tb) = 2 pi\n alpha_2 = 2 * pi / self.Zs - alpha_Tb\n\n return (alpha_0, alpha_2)", "def create_tangent_angles_equal(self):\n\n self.text_mirror = TextMobject(r\"Specular reflection\")\n self.text_mirror.move_to(4.0 * RIGHT + 2.0 * UP)\n\n self.tex_derive_ti_tr = TexMobject(r\"\\theta_{i}\", r\"=\", r\"\\theta_{r}\", r\"=\", r\"\\theta_{0}\")\n self.tex_derive_ti_tr[0].set_color(self.tex_theta_in_color)\n self.tex_derive_ti_tr[2].set_color(self.tex_theta_ref_color)\n 
self.tex_derive_ti_tr[4].set_color(RED)\n self.tex_derive_ti_tr.move_to(4.0 * RIGHT + 1.0 * UP)\n\n self.tex_derive_tan_tin_tan_tr = TexMobject(r\"90^{\\circ}\", r\"-\", r\"\\theta_{i}\",\n r\"=\",\n r\"90^{\\circ}\", r\"-\", r\"\\theta_{r}\",\n r\"=\", r\"\\theta_{0}'\")\n for i in range(0,3):\n self.tex_derive_tan_tin_tan_tr[ i].set_color(self.tex_theta_in_color)\n self.tex_derive_tan_tin_tan_tr[4+i].set_color(self.tex_theta_ref_color)\n self.tex_derive_tan_tin_tan_tr[8].set_color(RED)\n self.tex_derive_tan_tin_tan_tr.move_to(4.0 * RIGHT + 0.0 * UP)\n\n self.theta_0 = TexMobject(r\"\\theta_{0}\"). set_color(RED)\n self.theta_0_d = TexMobject(r\"\\theta_{0}'\").set_color(RED)", "def rotation(self, p1, p2, p3):\n return (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0])", "def align_z_along_fixed_ends(xyz_file_parts, fixed_beginning, fixed_end):\n\n\t\tmolecule_axis = [xyz_file_parts[-1][1,fixed_end],xyz_file_parts[-1][2,fixed_end],xyz_file_parts[-1][3,fixed_end]]\n\n\n\t\tangle = np.arccos(molecule_axis[2]/np.linalg.norm(molecule_axis))\n\t\ttheta = angle\n\n\t\tif(angle != 0):\n\t\t\t#calculate rotation axis\n\t\t\trotation_axis = np.cross(molecule_axis, [0.0,0.0,1.0])\n\t\t\trotation_axis = 1.0/np.linalg.norm(rotation_axis)*rotation_axis\n\t\t\tu = rotation_axis\n\n\t\t\t#calculate rotation_matrix\n\t\t\trotation_matrix = [[np.cos(theta) + u[0]**2 * (1-np.cos(theta)), u[0] * u[1] * (1-np.cos(theta)) - u[2] * np.sin(theta), u[0] * u[2] * (1 - np.cos(theta)) + u[1] * np.sin(theta)],\n\t [u[0] * u[1] * (1-np.cos(theta)) + u[2] * np.sin(theta), np.cos(theta) + u[1]**2 * (1-np.cos(theta)), u[1] * u[2] * (1 - np.cos(theta)) - u[0] * np.sin(theta)],\n\t [u[0] * u[2] * (1-np.cos(theta)) - u[1] * np.sin(theta), u[1] * u[2] * (1-np.cos(theta)) + u[0] * np.sin(theta), np.cos(theta) + u[2]**2 * (1-np.cos(theta))]]\n\n\t\t\tfor j in range(0, len(xyz_file_parts)):\n\t\t\t\tfor i in range(0, len(xyz_file_parts[j][1,:])):\n\t\t\t\t\t \n\t\t\t\t\tvector_to_rotate = [round(float(xyz_file_parts[j][1,i]),5),round(float(xyz_file_parts[j][2,i]),5),round(float(xyz_file_parts[j][3,i]),5)]\n\t\t\t\t\trotated_vector = np.asmatrix(rotation_matrix)*np.asmatrix(vector_to_rotate).T\n\t\t\t\t\txyz_file_parts[j][1,i] = round(rotated_vector[0,0],5)\n\t\t\t\t\txyz_file_parts[j][2,i] = round(rotated_vector[1,0],5)\n\t\t\t\t\txyz_file_parts[j][3,i] = round(rotated_vector[2,0],5)\n\t\t\treturn xyz_file_parts\n\t\telse:\n\t\t\treturn xyz_file_parts", "def alignCtx(*args, align: bool=True, anchorFirstObject: bool=False, distribute: bool=True,\n exists: bool=True, history: bool=True, image1: Union[AnyStr, bool]=\"\", image2:\n Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\", name: AnyStr=\"\",\n showAlignTouch: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def _correct_image3D_by_microscope_param(image3D:np.ndarray, microscope_params:dict):\n _image = copy.copy(image3D)\n if not isinstance(microscope_params, dict):\n raise TypeError(f\"Wrong inputt ype for microscope_params, should be a dict\")\n # transpose\n if 'transpose' in microscope_params and microscope_params['transpose']:\n _image = _image.transpose((0,2,1))\n if 'flip_horizontal' in microscope_params and microscope_params['flip_horizontal']:\n _image = np.flip(_image, 2)\n if 'flip_vertical' in microscope_params and microscope_params['flip_vertical']:\n _image = np.flip(_image, 1)\n return _image", "def get_affine_matrix2d(\n translations: torch.Tensor,\n center: torch.Tensor,\n scale: 
torch.Tensor,\n angle: torch.Tensor,\n sx: Optional[torch.Tensor] = None,\n sy: Optional[torch.Tensor] = None,\n) -> torch.Tensor:\n transform: torch.Tensor = get_rotation_matrix2d(center, -angle, scale)\n transform[..., 2] += translations # tx/ty\n\n # pad transform to get Bx3x3\n transform_h = convert_affinematrix_to_homography(transform)\n\n if any(s is not None for s in [sx, sy]):\n shear_mat = get_shear_matrix2d(center, sx, sy)\n transform_h = transform_h @ shear_mat\n\n return transform_h", "def reflection ((x,y),(w,z)):\n twodot = 2*dotprod((x,y),(w,z))\n a, b = x - twodot* w, y - twodot*z\n return (a,b)", "def stempot(self,xmax,ymax,nx,ny,atms,pixelshift,scalefactor):\n #zed=2 for rutherford scattering of the nucleus, less for screening\n zed = 1.7\n\n ix = numpy.arange(1.0,nx)\n iy = numpy.arange(1.0,ny)\n dx = xmax/nx\n dy = ymax/ny\n rx = numpy.arange(0,xmax-dx,dx)\n ry = numpy.arange(0,ymax-dy,dy)\n\n Zatom = atms.get_atomic_numbers()\n #translate atoms such that the center of mass is in the center of the computational cell\n com = atms.get_center_of_mass()\n #com = [ 44.40963074 , 44.65497562 , 44.90406073] #for AuNP\n #com = numpy.array(com)\n #print 'com',com -0.149836425, 0.29967285, 0\n #com += [0.41205016875, 0.6742639125, 0] #for rotated line profile \n #com += [-0.149836425, 0.29967285, 0] #for AuNP\n #com += pixelshift\n #print 'com+pixelshift',com\n cop = xmax/2.0\n trans = [cop-i for i in com]\n atms.translate(trans)\n positions=atms.get_positions()\n ax=[]\n ay=[]\n az=[]\n for o,t,h in positions:\n ax.append(o)\n ay.append(t)\n az.append(h)\n ax = numpy.array(ax)\n ay = numpy.array(ay)\n az = numpy.array(az)\n amax = len(Zatom)\n\n #find boundaries of slice\n axmin = min(ax)\n axmax = max(ax)\n aymin = min(ay)\n aymax = max(ay)\n\n V= numpy.zeros((nx,ny))\n\n #map x and y coords of the atoms to the nearest grid points\n #A fraction of the atom must be assigned to the closest gridpoints\n #to avoid sum and difference frequencies appearing in the image\n #grid point to the left of the atom\n ix = numpy.array([math.floor(axi/dx) for axi in ax])\n #apply periodic boundary conditions\n iax = numpy.array([math.fmod(iaxi,nx) for iaxi in ix])\n ibx = numpy.array([math.fmod(iaxi+1,nx) for iaxi in ix])\n #fraction of atom at iax\n fax = numpy.array([1-math.fmod((axi/dx),1 ) for axi in ax])\n #grid point above the atom\n iy = numpy.array([math.floor(ayi/dy) for ayi in ay])\n #apply periodic boundary conditions\n iay = numpy.array([math.fmod(iayi,ny) for iayi in iy])\n iby = numpy.array([math.fmod(iayi+1,ny) for iayi in iy])\n #fraction of atom at iay \n fay = numpy.array([1-math.fmod((ayi/dy),1 ) for ayi in ay])\n #Add each atom to the potential grid\n V1 = numpy.array([fax[i] * fay[i] * (Zatom[i]**zed) for i in range(len(fax))])\n V2 = numpy.array([(1-fax[i]) * fay[i] * (Zatom[i]**zed) for i in range(len(fax))])\n V3 = numpy.array([fax[i] * (1-fay[i]) * (Zatom[i]**zed) for i in range(len(fax))])\n V4 = numpy.array([(1-fax[i]) * (1-fay[i]) * (Zatom[i]**zed) for i in range(len(fax))])\n #V1 = numpy.array([fax[i] * fay[i] * scalefactor for i in range(len(fax))])\n #V2 = numpy.array([(1-fax[i]) * fay[i] * scalefactor for i in range(len(fax))])\n #V3 = numpy.array([fax[i] * (1-fay[i]) * scalefactor for i in range(len(fax))])\n #V4 = numpy.array([(1-fax[i]) * (1-fay[i]) * scalefactor for i in range(len(fax))])\n\n for j in range(amax):\n V[iax[j],iay[j]] += V1[j]\n V[ibx[j],iay[j]] += V2[j]\n V[iax[j],iby[j]] += V3[j]\n V[ibx[j],iby[j]] += V4[j]\n rev_trans = [-1.0*i for i in 
trans]\n atms.translate(rev_trans)\n return V", "def ion2_position(self,x,y,z):\n axes_vector = np.array([self.a,self.b,self.c])\n self.ion2 = x*self.a + y*self.b + z*self.c\n self.position['1B'] = np.dot(self.position_map[1],axes_vector) + self.ion2\n self.position['2B'] = np.dot(self.position_map[2],axes_vector) + self.ion2\n self.position['3B'] = np.dot(self.position_map[3],axes_vector) + self.ion2\n self.position['4B'] = np.dot(self.position_map[4],axes_vector) + self.ion2\n self.position['5B'] = np.dot(self.position_map[5],axes_vector) + self.ion2\n self.position['6B'] = np.dot(self.position_map[6],axes_vector) + self.ion2\n self.position['7B'] = np.dot(self.position_map[7],axes_vector) + self.ion2\n self.position['8B'] = np.dot(self.position_map[8],axes_vector) + self.ion2", "def F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = self.edp_par['sigma_H1'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H1 + sigma_H1\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB *= 0.5\n FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)", "def task_three():\n # Formula to calculate:\n # q2 = (z2 / z1) * (R + T * nt / d) * q1\n # where R - rotation\n # T - translation\n # nt - normal vertex of common plane of the 3d points\n # d - shift of the common plane\n # and (R + T * nt / d) required homography transform\n # defined up to constant\n # But in our case T == 0\n tetta = 30 * np.pi / 180\n H = np.array([[1, 0, 0],\n [0, np.cos(tetta), -np.sin(tetta)],\n [0, np.sin(tetta), np.cos(tetta)],\n ])\n print(\"Homography transformation:\\n\", H)", "def tf_box_3d_to_anchor(boxes_3d):\n\n boxes_3d = tf.reshape(boxes_3d, [-1, 7])\n\n anchors_x = boxes_3d[:, 0]\n anchors_y = boxes_3d[:, 1]\n anchors_z = boxes_3d[:, 2]\n\n # Dimensions along x, y, z\n box_l = boxes_3d[:, 3]\n box_w = boxes_3d[:, 4]\n box_h = boxes_3d[:, 5]\n box_ry = boxes_3d[:, 6]\n\n # Ortho rotate\n half_pi = np.pi / 2\n box_ry = tf.round(box_ry / half_pi) * half_pi\n cos_ry = tf.abs(tf.cos(box_ry))\n sin_ry = tf.abs(tf.sin(box_ry))\n\n anchors_dimx = box_l * cos_ry + box_w * sin_ry\n anchors_dimy = box_h\n anchors_dimz = box_w * cos_ry + box_l * sin_ry\n\n anchors = tf.stack([anchors_x, anchors_y, anchors_z,\n anchors_dimx, anchors_dimy, anchors_dimz],\n axis=1)\n\n return anchors", "def angle_maps_slits(gamma, delta, ci, cj, w, h, SDD, pxl_size, ph, Rs):\n gamma_map = np.empty((h,w)) # initialize detector gamma map\n delta_map = np.empty((h,w)) # initialize detector delta map\n d = np.empty((h,w)) # initialize detector distance map\n corr_i = np.empty((h,w)) # initialize flat detector correction map\n g_offset = (-1.08435537e-6*gamma**2 - \n 0.00084077357*gamma - \n 0.0128920777) # gamma offset (calibration)\n gamma += g_offset # correct gamma position\n d_offset = (1.7280529238e-6*delta**2 - \n 0.000700361461*delta - \n 
0.00367551936) # delta offset (calibration)\n delta += d_offset # correct delta position\n nom_gamma = np.deg2rad(gamma) # convert nominal det angles to [rad]\n nom_delta = np.deg2rad(delta) # convert nominal det angles to [rad]\n GAM = np.array([[np.cos(nom_gamma),np.sin(nom_gamma),0], \n [-np.sin(nom_gamma), np.cos(nom_gamma),0], \n [0,0,1]]) # \\Gamma rotational matrix\n DEL = np.array([[1,0,0], # \\Delta rotational matrix\n [0,np.cos(nom_delta),-np.sin(nom_delta)], \n [0,np.sin(nom_delta),np.cos(nom_delta)]])\n rot_mat = np.matmul(GAM,DEL) # multiply rotational matrices\n for j in range(h):\n dz = (cj - j)*pxl_size # delta z (z-distance from det. center)\n for i in range(w):\n dx = (ci - i)*pxl_size # delta x (x-distance from det. center)\n di = np.sqrt(dx**2 + (SDD + Rs)**2 + \n dz**2) # sample-to-pixel distance\n dr = np.sqrt(dx**2 + dz**2) # center-to-pixel distance\n s = np.array([0, Rs, 0]) # sample-to-slit vector\n (xs, ys, zs) = np.matmul(rot_mat, s) # rotate s vector\n p = np.array([dx, (SDD + Rs), dz]) # central pixel position at\n # zero angles in the lab coordinates\n (xp, yp, zp) = np.matmul(rot_mat, p) # central pixel position at\n # nominal detector angle\n dps = np.sqrt((xp - xs)**2 + (yp - ys)**2 + \n (zp - zs)**2) # pixel-to-slit distance\n gamma_map[j][i] = np.arctan((xp - xs)/(yp - ys)) # gamma map\n delta_map[j][i] = np.arcsin((zp - zs)/dps) # delta map\n d[j][i] = di # map of SDD distances\n corr_i[j][i] = 1/(np.cos(np.arctan(dr/SDD))) # flat det. corr.\n corr_d = np.power(d,2)/np.power(SDD,2) # flat det. corr.\n chi = np.arctan(np.tan(delta_map)/np.tan(gamma_map)) # map of chi\n Phor = (1 - \n np.power(np.sin(gamma_map),2)) # horizontal component of \n # polarization correction\n Pver = (1 - \n np.power(np.sin(delta_map)*np.cos(gamma_map),2)) # vertical comp.\n # of polarization correction\n P = ph*Phor + (1-ph)*Pver # polarization correction\n tth = np.arccos(np.cos(delta_map)*np.cos(gamma_map)) # 2th map\n L = 1/(np.sin(tth/2)*np.sin(tth)) # Lorentz correction\n flat = corr_i * corr_d # flat det. 
correction\n PL = P * L * flat # multiply corrrections\n return tth, chi, PL", "def affine_align(x, y, p1, p2, g, s):\n #Create M, Ix, and Iy as Y x X matrices of 0's\n M = [[0]*(len(x)+1) for i in range(len(y)+1)]\n Ix = [[0]*(len(x)+1) for i in range(len(y)+1)]\n Iy = [[0]*(len(x)+1) for i in range(len(y)+1)]\n #Set up initial values for Ix and Iy\n #M infs along both axes\n for i in range(1, len(y)+1):\n M[i][0] = -math.inf\n for j in range(1, len(x)+1):\n M[0][j] = -math.inf\n #Ix: Aligning X with gap, horizontal move, infs along top row\n for i in range(0, len(y)+1):\n Ix[i][0] = -math.inf\n #Gap penalties along left column\n for j in range(1, len(x)+1):\n Ix[0][j] = -g if Ix[0][j-1] == -math.inf else Ix[0][j-1] - s\n #Iy: Aligning Y with gap, vertical move, infs along left column\n for j in range(0, len(x)+1):\n Iy[0][j] = -math.inf\n #Gap penalties along top row\n for i in range(1, len(y)+1):\n Iy[i][0] = -g if Iy[i-1][0] == -math.inf else Iy[i-1][0] - s\n #Populate remaining cells\n for i in range(1, len(y)+1):\n for j in range(1, len(x)+1):\n M[i][j] = max(M[i-1][j-1] + delta(x[j-1], y[i-1], p1, p2),\n Ix[i-1][j-1] + delta(x[j-1], y[i-1], p1, p2),\n Iy[i-1][j-1] + delta(x[j-1], y[i-1], p1, p2))\n Ix[i][j] = max(M[i][j-1] - g,\n Ix[i][j-1] - s)\n Iy[i][j] = max(M[i-1][j] - g,\n Iy[i-1][j] - s)\n #TRACEBACK\n x_ret=\"\"; y_ret=\"\"\n i = len(y); j = len(x)\n #Determine start matrix\n align_scores = (M[i][j], Iy[i][j], Ix[i][j])\n matrix_idx = align_scores.index(max(align_scores))\n #matrix_key will track the current matrix through the traceback\n matrix_key = [\"M\", \"Iy\", \"Ix\"][matrix_idx]\n while i > 0 and j > 0:\n #From M: Check diagonal moves back to all three matrices, align characters\n if matrix_key == \"M\":\n if M[i][j] == M[i-1][j-1] + p1 or M[i][j] == M[i-1][j-1] - p2:\n x_ret = x[j-1] + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1; j -= 1\n matrix_key = \"M\"\n elif M[i][j] == Iy[i-1][j-1] + p1 or M[i][j] == Iy[i-1][j-1] - p2:\n x_ret = x[j-1] + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1; j -= 1\n matrix_key = \"Iy\"\n elif M[i][j] == Ix[i-1][j-1] + p1 or M[i][j] == Ix[i-1][j-1] - p2:\n x_ret = x[j-1] + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1; j -= 1\n matrix_key = \"Ix\"\n #From Iy: Check vertical move to Iy and M, align y character with x gap\n elif matrix_key == \"Iy\":\n if Iy[i][j] == M[i-1][j] - g:\n x_ret = \"_\" + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1\n matrix_key = \"M\"\n elif Iy[i][j] == Iy[i-1][j] - s:\n x_ret = \"_\" + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1\n matrix_key = \"Iy\"\n #From Ix: Check horizontal move to Ix and M, align x character with y gap\n elif matrix_key == \"Ix\":\n if Ix[i][j] == M[i][j-1] - g:\n x_ret = x[j-1] + x_ret\n y_ret = \"_\" + y_ret\n j -= 1\n matrix_key = \"M\"\n elif Ix[i][j] == Ix[i][j-1] - s:\n x_ret = x[j-1] + x_ret\n y_ret = \"_\" + y_ret\n j -= 1\n matrix_key = \"Ix\"\n #Finish sequence if edge was reached\n #i>0 means mach remaining characters in y with gaps in x\n if i > 0:\n x_ret = (\"_\"*i) + x_ret\n y_ret = y[0:i] + y_ret\n #j>0 means mach remaining characters in x with gaps in y\n if j > 0:\n x_ret = x[0:j] + x_ret\n y_ret = (\"_\"*j) + y_ret\n #Return alinged strings\n return (x_ret, y_ret)", "def translation(self, x, y, z) -> None:\n ...", "def angle_axis_to_rot3d(axis, theta):\n if isinstance(axis, string_types):\n axis = axis.lower()\n if axis == 'x':\n axis = np.array([1., 0., 0.])\n elif axis == 'y':\n axis = np.array([0., 1., 0.])\n elif axis == 'z':\n axis = np.array([0., 0., 1.])\n else:\n raise 
ValueError(\"Axis should be 'x', 'y', 'z' or a 3D vector.\")\n elif len(axis) != 3:\n raise ValueError(\"Axis should be 'x', 'y', 'z' or a 3D vector.\")\n axis = axis.astype(float)\n axis /= np.linalg.norm(axis)\n a = axis[0]\n b = axis[1]\n c = axis[2]\n cos_theta = np.cos(theta)\n bracket = 1 - cos_theta\n a_bracket = a * bracket\n b_bracket = b * bracket\n c_bracket = c * bracket\n sin_theta = np.sin(theta)\n a_sin_theta = a * sin_theta\n b_sin_theta = b * sin_theta\n c_sin_theta = c * sin_theta\n rot3d = np.array(\n [[a * a_bracket + cos_theta, a * b_bracket - c_sin_theta, a * c_bracket + b_sin_theta],\n [b * a_bracket + c_sin_theta, b * b_bracket + cos_theta, b * c_bracket - a_sin_theta],\n [c * a_bracket - b_sin_theta, c * b_bracket + a_sin_theta, c * c_bracket + cos_theta]])\n return rot3d", "def cartesian_To_Center(self, x, y, z):\n\n if x > 0.0 and -self.L_cap <= y <= 0.0:\n s = self.L_cap + y\n xc = x - self.rb\n yc = z\n else:\n theta = full_arctan2(y, x)\n if theta <= self.ang:\n s = theta * self.rb + self.L_cap\n xc = np.sqrt(x ** 2 + y ** 2) - self.rb\n yc = z\n elif self.ang < theta <= 2 * np.pi: # i'm being lazy here and not limiting the real end\n x0, y0 = np.cos(self.ang) * self.rb, np.sin(self.ang) * self.rb\n thetaEndPerp = np.pi - np.arctan(-1 / np.tan(self.ang))\n x, y = x - x0, y - y0\n deltaS, xc = np.cos(thetaEndPerp) * x + np.sin(-thetaEndPerp) * y, np.sin(thetaEndPerp) * x + np.cos(\n thetaEndPerp) * y\n yc = z\n xc = -xc\n s = (self.ang * self.rb + self.L_cap) + deltaS\n else:\n raise ValueError\n return s, xc, yc", "def __compose_transformation(self):\n s = self.scale\n rotR = self.rotation\n t = self.translation\n T = np.eye(4)\n T[0:3, 3] = t\n R = np.eye(4)\n R[0:3, 0:3] = rotR\n M = T.dot(R)\n if s == 1:\n M = T.dot(R)\n else:\n S = np.eye(4)\n S[0:3, 0:3] = np.diag([s, s, s])\n M = T.dot(R).dot(S)\n return M", "def T2(params):\n\t# handle the base frame, eqn 3.9, p36\n\tt = np.array([\n\t\t[1.0, 0.0, 0.0, 0.0],\n\t\t[0.0, 1.0, 0.0, 0.0],\n\t\t[0.0, 0.0, 1.0, 0.0],\n\t\t[0.0, 0.0, 0.0, 1.0]\n\t])\n\tfor i, p in enumerate(params):\n\t\tt = t.dot(rot(*p))\n\treturn t", "def align_c_axis_along_001(structure):\n\n c = structure.lattice._matrix[2]\n z = [0, 0, 1]\n axis = np.cross(c, z)\n if not(axis[0] == 0 and axis[1] == 0):\n theta = (np.arccos(np.dot(c, z) / (np.linalg.norm(c) * np.linalg.norm(z))))\n R = get_rotation_matrix(axis, theta)\n rotation = SymmOp.from_rotation_and_translation(rotation_matrix=R)\n structure.apply_operation(rotation)\n return structure", "def kinect_transform(self, x, y, z):\n xposition = x\n yposition = y\n zposition = z\n\n return zposition, xposition, yposition", "def translation_3D(img, trans_x, trans_y, trans_z, cval=0.):\n \n if trans_x > 0:\n img[trans_x:,...] = img[:-trans_x,...] \n img[:trans_x,...] = cval\n elif trans_x < 0:\n img[:trans_x,...] = img[-trans_x:,...] \n img[trans_x:,...] 
= cval\n \n if trans_y > 0:\n img[:,trans_y:,:,:] = img[:,:-trans_y,:,:] \n img[:,:trans_y,:,:] = cval\n elif trans_y < 0:\n img[:,:trans_y,:,:] = img[:,-trans_y:,:,:] \n img[:,trans_y:,:,:] = cval\n \n if trans_z > 0:\n img[...,trans_z:,:] = img[...,:-trans_z,:] \n img[...,:trans_z,:] = cval\n elif trans_z < 0:\n img[...,:trans_z,:] = img[...,-trans_z:,:] \n img[...,trans_z:,:,:] = cval\n \n return img", "def transform(self, ((a, b), (c, d))=((1, 1), (-1, 1)), aligned_with_grid=False):\n (x0, y0), (x1, y1) = self.vertices\n return type(self)((int(a * x0 + c * y0), int(b * x0 + d * y0)),\n (int(a * x1 + c * y1), int(b * x1 + d * y1)),\n aligned_with_grid=aligned_with_grid)", "def update_affine_param( cur_af, last_af): # A2(A1*x+b1) + b2 = A2A1*x + A2*b1+b2\n cur_af = cur_af.view(cur_af.shape[0], 4, 3)\n last_af = last_af.view(last_af.shape[0],4,3)\n updated_af = torch.zeros_like(cur_af.data).to(cur_af.device)\n dim =3\n updated_af[:,:3,:] = torch.matmul(cur_af[:,:3,:],last_af[:,:3,:])\n updated_af[:,3,:] = cur_af[:,3,:] + torch.squeeze(torch.matmul(cur_af[:,:3,:], torch.transpose(last_af[:,3:,:],1,2)),2)\n updated_af = updated_af.contiguous().view(cur_af.shape[0],-1)\n return updated_af", "def xyz2plane(x,y,z, new_x=[], plane=[], origin=None):\n # preliminary stuff\n if origin != None: x = x - origin\n a,b,c,d = plane\n bottom = np.sqrt(a*a + b*b + c*c) # normalize\n a,b,c,d = a/bottom, b/bottom, c/bottom, d/bottom\n px, py, pz = new_x\n bot = np.sqrt(px*px + py*py + pz*pz) #normalize\n px, py, pz = px/bot, py/bot, pz/bot\n p0 = [px,py,pz]\n # do rotation\n z_hat = [a,b,c]\n y_hat = cross(z_hat, p0)\n x_hat = cross(y_hat, z_hat)\n if type(x)==type(arr) or type(x)==type([]):\n xp, yp, zp = [], [], []\n for i in range(len(x)):\n xp.append(dot([x[i],y[i],z[i]], x_hat))\n yp.append(dot([x[i],y[i],z[i]], y_hat))\n zp.append(dot([x[i],y[i],z[i]], z_hat))\n else:\n xp = dot([x,y,z], x_hat)\n yp = dot([x,y,z], y_hat)\n zp = dot([x,y,z], z_hat)\n return xp, yp, zp" ]
[ "0.76915425", "0.6404001", "0.6382034", "0.6108902", "0.6060465", "0.6017704", "0.57541436", "0.5705645", "0.5625745", "0.5595656", "0.5591025", "0.55300176", "0.5473145", "0.53983897", "0.53954655", "0.5385849", "0.53646827", "0.53643596", "0.536235", "0.5350919", "0.5346317", "0.53293073", "0.53148276", "0.53096694", "0.5309128", "0.52827907", "0.5273503", "0.5267859", "0.52617663", "0.525534", "0.5250833", "0.5249019", "0.5248036", "0.5246486", "0.5243372", "0.5222907", "0.5222205", "0.52140826", "0.5211444", "0.5201146", "0.5197476", "0.5185851", "0.51833445", "0.5171697", "0.5163894", "0.51585984", "0.5154378", "0.51529694", "0.5152591", "0.51510864", "0.5150561", "0.51177496", "0.5108603", "0.51044184", "0.5104009", "0.5098016", "0.50925505", "0.50918674", "0.50890964", "0.5076465", "0.506508", "0.5060541", "0.5045744", "0.5029398", "0.50252664", "0.50094765", "0.50073683", "0.5005735", "0.5005281", "0.4988528", "0.49824896", "0.49703577", "0.49686432", "0.49681535", "0.49582702", "0.49564183", "0.4950071", "0.49484822", "0.49348092", "0.4930386", "0.49292597", "0.49276948", "0.49269262", "0.49193728", "0.4912565", "0.4910807", "0.49088544", "0.4899996", "0.48955253", "0.48868582", "0.48861724", "0.48820895", "0.48795775", "0.4876878", "0.48675188", "0.48603594", "0.48576683", "0.48557213", "0.48541582", "0.48449603" ]
0.7371874
1
Commented by Zhengfan Yang on 05/01/07 I made some change to the original amoeba so that it can now pass out some values calculated by func other than the criteria. This is important in multilevel amoeba refinement because otherwise, upper level refinement will lose the information of lower level refinement.
def amoeba_multi_level(var, scale, func, ftolerance=1.e-4, xtolerance=1.e-4, itmax=500, data=None):
    #print " ENTER AMOEBA MULTI LEVEL"
    nvar = len(var)                 # number of variables in the minimization
    nsimplex = nvar + 1             # number of vertices in the simplex

    # first set up the simplex
    simplex = [0]*(nvar+1)          # set the initial simplex
    simplex[0] = var[:]
    for i in xrange(nvar):
        simplex[i+1] = var[:]
        simplex[i+1][i] += scale[i]

    fvalue = []
    for i in xrange(nsimplex):      # set the function values for the simplex
        result, passout = func(simplex[i], data=data)
        #print " amoeba setting ",i,simplex[i],result, passout
        fvalue.append([result, passout])

    # Ooze the simplex to the maximum
    iteration = 0
    while 1:
        # find the index of the best and worst vertices in the simplex
        ssworst = 0
        ssbest = 0
        for i in xrange(nsimplex):
            if fvalue[i][0] > fvalue[ssbest][0]:
                ssbest = i
            if fvalue[i][0] < fvalue[ssworst][0]:
                ssworst = i

        # get the average of the nsimplex-1 best vertices in the simplex
        pavg = [0.0]*nvar
        for i in xrange(nsimplex):
            if i != ssworst:
                for j in range(nvar): pavg[j] += simplex[i][j]
        for j in xrange(nvar): pavg[j] = pavg[j]/nvar   # nvar is nsimplex-1
        simscale = 0.0
        for i in range(nvar):
            simscale += abs(pavg[i]-simplex[ssworst][i])/scale[i]
        simscale = simscale/nvar

        # find the range of the function values
        fscale = (abs(fvalue[ssbest][0])+abs(fvalue[ssworst][0]))/2.0
        if fscale != 0.0:
            frange = abs(fvalue[ssbest][0]-fvalue[ssworst][0])/fscale
        else:
            frange = 0.0            # all the fvalues are zero in this case

        # have we converged?
        if (((ftolerance <= 0.0 or frange < ftolerance) and      # converged to maximum
             (xtolerance <= 0.0 or simscale < xtolerance)) or    # simplex contracted enough
            (itmax and iteration >= itmax)):                     # ran out of iterations
            return simplex[ssbest],fvalue[ssbest][0],iteration,fvalue[ssbest][1]

        # reflect the worst vertex
        pnew = [0.0]*nvar
        for i in xrange(nvar):
            pnew[i] = 2.0*pavg[i] - simplex[ssworst][i]
        fnew = func(pnew,data=data)
        if fnew[0] <= fvalue[ssworst][0]:
            # the new vertex is worse than the worst so shrink
            # the simplex.
            for i in xrange(nsimplex):
                if i != ssbest and i != ssworst:
                    for j in xrange(nvar):
                        simplex[i][j] = 0.5*simplex[ssbest][j] + 0.5*simplex[i][j]
                    fvalue[i] = func(simplex[i],data=data)
            for j in xrange(nvar):
                pnew[j] = 0.5*simplex[ssbest][j] + 0.5*simplex[ssworst][j]
            fnew = func(pnew, data=data)
        elif fnew[0] >= fvalue[ssbest][0]:
            # the new vertex is better than the best so expand
            # the simplex.
            pnew2 = [0.0]*nvar
            for i in xrange(nvar):
                pnew2[i] = 3.0*pavg[i] - 2.0*simplex[ssworst][i]
            fnew2 = func(pnew2,data=data)
            if fnew2[0] > fnew[0]:
                # accept the new vertex in the simplexe
                pnew = pnew2
                fnew = fnew2
        # replace the worst vertex with the new vertex
        for i in xrange(nvar):
            simplex[ssworst][i] = pnew[i]
        fvalue[ssworst] = fnew
        iteration += 1
        #print "Iteration:",iteration," ",ssbest," ",fvalue[ssbest]
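The change described in the comment above is easiest to see with a short usage sketch (not part of the dataset row). Everything in it is illustrative: the objective gauss_score, the synthetic data, and the starting values are invented for the example, and it assumes the amoeba_multi_level function above is in scope and runs under Python 2 (the original code uses xrange and print statements). The point it shows is exactly the one the comment makes: func returns a pair (criterion, passout), the simplex is oozed on the criterion alone, and the pass-through value attached to the best vertex is handed back to the caller.

import math

# Hypothetical objective: amoeba maximizes the criterion, so return the negative
# sum of squared residuals; the residual list is the extra "passout" payload.
def gauss_score(params, data=None):
    x0, width = params
    xs, ys = data
    resid = [y - math.exp(-((x - x0)/width)**2) for x, y in zip(xs, ys)]
    return -sum(r*r for r in resid), resid        # (criterion, passout)

xs = [0.1*i - 2.0 for i in range(41)]             # synthetic sampling grid
ys = [math.exp(-((x - 0.3)/0.8)**2) for x in xs]  # invented "measurement"
best, best_score, n_iter, passout = amoeba_multi_level(
    [0.0, 1.0],        # starting guess for (x0, width)
    [0.2, 0.2],        # initial simplex step per variable
    gauss_score,
    data=(xs, ys))
print "best parameters:", best
print "criterion:", best_score, "after", n_iter, "iterations"
print "residuals carried through from the best vertex:", len(passout)

A plain amoeba would return only the best parameters, criterion, and iteration count; keeping passout alongside each vertex is what lets an upper refinement level reuse whatever the lower level computed without re-evaluating func.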
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fit_amoeba(self, kwargs, verbose):\n\n args_init = self._param_class.kwargs_to_args(kwargs)\n\n options = {\n \"adaptive\": True,\n \"fatol\": self._tol_simplex_func,\n \"maxiter\": self._simplex_n_iterations * len(args_init),\n }\n\n method = \"Nelder-Mead\"\n\n if verbose:\n print(\"starting amoeba... \")\n\n opt = minimize(\n self.fast_rayshooting.chi_square,\n x0=args_init,\n method=method,\n options=options,\n )\n\n kwargs = self._param_class.args_to_kwargs(opt[\"x\"])\n source_penalty = opt[\"fun\"]\n\n return kwargs, source_penalty", "def psi(a):", "def f1_part_i(x,m_ind):\n\n #f = max(2.0*rho1(x,m_ind)-cfg.a[m_ind,cfg.nfea-1] ,2.0*rho2(x,m_ind)+cfg.a[m_ind,cfg.nfea-1])\n tmp1 = 2.0*rho1(x,m_ind)-cfg.a[m_ind,cfg.nfea-1]\n tmp2 = 2.0*rho2(x,m_ind)+cfg.a[m_ind,cfg.nfea-1]\n \n # checking absolute value of rho-b_i = rho1-rho2-b_i\n #if (tmp1-tmp2 > cfg.a[m_ind,cfg.nfea-1]):\n # cfg.alpha[m_ind] = 1.0\n #if (tmp1-tmp2 == cfg.a[m_ind,cfg.nfea-1]):\n # cfg.alpha[m_ind] = 0.5\n #else:\n # cfg.alpha[m_ind] = 0.0\n \n # checking maximum used in rho1 \n if (tmp1 > tmp2):\n f = tmp1\n cfg.alpha1[m_ind] = 1 \n elif (tmp1 < tmp2):\n f = tmp2\n cfg.alpha1[m_ind] = 0 \n else:\n f = tmp2\n cfg.alpha1[m_ind] = 2 \n \n return f", "def amoeba(transform, parameters_tolerance=0.1, function_tolerance=0.0001, max_iterations=300, scales=None, initial_simplex_size=None):\n\n #\n # Iteration Observer\n #\n #def iterationUpdate():\n # print optimizer.GetInitialSimplexDelta()\n # print transform.GetParameters()\n \n optimizer = itk.AmoebaOptimizer.New()\n optimizer.MinimizeOn()\n # Medimax <-> Numerical Recipes in C\n # recalage/mtch_3d.c:get_facteur_precision\n # NORMAL : 1\n # PRECIS : 0.1\n # TRES_PRECIS : 0.01\n # PRECISION_MAX : 0.0001\n optimizer.SetParametersConvergenceTolerance(parameters_tolerance) # 1/10th pixel\n optimizer.SetFunctionConvergenceTolerance(function_tolerance) # 0.001 bits\n optimizer.SetMaximumNumberOfIterations(max_iterations)\n \n if initial_simplex_size is not None :\n optimizer.AutomaticInitialSimplexOff()\n delta = transform.GetNumberOfParameters()*(initial_simplex_size,) # the initial size of the simplex (initial_simplex_size units in each of the parameters)\n print delta\n optimizer.SetInitialSimplexDelta(delta)\n else :\n optimizer.AutomaticInitialSimplexOn()\n\n if scales is not None :\n optimizer.SetScales(scales)\n\n #iterationCommand = itk.PyCommand.New()\n #iterationCommand.SetCommandCallable( iterationUpdate )\n #optimizer.AddObserver( itk.IterationEvent(), iterationCommand.GetPointer() )\n \n return optimizer", "def apply(self):", "def solve(self):", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n #self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n 
self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"rAfb[1.0,-5.0, 5.0]\");\n self.modelBuilder.doVar(\"rA0[1.0, -5.0, 5.0]\");\n self.modelBuilder.doSet(\"POI\",\"rAfb,rA0\")\n self.modelBuilder.factory_('expr::mAfb(\"@0*@1\",eAfb,rAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0*@1)\",eA0,rA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def Optimise(LogLikelihood,par,func_args,fixed=None,type='max',method='NM',maxiter=10000, maxfun=10000, verbose=True):\n \n if fixed==None:\n var_par = np.copy(par)\n #otherwise construct the parameter vector from var_par and fixed_par_val\n else:\n par = np.array(par)\n fixed = np.array(fixed) #ensure fixed is a np array\n #assign parameters to normal param vector\n fixed_par = par[np.where(fixed==True)]\n var_par = par[np.where(fixed!=True)]\n \n #set the algorithm to use - CG and P not working (at least not well)\n add_kwords = {'verbose':verbose}\n if method == 'NM':\n Algorithm = NelderMead\n add_kwords = {'maxiter':maxiter, 'maxfun':maxfun,'verbose':verbose}\n elif method == 'CG':\n print \"warning: CG method didn't work properly during testing\"\n Algorithm = ConjugateGradient\n elif method == 'P':\n print \"warning: Powell algorithm didn't work properly during testing\"\n Algorithm = Powell\n else:\n print \"error: optimisation function not found\"\n return par\n \n #set the optimisation function to pos or neg for the fmin funcitons\n if type == 'max': OptFunc = NegFixedPar_func\n elif type == 'min': OptFunc = FixedPar_func\n else:\n print \"error: %s not a valid option\" % type\n return par\n \n #call the optimser with the appropriate function\n fitted_par = Algorithm(OptFunc, var_par, (LogLikelihood,func_args,fixed,fixed_par), \\\n **add_kwords)\n \n #now return the params in the correct order...\n if fixed==None:\n return_par = fitted_par\n else:\n return_par = np.copy(par) \n return_par[np.where(fixed!=True)] = fitted_par\n \n return return_par", "def alsace(func, N, jpdf, tol=1e-22, sample_type='R', limit_cond=100,\n max_fcalls=1000, seed=123, ed_file=None, ed_fevals_file=None,\n verbose=True, pce_dict={}):\n\n if not pce_dict: # if pce_dict is empty --> cold-start\n idx_act = []\n idx_act.append([0]*N) # start with 0 multi-index\n idx_adm = []\n # set seed\n ot.RandomGenerator.SetSeed(seed)\n ed_size = 2*N # initial number of samples\n # initial experimental design and coresponding evaluations\n ed, ed_fevals = get_ed(func, jpdf, ed_size, sample_type=sample_type,\n knots=[], values=[], ed_file=ed_file,\n 
ed_fevals_file=ed_fevals_file)\n global_error_indicator = 1.0 # give arbitrary sufficiently large value\n\n # get the distribution type of each random variable\n dist_types = []\n for i in range(N):\n dist_type = jpdf.getMarginal(i).getName()\n dist_types.append(dist_type)\n\n # create orthogonal univariate bases\n poly_collection = ot.PolynomialFamilyCollection(N)\n for i in range(N):\n pdf = jpdf.getDistributionCollection()[i]\n algo = ot.AdaptiveStieltjesAlgorithm(pdf)\n poly_collection[i] = ot.StandardDistributionPolynomialFactory(algo)\n\n # create multivariate basis\n mv_basis = ot.OrthogonalProductPolynomialFactory(\n poly_collection,\n ot.LinearEnumerateFunction(N)\n )\n # get enumerate function (multi-index handling)\n enum_func = mv_basis.getEnumerateFunction()\n\n else: # get data from dictionary\n idx_act = pce_dict['idx_act']\n idx_adm = pce_dict['idx_adm']\n pce_coeff_act = pce_dict['pce_coeff_act']\n pce_coeff_adm = pce_dict['pce_coeff_adm']\n ed = pce_dict['ed']\n ed_fevals = pce_dict['ed_fevals']\n ed_size = len(ed_fevals)\n # compute local and global error indicators\n global_error_indicator = np.sum(np.array(pce_coeff_adm)**2)\n enum_func = pce_dict['enum_func']\n mv_basis = pce_dict['mv_basis']\n\n #\n while ed_size < max_fcalls and global_error_indicator > tol:\n # the index added last to the activated set is the one to be refined\n last_act_idx = idx_act[-1][:]\n # get admissible neighbors of the lastly added index\n adm_neighbors = admissible_neighbors(last_act_idx, idx_act)\n # update admissible indices\n idx_adm = idx_adm + adm_neighbors\n # get polynomial basis for the LS problem\n idx_ls = idx_act + idx_adm\n idx_ls_single = transform_multi_index_set(idx_ls, enum_func)\n ls_basis = mv_basis.getSubBasis(idx_ls_single)\n ls_basis_size = len(ls_basis)\n\n # construct the design matrix D and compute its QR decomposition and its\n # condition number\n D = get_design_matrix(ls_basis, ed)\n Q, R = sp.qr(D, mode='economic')\n condD = np.linalg.cond(R)\n\n # get largest eigenvalue of A^-1\n A = np.matmul(D.T, D) / ed_size\n# lambda_max=power_iteration(A,1000)\n# lambda_min=power_iteration(A-lambda_max*np.eye(A.shape[0]),10000)+lambda_max\n#\n# print('--------- power iteration ----------')\n# print('lambda max= ', lambda_max)\n# print('lambda min= ', lambda_min)\n# print('lambda max inv= ', 1./lambda_min)\n# print('--------- numpy ----------')\n# print('lambda max= ', max(np.linalg.eig(A)[0]))\n# print('lambda min= ', min(np.linalg.eig(A)[0]))\n# print('lambda max inv lambda= ', 1./min(np.linalg.eig(A)[0]))\n# print('lambda max inv A= ', max(np.linalg.eig(np.linalg.inv(A))[0]))\n# print('')\n# print('')\n eigA = 1./min(np.linalg.eig(A)[0])\n # If condD becomes too large, enrich the ED until condD becomes acceptable\n # or until ed_size reaches max_fcalls\n while (eigA > limit_cond and ed_size < max_fcalls) or ed_size < ls_basis_size:\n # inform user\n if verbose:\n print('WARNING: condition(D) = ' , condD)\n print('WARNING: lambda_max(A^-1) = ' , eigA)\n print(\"\")\n # select new size for the ED\n if ls_basis_size > ed_size:\n ed_size = ls_basis_size + N\n elif ed_size + N > max_fcalls:\n ed_size = max_fcalls\n else:\n ed_size = ed_size + N\n # expand ED\n ed, ed_fevals = get_ed(func, jpdf, ed_size, sample_type=sample_type,\n knots=ed, values=ed_fevals, ed_file=ed_file,\n ed_fevals_file=ed_fevals_file)\n # construct the design matrix D and compute its QR decomposition and its\n # condition number\n D = get_design_matrix(ls_basis, ed)\n Q, R = sp.qr(D,mode='economic')\n 
condD = np.linalg.cond(R)\n A = np.matmul(D.T, D) / ed_size\n eigA = 1./min(np.linalg.eig(A)[0])\n\n # solve LS problem\n c = Q.T.dot(ed_fevals)\n pce_coeff_ls = sp.solve_triangular(R, c)\n\n # find the multi-index with the largest contribution, add it to idx_act\n # and delete it from idx_adm\n pce_coeff_act = pce_coeff_ls[:len(idx_act)].tolist()\n pce_coeff_adm = pce_coeff_ls[-len(idx_adm):].tolist()\n help_idx = np.argmax(np.abs(pce_coeff_adm))\n idx_add = idx_adm.pop(help_idx)\n pce_coeff_add = pce_coeff_adm.pop(help_idx)\n idx_act.append(idx_add)\n pce_coeff_act.append(pce_coeff_add)\n\n # store expansion data in dictionary\n pce_dict = {}\n pce_dict['idx_act'] = idx_act\n pce_dict['idx_adm'] = idx_adm\n pce_dict['pce_coeff_act'] = pce_coeff_act\n pce_dict['pce_coeff_adm'] = pce_coeff_adm\n pce_dict['ed'] = ed\n pce_dict['ed_fevals'] = ed_fevals\n pce_dict['enum_func'] = enum_func\n pce_dict['mv_basis'] = mv_basis\n return pce_dict", "def integ_exact(model,func_params):\n\n\n if (model=='genz_osc'):\n gcf=func_params\n dim=gcf.shape[0]-1\n integ_ex=cos(2.*pi*gcf[0]+0.5*sum(gcf[1:]))\n for i in range(1,dim+1):\n integ_ex*=(2.*sin(gcf[i]/2.)/gcf[i])\n elif (model=='genz_exp'):\n gcf=func_params\n dim=gcf.shape[0]-1\n integ_ex=1.\n for i in range(1,dim+1):\n at1=exp(-gcf[i]*gcf[0])\n at2=exp(gcf[i]*(1.-gcf[0]))\n integ_ex*=((at2-at1)/(gcf[i]))\n elif (model=='genz_cont'):\n gcf=func_params\n dim=gcf.shape[0]-1\n integ_ex=1.\n for i in range(1,dim+1):\n integ_ex*= ((2.-exp(gcf[i]*(-gcf[0]))-exp(gcf[i]*(gcf[0]-1.)))/gcf[i])\n elif (model=='genz_gaus'):\n gcf=func_params\n dim=gcf.shape[0]-1\n integ_ex=1.\n for i in range(1,dim+1):\n at1=erf(-gcf[i]*gcf[0])\n at2=erf(gcf[i]*(1.-gcf[0]))\n integ_ex*=((at2-at1)*sqrt(pi)/(2.*gcf[i]))\n elif (model=='genz_cpeak'):\n gcf=func_params\n dim=gcf.shape[0]-1\n numer=0.0\n count=1\n denom=1.\n for i in range(1,dim+1):\n comb=list(itertools.combinations(range(1,dim+1),i))\n for j in range(len(comb)):\n assert(i==len(comb[j]))\n #print i,j,pow(-1,i)\n numer+=(pow(-1,i)/(1.+sum(gcf[list(comb[j])])))\n count+=1\n denom*=(i*gcf[i])\n #print count, numer\n integ_ex=(1.+numer)/denom\n elif (model=='genz_ppeak'):\n gcf=func_params\n dim=gcf.shape[0]-1\n integ_ex=1.\n for i in range(1,dim+1):\n at1=np.arctan(-gcf[i]*gcf[0])\n at2=np.arctan(gcf[i]*(1.-gcf[0]))\n integ_ex*=(gcf[i]*(at2-at1))\n\n return integ_ex", "def mezclar_bolsa(self):", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"mAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"mA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"eAfb,mAfb\")\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def VFI(method) :\n iteration=0 # Iteration Counter\n converged = 0 # Convergence Flag|\n \n#----- Initial Settings \n v_update = zeros(n_grid)\n 
v_func = empty(n_grid)\n k_next_vec = empty(n_grid)\n run_time = empty(2)\n \n def obj(k_next) :\n \"\"\"\n This function is used in value function iteration.\n It represents the objective function to be maximized for one node (state) of current capitals.\n Resulting value is maximized one corresponding to next period's capital as a maximizer. \n Next period's value is computed by interpolation.\n \n Input : k_next (next period's capital)\n \n Output : value_vec (maximized value resulting from choosing optimal capital in the next period)\n \"\"\" \n \n if method==1 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*linear_interp(k_grid,v_update,k_next))\n elif method==2 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*quad_interp(k_grid,v_update,k_next))\n elif method==3 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*cubic_interp(k_grid,v_update,k_next))\n \n return value_vec\n\n#----- Value function iteration\n start = time.time() # start time\n while converged==0 :\n index = 0\n for k_current in k_grid :\n k_next = fminbound(obj,k_grid[0],k_grid[-1])\n v_func[index] = (-1) * obj(k_next)\n k_next_vec[index] = k_next\n index = index + 1\n dist = abs(max(v_func - v_update))\n if dist<tol :\n converged = 1\n v_k, g_k = v_func, k_next_vec\n v_update = v_func\n print \"Iteration : \",iteration,\"\",\"Distance : \",dist # convergence process\n iteration = iteration + 1\n v_func = empty(n_grid) \n k_next_vec = empty(n_grid)\n \n end = time.time() # end time\n run_time[0],run_time[1] = runtime_cal(start,end) # total running time\n \n return v_k, g_k, run_time, iteration", "def full_solver(output_folder, prior_filename, data_filename, resume=True, test_plot=False):\n\n\n def log_prior(cube, ndim, nparams):\n cube[0] = cube[0]*(L_lim[1] - L_lim[0]) + L_lim[0]\n cube[1] = cube[1]*(d_lim[1] - d_lim[0]) + d_lim[0]\n cube[2] = cube[2]*(F_lim[1] - F_lim[0]) + F_lim[0]\n cube[3] = cube[3]*(A_lim[1] - A_lim[0]) + A_lim[0]\n cube[4] = cube[4]*(Arel_lim[1] - Arel_lim[0]) + Arel_lim[0]\n cube[5] = cube[5]*(Ti_lim[1] - Ti_lim[0]) + Ti_lim[0]\n #cube[6] = cube[6]*(off_lim[1] - off_lim[0]) + off_lim[0]\n cube[6] = cube[6]*(Brel_lim[1] - Brel_lim[0]) + Brel_lim[0]\n #cube[7] = cube[7]*(Brel_lim[1] - Brel_lim[0]) + Brel_lim[0]\n\n\n def log_likelihood(cube, ndim, nparams):\n #vals = forward_model(r, cube[0], cube[1], cube[2], w0, mu, [cube[3]*cube[4], cube[3]], [Ti_Th, cube[5]], [0.0, 0.0], nlambda=2000)\n #vals = forward_model(r, cube[0], cube[1], cube[2], w0, mu, [cube[3]*cube[4], cube[3], cube[3]*cube[7]], [Ti_Th, cube[5], Ti_Th],\n # [0.0, 0.0, 0.0], nlambda=2000)\n vals = forward_model(r, cube[0], cube[1], cube[2], w0, mu, [cube[3]*cube[4], cube[3], cube[3]*cube[6]], [Ti_Th, cube[5], Ti_Th],\n [0.0, 0.0, 0.0], nlambda=2000)\n #vals = offset_forward_model(r, cube[0], cube[1], cube[2], w0, mu, [cube[3]*cube[4], cube[3]], [Ti_Th, cube[5]],\n # [0.0, 0.0], sm_ang=False, nlambda=2000, coeff=0.5)\n #vals += cube[6]\n chisq = np.sum((vals - sig)**2 / error**2)\n return -chisq / 2.0\n\n data = io.h5_2_dict(data_filename)\n\n ix = data['fit_ix']['0']#[0:-1:2]\n r = data['r'][ix]\n sig = data['sig'][ix]\n error = data['sig_sd'][ix]\n\n Ti_Th = 0.025*1000.0 / 300.0\n\n px_size = 0.004# * 3 \n L_lim = [147.0, 153.0]\n L_lim = [x / px_size for x in L_lim]\n\n d_lim = [0.7, 1.0]\n\n F_lim = [17.0, 26.0]\n\n Amax = np.max(sig)\n A_lim = [0.75*Amax, 2.0*Amax]\n\n Arel_lim = [0.005, 0.6]\n Brel_lim = [0.001, 0.2]\n\n Ti_lim = [0.025, 1.0]\n #min_val = 
np.abs(np.min(data['sig'][ix]))\n min_val = 50.0\n off_lim = [0.0, min_val]\n #n_params = 6\n n_params = 7\n #n_params = 8\n folder = abspath(output_folder)\n\n if test_plot:\n pass\n # npts = 100\n # test_sig = np.zeros((npts, len(r)))\n # for i in xrange(npts):\n # cube = [random.random() for _ in xrange(n_params)] \n # log_prior(cube, None, None)\n # test_sig[i, :] = forward_model(r, cube[0], cube[1], cube[2], w0, mu, [cube[3]*cube[4], cube[3]],\n # [Ti_Th, cube[5]], [0.0, 0.0], nlambda=2000)\n\n # fig, ax = plt.subplots()\n # for i in xrange(npts):\n # ax.plot(r, test_sig[i, :], 'C0')\n # ax.errorbar(r, sig, yerr=error, fmt='', ecolor='C2', color='C1')\n # plt.show()\n\n else:\n pymultinest.run(log_likelihood, log_prior, n_params, importance_nested_sampling=False,\n resume=resume, verbose=True, sampling_efficiency='model', n_live_points=1000,\n outputfiles_basename=join(folder, 'full_'))", "def doParametersOfInterest(self):\n ''' ref : physicsmodel -> rvf\n self.modelBuilder.out.var(\"MH\").setRange(float(self.mHRange[0]),float(self.mHRange[1]))\n self.modelBuilder.out.var(\"MH\").setConstant(False)\n '''\n\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doVar(\"Fvbf[0,0,1]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu,Fvbf\")\n self.modelBuilder.doVar(\"\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"(@0-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"(1-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)*(1.-@1)\", mu,Fvbf)')\n\n self.modelBuilder.factory_('expr::vbfH_s_func(\"(@0-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"(1-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)*(@1)\", mu,Fvbf)')", "def _analyze_opt_criterias_(criterias, sign_threshold, max_num_cofactors, file_prefix, with_qq_plots,\n lm, step_info_list, quantiles_dict, plot_bonferroni=True, cand_genes=None,\n plot_xaxis=True, log_qq_max_val=5, eig_L=None, type='emmax', highlight_loci=None,\n write_pvals=False, snp_priors=None, ppa_threshold=0.5, emma_num=None,\n save_pvals=False , **kwargs):\n ret_dict = {}\n opt_indices = {}\n opt_file_dict = {}\n for c in criterias:\n print 'GWAs for optimal %s criteria:' % c\n if c == 'bonf':\n opt_list = sp.arange(max_num_cofactors + 1)\n for i, pval in enumerate(criterias['bonf']):\n if pval > sign_threshold:\n opt_list[i] = -1\n i_opt = opt_list.argmax()\n elif c == 'mbonf':\n fw_opt_list = sp.arange(max_num_cofactors + 1)\n for i in range(max_num_cofactors + 1):\n pval = criterias[c][i]\n if pval > sign_threshold:\n fw_opt_list[i] = -1\n fw_i_opt = fw_opt_list.argmax()\n fw_max = fw_opt_list[fw_i_opt]\n\n if max_num_cofactors > 1:\n shift = max_num_cofactors + 1\n bw_opt_list = sp.arange(max_num_cofactors - 1, 0, -1)\n for i in range(len(bw_opt_list)):\n pval = criterias[c][i + shift]\n if pval > sign_threshold:\n bw_opt_list[i] = -1\n bw_i_opt = bw_opt_list.argmax()\n bw_max = bw_opt_list[bw_i_opt]\n bw_i_opt = bw_opt_list.argmax() + shift\n if bw_max == fw_max:\n i_opt = bw_i_opt if criterias[c][fw_i_opt] > criterias[c][bw_i_opt] else fw_i_opt\n else:\n i_opt = bw_i_opt if bw_max > fw_max else fw_i_opt\n else:\n i_opt = fw_i_opt\n elif c == 'min_cof_ppa':\n fw_opt_list = sp.arange(max_num_cofactors + 1)\n for i in range(max_num_cofactors + 1):\n ppa = 
criterias[c][i]\n if ppa < ppa_threshold:\n fw_opt_list[i] = -1\n fw_i_opt = fw_opt_list.argmax()\n fw_max = fw_opt_list[fw_i_opt]\n\n if max_num_cofactors > 1:\n shift = max_num_cofactors + 1\n bw_opt_list = sp.arange(max_num_cofactors - 1, 0, -1)\n for i in range(len(bw_opt_list)):\n ppa = criterias[c][i + shift]\n if ppa < ppa_threshold:\n bw_opt_list[i] = -1\n bw_i_opt = bw_opt_list.argmax()\n bw_max = bw_opt_list[bw_i_opt]\n bw_i_opt = bw_opt_list.argmax() + shift\n if bw_max == fw_max:\n i_opt = bw_i_opt if criterias[c][fw_i_opt] > criterias[c][bw_i_opt] else fw_i_opt\n else:\n i_opt = bw_i_opt if bw_max > fw_max else fw_i_opt\n else:\n i_opt = fw_i_opt\n\n else:\n cur_min_val = criterias[c][0]\n min_indices = [0]\n for i in range(1, len(criterias[c])):\n v = criterias[c][i]\n if v < cur_min_val:\n cur_min_val = v\n min_indices = [i]\n if v == cur_min_val:\n min_indices.append(i)\n i_opt = min(min_indices)\n # i_opt = sp.argmin(criterias[c])\n print \" %d'th step was optimal.\" % i_opt\n ret_dict[c] = i_opt\n if i_opt <= max_num_cofactors:\n # Copy the pngs...\n if file_prefix:\n png_file_name = '%s_step%d.png' % (file_prefix, i_opt)\n opt_png_file_name = '%s_step%d_opt_%s.png' % (file_prefix, i_opt, c)\n if platform.system() == 'Linux' or platform.system() == 'Darwin':\n # os.spawnlp(os.P_NOWAIT, 'cp', 'cp', png_file_name, opt_png_file_name)\n if snp_priors != None:\n png_file_name = '%s_ppa_step%d.png' % (file_prefix, i_opt)\n opt_png_file_name = '%s_ppa_step%d_opt_%s.png' % (file_prefix, i_opt, c)\n # os.spawnlp(os.P_NOWAIT, 'cp', 'cp', png_file_name, opt_png_file_name)\n if with_qq_plots:\n qq_png_file_name = '%s_step%d_qqplot.png' % (file_prefix, i_opt)\n opt_qq_png_file_name = '%s_step%d_opt_%s_qqplot.png' % (file_prefix, i_opt, c)\n # os.spawnlp(os.P_NOWAIT, 'cp', 'cp', qq_png_file_name, opt_qq_png_file_name)\n log_qq_png_file_name = '%s_step%d_log_qqplot.png' % (file_prefix, i_opt)\n opt_log_qq_png_file_name = '%s_step%d_opt_%s_log_qqplot.png' % (file_prefix, i_opt, c)\n # os.spawnlp(os.P_NOWAIT, 'cp', 'cp', log_qq_png_file_name, opt_log_qq_png_file_name)\n elif i_opt in opt_file_dict:\n if file_prefix:\n png_file_name = opt_file_dict[i_opt]['manhattan']\n opt_png_file_name = '%s_step%d_opt_%s.png' % (file_prefix, i_opt, c)\n if platform.system() == 'Linux' or platform.system() == 'Darwin':\n # os.spawnlp(os.P_NOWAIT, 'cp', 'cp', png_file_name, opt_png_file_name)\n if snp_priors != None:\n png_file_name = opt_file_dict[i_opt]['ppa_manhattan']\n opt_png_file_name = '%s_ppa_step%d_opt_%s.png' % (file_prefix, i_opt, c)\n # os.spawnlp(os.P_NOWAIT, 'cp', 'cp', png_file_name, opt_png_file_name)\n\n if with_qq_plots:\n qq_png_file_name = opt_file_dict[i_opt]['qq']\n opt_qq_png_file_name = '%s_step%d_opt_%s_qqplot.png' % (file_prefix, i_opt, c)\n # os.spawnlp(os.P_NOWAIT, 'cp', 'cp', qq_png_file_name, opt_qq_png_file_name)\n log_qq_png_file_name = opt_file_dict[i_opt]['log_qq']\n opt_log_qq_png_file_name = '%s_step%d_opt_%s_log_qqplot.png' % (file_prefix, i_opt, c)\n # os.spawnlp(os.P_NOWAIT, 'cp', 'cp', log_qq_png_file_name, opt_log_qq_png_file_name)\n\n elif not i_opt in opt_indices:\n # Perfom GWAS witht the optimal cofactors\n cofactor_snps = step_info_list[i_opt]['cofactor_snps']\n cofactors = step_info_list[i_opt]['cofactors']\n print cofactors\n lm.set_factors(cofactor_snps)\n if type == 'emmax':\n eig_R = lm._get_eigen_R_(X=lm.X)\n reml_res = lm.get_REML(eig_L=eig_L, eig_R=eig_R)\n H_sqrt_inv = reml_res['H_sqrt_inv']\n l_res = lm._emmax_f_test_(kwargs['snps'], H_sqrt_inv, 
snp_priors=snp_priors,\n emma_num=emma_num)\n min_pval_i = l_res['ps'].argmin()\n mahalnobis_rss = l_res['rss'][min_pval_i]\n print 'Min Mahalanobis RSS:', mahalnobis_rss\n elif type == 'lm':\n l_res = lm.fast_f_test(kwargs['snps'])\n min_pval_i = l_res['ps'].argmin()\n\n min_pval = l_res['ps'][min_pval_i]\n\n min_pval_chr_pos = (kwargs['chromosomes'][min_pval_i], kwargs['positions'][min_pval_i])\n print 'Min p-value:', min_pval\n l_pvals = l_res['ps'].tolist()\n l_perc_var_expl = l_res['var_perc'].tolist()\n opt_indices[i_opt] = {'min_pval':min_pval, 'min_pval_chr_pos':min_pval_chr_pos,\n 'kolmogorov_smirnov':agr.calc_ks_stats(l_pvals),\n 'pval_median':agr.calc_median(l_pvals)}\n if file_prefix:\n opt_file_prefix = '%s_opt_%s' % (file_prefix, c)\n if snp_priors:\n ppa_cofactors = step_info_list[i_opt]['ppa_cofactors']\n ppas = l_res['ppas'].tolist()\n else:\n ppas = None\n ppa_cofactors = None\n opt_file_dict[i_opt], res = _plot_manhattan_and_qq_(opt_file_prefix, i_opt, l_pvals, quantiles_dict,\n plot_bonferroni=True, highlight_markers=cofactors,\n cand_genes=cand_genes, plot_xaxis=plot_xaxis,\n log_qq_max_val=log_qq_max_val, with_qq_plots=with_qq_plots,\n simple_qq=True, highlight_loci=highlight_loci,\n write_pvals=write_pvals, highlight_ppa_markers=ppa_cofactors,\n ppas=ppas, perc_var_expl=l_perc_var_expl, save_pvals=save_pvals,\n **kwargs)\n if save_pvals:\n opt_indices['res'] = opt_file_dict[i_opt]['res']\n\n if type == 'emmax':\n opt_indices[i_opt]['mahalanobis_rss'] = mahalnobis_rss\n opt_indices[i_opt]['res'] = res\n return ret_dict, opt_indices", "def optim_func(params, model):\n if model.model == 'ARD':\n model.alpha, model.beta = params\n lik = model.pruning_algorithm()\n\n else:\n model.alpha = params[0]\n lik = model.pruning_algorithm()\n \n return -lik", "def falcon():", "def alphabeta_search(state, d=1, cutoff_test=None, eval_fn=None, start_time=None, turn_number=None):\n global count\n global testing\n global BigInitialValue\n global MoveTimes\n\n player = state.to_move\n count = 0\n\n def max_value(state, alpha, beta, depth):\n global count, testing\n if testing:\n print(\" \"* depth, \"Max alpha: \", alpha, \" beta: \", beta, \" depth: \", depth)\n if cutoff_test(state, depth):\n if testing:\n print(\" \"* depth, \"Max cutoff returning \", eval_fn(state))\n return eval_fn(state)\n v = -BigInitialValue\n succ = state.game.successors(state)\n count = count + len(succ)\n if testing:\n print(\" \"*depth, \"maxDepth: \", depth, \"Total:\", count, \"Successors: \", len(succ))\n for (a, s) in succ:\n # Decide whether to call max_value or min_value, depending on whose move it is next.\n # A player can move repeatedly if opponent is completely blocked\n if state.to_move == s.to_move:\n v = max(v, max_value(s, alpha, beta, depth+1))\n else:\n v = max(v, min_value(s, alpha, beta, depth+1))\n if testing:\n print(\" \"* depth, \"max best value:\", v)\n if v >= beta:\n return v\n alpha = max(alpha, v)\n return v\n\n def min_value(state, alpha, beta, depth):\n global count\n if testing:\n print(\" \"*depth, \"Min alpha: \", alpha, \" beta: \", beta, \" depth: \", depth)\n if cutoff_test(state, depth):\n if testing:\n print(\" \"*depth, \"Min cutoff returning \", eval_fn(state))\n return eval_fn(state)\n v = BigInitialValue\n succ = state.game.successors(state)\n count = count + len(succ)\n if testing:\n print(\" \"*depth, \"minDepth: \", depth, \"Total:\", count, \"Successors: \", len(succ))\n for (a, s) in succ:\n # Decide whether to call max_value or min_value, depending on whose move 
it is next.\n # A player can move repeatedly if opponent is completely blocked\n if state.to_move == s.to_move:\n v = min(v, min_value(s, alpha, beta, depth+1))\n else:\n v = min(v, max_value(s, alpha, beta, depth+1))\n if testing:\n print(\" \"*depth, \"min best value:\", v)\n if v <= alpha:\n return v\n beta = min(beta, v)\n return v\n\n def right_value(s, alpha, beta, depth):\n if s.to_move.id == state.to_move.id:\n return max_value(s, -BigInitialValue, BigInitialValue, 0)\n else:\n return min_value(s, -BigInitialValue, BigInitialValue, 0)\n\n def argmin(seq, fn):\n \"\"\"Return an element with lowest fn(seq[i]) score; tie goes to first one.\n >>> argmin(['one', 'to', 'three'], len)\n 'to'\n \"\"\"\n best = seq[0]; best_score = fn(best)\n for x in seq:\n x_score = fn(x)\n if x_score < best_score:\n best, best_score = x, x_score\n return best\n\n def argmax(seq, fn):\n \"\"\"Return an element with highest fn(seq[i]) score; tie goes to first one.\n >>> argmax(['one', 'to', 'three'], len)\n 'three'\n \"\"\"\n return argmin(seq, lambda x: -fn(x))\n\n # Body of alphabeta_search starts here:\n cutoff_test = (cutoff_test or\n (lambda state,depth: depth>d or state.game.terminal_test(state)))\n eval_fn = eval_fn or (lambda state: state.game.utility(state, turn_number))\n action, state = argmax(state.game.successors(state),\n lambda a_s: right_value(a_s[1], -BigInitialValue, BigInitialValue, 0))\n\n # calculate move time, round to 2 decimal places, store for analysis\n MoveTimes.append(round(time.time() - start_time, 2))\n return action", "def cal_ja(Y,t,voltage_clamp_func,voltage_clamp_params):\n v = voltage_clamp_func(t,voltage_clamp_params)\n m = Y[0]\n \n tfa = 1.\n ki = 0.001 # (mM)\n \n cao = 2.5 # Davidson (mM)\n \" To do: make cai variable as an input like voltage \"\n cai = 1.e-4 # (mM) Roughly values (100 nM) from Intracellular calcium regulation among subpopulations of rat dorsal root ganglion neurons by Lu, Zhang, Gold 2007\n \n celsius = 37.\n \n def alpha(v):\n return 15.69*(81.5 - v)/(np.exp((-1.0*v+81.5)/10.0)-1.0)\n def beta(v):\n return 0.29*np.exp(-v/10.86)\n def KTF(celsius):\n return ((25./293.15)*(celsius + 273.15))\n def efun(z):\n return np.array([1 - i/2 if i < 1e-4 else i/(np.exp(i)-1) for i in z])\n def calc_ghk(v, cai, cao): \n f = KTF(celsius)/2\n nu = v/f\n return -f*(1. - (cai/cao)*np.exp(nu))*efun(nu)\n\n a = alpha(v)\n b = beta(v)\n tau = 1./(tfa*(a + b))\n minf = a/(a+b)\n dm = (minf - m)/tau\n \n \"\"\" Calculating the current \n # h gate\n h2 = ki/(ki+cai)\n gcalbar = 0.003\n ghk = calc_ghk(v,cai,cao)\n ical = gcalbar*m*m*h2*ghk\n \"\"\"\n return [dm]", "def abv(og, fg):\n return abw(og, fg) * fg / 0.794", "def solveForModeB1(X, M, n, maxInner, epsilon, tol,sita,Y1, lambta2):\n # Pi(n) = [A(N) kr A(N-1) kr ... A(n+1) kr A(n-1) kr .. 
A(1)]^T\n Pi = tensorTools.calculatePi(X, M, n)\n #print 'Pi size', Pi.shape\n #print 'pi='+str(Pi)\n #print(M.U[n])\n for iter in range(maxInner):\n # Phi = (X(n) elem-div (B Pi)) Pi^T\n #print X.vals.shape,X.shape\n #print X.vals.flatten().shape\n Phi = tensorTools.calculatePhi(X, M.U[n], Pi, n, epsilon=epsilon)\n #print('phi'+str(Phi))\n #print(Phi)\n # check for convergence that min(B(n), E - Phi(n)) = 0 [or close]\n kktModeViolation = np.max(np.abs(np.minimum(M.U[n], 1-Phi).flatten()))\n if (kktModeViolation < tol):\n break\n\n B=M.U[n]\n #print B.shape\n colNorm = np.apply_along_axis(np.linalg.norm, 0, B, 1)\n zeroNorm = np.where(colNorm == 0)[0]\n colNorm[zeroNorm] = 1\n B = B / colNorm[np.newaxis, :]\n tm=np.hstack((np.ones((B.shape[0],1)),B))\n Y1=Y1.reshape((Y1.shape[0],1))\n\n derive=-1.0*lambta2/B.shape[0]*np.dot((Y1-np.dot(tm,sita)),sita.T)\n #print derive.shape\n #print np.multiply(M.U[n],derive[:,1:]).shape\n #print np.multiply(M.U[n],Phi).shape\n M.U[n] = np.array(np.multiply(M.U[n],Phi))-np.array((np.multiply(M.U[n],derive[:,1:])))\n\n #print 'after'\n #print M.U[n][0]\n #print(\" Mode={0}, Inner Iter={1}, KKT violation={2}\".format(n, iter, kktModeViolation))\n return M, Phi, iter, kktModeViolation", "def MVAE_objective(ce_weight, modal_loss_funcs, recon_weights, input_to_float=True, annealing=1.0, criterion=torch.nn.CrossEntropyLoss()):\n recon_loss_func = elbo_loss(modal_loss_funcs, recon_weights, annealing)\n\n def allnonebuti(i, item):\n ret = [None for w in modal_loss_funcs]\n ret[i] = item\n return ret\n\n def actualfunc(pred, truth, args):\n training = args['training']\n reps = args['reps']\n fusedmu, fusedlogvar = args['fused']\n decoders = args['decoders']\n inps = args['inputs']\n reconsjoint = []\n\n if input_to_float:\n inputs = [i.float().cuda() for i in inps]\n else:\n inputs = [i.cuda() for i in inps]\n for i in range(len(inps)):\n reconsjoint.append(decoders[i](\n reparameterize(fusedmu, fusedlogvar, training)))\n total_loss = recon_loss_func(reconsjoint, inputs, fusedmu, fusedlogvar)\n for i in range(len(inps)):\n mu, logvar = reps[i]\n recon = decoders[i](reparameterize(mu, logvar, training))\n total_loss += recon_loss_func(allnonebuti(i, recon),\n allnonebuti(i, inputs[i]), mu, logvar)\n total_loss += ce_weight * criterioning(pred, truth, criterion)\n return total_loss\n return actualfunc", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT 
relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. 
+ 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, 
a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def _UpdateCriteria(self):\n grad = self.traj.grad[-1]\n disp = self.traj.coords[-1] - self.traj.coords[-2]\n self.delta_e = self.traj.energy[-1] - self.traj.energy[-2]\n self.grad_max = numpy.amax(grad)\n self.disp_max = numpy.amax(disp)\n self.grad_rms = math.sqrt(numpy.mean(grad**2))\n self.disp_rms = math.sqrt(numpy.mean(disp**2))", "def solve(self):\n ...", "def test_extension(roi,name,nb,distmax,gradient=True,update=True,bandfits=True):\n \n #save previous position of the source.\n roi.save('roi_temp.dat')\n source=source=roi.get_source(which=name)\n Parameters=source.spatial_model.get_parameters()\n ra_puls=Parameters[0]\n dec_puls=Parameters[1]\n \n ll,TS=localize_func(roi,name,nb,gradient=True,update=True,bandfits=bandfits)\n\n source=source=roi.get_source(which=name)\n Parameters=source.spatial_model.get_parameters()\n ra=Parameters[0]\n dec=Parameters[1]\n sigma=Parameters[2]\n \n source=source=roi.get_source(which=name)\n \n source=source=roi.get_source(which=name)\n if (dist(ra_puls,dec_puls,ra,dec)-sigma)<distmax:\n print \"Source consistent with the position of the pulsar : distance =%.2f\"%dist(ra,dec,source.skydir.ra(),source.skydir.dec())\n else :\n print \"Source unconsistent with the position of the pulsar : distance =%.2f\"%dist(ra,dec,source.skydir.ra(),source.skydir.dec())\n roi=load(\"roi_temp.dat\")\n \n os.system(\"rm -rf roi_temp.dat\")\n \n return roi", "def parametersweep(basedir,configfile,acfdir='ACF',invtype='tik'):\n\n alpha_sweep=sp.logspace(-3.5,sp.log10(7),25)\n costdir = os.path.join(basedir,'Cost')\n ionoinfname=os.path.join(basedir,acfdir,'00lags.h5')\n ionoin=IonoContainer.readh5(ionoinfname)\n \n dirio = ('Spectrums','Mat','ACFMat')\n inputdir = os.path.join(basedir,dirio[0])\n \n dirlist = glob.glob(os.path.join(inputdir,'*.h5'))\n 
(listorder,timevector,filenumbering,timebeg,time_s) = IonoContainer.gettimes(dirlist)\n Ionolist = [dirlist[ikey] for ikey in listorder]\n \n RSTO = RadarSpaceTimeOperator(Ionolist,configfile,timevector,mattype='Sim')\n \n npts=RSTO.simparams['numpoints']\n \n ionospec=makeionocombined(dirlist)\n if npts==ionospec.Param_List.shape[-1]:\n tau,acfin=spect2acf(ionospec.Param_Names,ionospec.Param_List)\n nloc,ntimes=acfin.shape[:2]\n ambmat=RSTO.simparams['amb_dict']['WttMatrix']\n np=ambmat.shape[0]\n acfin_amb=sp.zeros((nloc,ntimes,np),dtype=acfin.dtype)\n # get the original acf\n \n \n ambmat=RSTO.simparams['amb_dict']['WttMatrix']\n np=ambmat.shape[0]\n \n for iloc,locarr in enumerate(acfin):\n for itime,acfarr in enumerate(locarr):\n acfin_amb[iloc,itime]=sp.dot(ambmat,acfarr)\n acfin_amb=acfin_amb[:,0]\n else:\n acfin_amb=ionospec.Param_List[:,0]\n \n if not os.path.isdir(costdir):\n os.mkdir(costdir)\n # pickle file stuff \n pname=os.path.join(costdir,'cost{0}-{1}.pickle'.format(acfdir,invtype))\n\n alpha_list=[]\n errorlist=[]\n errorlaglist=[]\n datadiflist=[]\n constlist=[]\n if 'perryplane' in basedir.lower() or 'SimpData':\n rbounds=[-500,500]\n else:\n rbounds=[0,500]\n\n alpha_list_new=alpha_sweep.tolist()\n for i in alpha_list:\n if i in alpha_list_new:\n alpha_list_new.remove(i)\n \n for i in alpha_list_new:\n ionoout,datadif,constdif=invertRSTO(RSTO,ionoin,alpha_list=i,invtype=invtype,rbounds=rbounds,Nlin=1)\n \n datadiflist.append(datadif)\n constlist.append(constdif)\n acfout=ionoout.Param_List[:,0]\n alpha_list.append(i)\n outdata=sp.power(sp.absolute(acfout-acfin_amb),2)\n aveerror=sp.sqrt(sp.nanmean(outdata,axis=0))\n errorlaglist.append(aveerror)\n errorlist.append(sp.nansum(aveerror))\n \n pickleFile = open(pname, 'wb')\n pickle.dump([alpha_list,errorlist,datadiflist,constlist,errorlaglist],pickleFile)\n pickleFile.close()\n mkalphalist(pname)\n alphaarr=sp.array(alpha_list)\n errorarr=sp.array(errorlist)\n errorlagarr=sp.array(errorlaglist)\n datadif=sp.array(datadiflist)\n constdif=sp.array(constlist)\n fig,axlist,axmain=plotalphaerror(alphaarr,errorarr,errorlagarr)\n fig.savefig(os.path.join(costdir,'cost{0}-{1}.png'.format(acfdir,invtype)))\n \n fig,axlist=plotLcurve(alphaarr,datadif,constdif)\n fig.savefig(os.path.join(costdir,'lcurve{0}-{1}.png'.format(acfdir,invtype)))", "def solver_auto_param(u_init, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, eta_step = 0.5, eta_step_tumor = 0.99, ftol = 1e-3, max_iter = 300, verbose = 0, nnls_max_iter=30):\n auto_param_obj_history = []\n auto_param_relaxed_obj_history = []\n \n eta_0 = (1/(2*np.max(B)))*0.5 #Initialize eta_0\n eta = np.array([eta_0/len(H)]*len(H))*0.9\n eta_lin = np.ones(L_lhs.shape[0])*0.01\n \n u, w_0, w, w_lin, obj_history, relaxed_obj_history = solver(u_init, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter, verbose = verbose, nnls_max_iter=nnls_max_iter)\n # solver(u_init, eta_0, eta, T, H, alpha, gamma, B, D, C, ftol = 1e-3, max_iter = 300, verbose = verbose)\n auto_param_obj_history.append(obj_history)\n auto_param_relaxed_obj_history.append(relaxed_obj_history)\n cnstr = constraints_all(u, H, gamma, D, C, tol = 0.05, verbose = 0)\n cnstr_linear = linear_constraint(u, L_lhs, L_rhs, tol = 0.05)\n \n print('Enforcing Feasibility')\n count = 0\n num_violated = -1\n while (len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear))):\n count += 1\n num_violated_prev = np.copy(num_violated)\n num_violated_oar = len(H) - cnstr['Relaxed'].sum()\n 
num_violated_lin = L_lhs.shape[0] - np.sum(cnstr_linear)#(1 - int(cnstr_linear))\n num_violated = len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear))#(1 - int(cnstr_linear))\n \n print('Iter ', count, '# of violated constr:', len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear)))\n print(' Linear constraints on u violation:', L_lhs.shape[0] - np.sum(cnstr_linear))\n eta[cnstr['Relaxed'] == False] *= eta_step\n eta_lin[cnstr_linear == False] *= eta_step\n # eta_0 *= eta_step*2\n # eta_lin *= eta_step\n \n if num_violated == num_violated_prev:\n print('Increase enforcement')\n if num_violated_lin > 0:\n eta_lin[cnstr_linear == False] *= eta_step\n # eta_0 *= eta_step*2\n #eta_lin *= eta_step\n if num_violated_oar > 0:\n eta[cnstr['Relaxed'] == False] *= eta_step\n # eta_0 *= eta_step*2\n \n u, w_0, w, w_lin, obj_history, relaxed_obj_history = solver(u, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter, verbose = verbose, nnls_max_iter=nnls_max_iter)\n # solver(u, eta_0, eta, T, H, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter, verbose = verbose)\n auto_param_obj_history.append(obj_history)\n auto_param_relaxed_obj_history.append(relaxed_obj_history)\n cnstr = constraints_all(u, H, gamma, D, C, tol = 0.05, verbose = 0)\n cnstr_linear = linear_constraint(u, L_lhs, L_rhs, tol = 0.05)\n \n print('Enforcing Optimality')\n count = 0\n while not (len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear))):\n # (cnstr['Relaxed'].sum()-len(H)): #If nothing is violated -- enforce optimality!\n count += 1\n print('Opt Iter', count)\n obj_prev = obj_u_opt_N_fixed(u, T, alpha, B)\n u_prev = np.copy(u)\n eta_0 *= eta_step_tumor\n print('Current eta_0:', eta_0)\n if (2*eta_0)**2 <= 1e-80:\n print('zero reached')\n break\n # u, w_0, w, w_lin, obj_history, relaxed_obj_history = solver(u, eta_0, eta, T, H, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter, verbose = verbose)\n u, w_0, w, w_lin, obj_history, relaxed_obj_history = solver(u, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = ftol, max_iter = max_iter//2, verbose = verbose, nnls_max_iter=nnls_max_iter)\n auto_param_obj_history.append(obj_history)\n auto_param_relaxed_obj_history.append(relaxed_obj_history)\n \n obj_new = obj_u_opt_N_fixed(u, T, alpha, B)\n if (abs(obj_new - obj_prev)/abs(obj_prev) <= 1e-4) or (obj_new > obj_prev): #two consequent iters, two times bc on iter 2 it stops anyway\n print('No improvement, increase enforcement')\n eta_step_tumor *= 0.1\n eta_0 *= eta_step_tumor\n if (2*eta_0)**2 <= 1e-80:\n print('zero reached')\n break\n # break\n \n cnstr = constraints_all(u, H, gamma, D, C, tol = 0.05, verbose = 0)\n cnstr_linear = linear_constraint(u, L_lhs, L_rhs, tol = 0.05)\n print('# of violated constr:', len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear)))#(1 - int(cnstr_linear)))\n \n print('Finding the correct solution:')\n u = u_prev\n eta_0 = eta_0/eta_step_tumor\n \n cnstr = constraints_all(u, H, gamma, D, C, tol = 0.05, verbose = 0)\n cnstr_linear = linear_constraint(u, L_lhs, L_rhs, tol = 0.05)\n print('# of violated constr:', len(H) - cnstr['Relaxed'].sum() + (L_lhs.shape[0] - np.sum(cnstr_linear)))\n # print('# of violated constr:', cnstr['Relaxed'].sum()-len(H))\n print(\"OBJJJJJ:\", obj_u_opt_N_fixed(u, T, alpha, B))\n return u, w_0, w, w_lin, eta_0, eta, eta_lin, auto_param_obj_history, auto_param_relaxed_obj_history", "def getQValue(self, state, action):\n 
\"\"\"Description:\n [Enter a description of what you did here.]\n Use first equation in slide 71 of MDP to compute q-value depond on weights and current features.\n \n !! But I think what I did is not work for IdentityExtractor. Because feature of IdentityExtrator always return 1,\n it did not change even a ghost is closing.\n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n # if weight is empty, then weight will need to initial to 1 for all features\n # According to which Extractor user choose, weight counter will have equal number of keys.\n if len(self.weight) == 0:\n feat = self.featExtractor.getFeatures(state, action)\n self.weight.incrementAll(feat.keys(), 1)\n \n qValue = self.weight * self.featExtractor.getFeatures(state,action)\n return qValue\n \"\"\" END CODE \"\"\"", "def initialize_refine(self, **kwargs):", "def __init__(self, F, m, B, a=None):\n if a is None: # don't make the stupid noob mistake of putting a=[]\n a = [] # in the function signature above.\n\n # Initialize constants.\n self.m = m\n d = F.degree()\n self.d = d\n self.n = m*d\n self.B = B\n self.gamma = hermite_constant(self.n-self.d)\n\n self.F = F\n self.Z_F = F.maximal_order()\n self.Foo = F.real_embeddings()\n self.dF = abs(F.disc())\n self.Fx = PolynomialRing(F, 'xF')\n\n self.beta = [[]]*m\n self.gnk = [[]]*m\n\n self.trace_elts = []\n\n Z_Fbasis = self.Z_F.basis()\n\n # Initialize variables.\n if a == []:\n # No starting input, all polynomials will be found; initialize to zero.\n self.a = [0]*m + [1]\n self.amaxvals = [[]]*m\n anm1s = [[i] for i in range(0,m//2+1)]\n for i in range(1,self.d):\n for j in range(len(anm1s)):\n anm1s[j] = [ anm1s[j] + [i] for i in range(m)]\n anm1s = sum(anm1s, [])\n anm1s = [sum([Z_Fbasis[i]*a[i] for i in range(self.d)]) for a in anm1s]\n # Minimize trace in class.\n import numpy\n for i in range(len(anm1s)):\n Q = [ [ v(m*x) for v in self.Foo] + [0] for x in Z_Fbasis] + [[v(anm1s[i]) for v in self.Foo] + [10**6]]\n pari_string = '['+';'.join([','.join([\"%s\"%ii for ii in row]) for row in zip(*Q)])+']'\n adj = pari(pari_string).qflll()[self.d]\n anm1s[i] += sum([m*Z_Fbasis[ii]*int(adj[ii])//int(adj[self.d]) for ii in range(self.d)])\n\n self.amaxvals[m-1] = anm1s\n self.a[m-1] = self.amaxvals[m-1].pop()\n self.k = m-2\n\n bl = math.ceil(1.7719*self.n)\n br = max([1./m*(am1**2).trace() + \\\n self.gamma*(1./(m**d)*self.B/self.dF)**(1./(self.n-d)) for am1 in anm1s])\n br = math.floor(br)\n T2s = self.F._positive_integral_elements_with_trace([bl,br])\n self.trace_elts.append([bl,br,T2s])\n\n elif len(a) <= m+1:\n # First few coefficients have been specified.\n # The value of k is the largest index of the coefficients of a which is\n # currently unknown; e.g., if k == -1, then we can iterate\n # over polynomials, and if k == n-1, then we have finished iterating.\n if a[len(a)-1] != 1:\n raise ValueError(\"a[len(a)-1](=%s) must be 1 so polynomial is monic\"%a[len(a)-1])\n\n raise NotImplementedError(\"These have not been checked.\")\n\n k = m-len(a)\n self.k = k\n a = [0]*(k+1) + a\n self.amaxvals = [[]]*m\n for i in range(0,n+1):\n self.a[i] = a[i]\n\n # Bounds come from an application of Lagrange multipliers in degrees 2,3.\n self.b_lower = [-1./m*(v(self.a[m-1]) +\n (m-1.)*math.sqrt(v(self.a[m-1])**2 - 2.*(1+1./(m-1))*v(self.a[m-2]))) for v in self.Foo]\n self.b_upper = [-1./m*(v(self.a[m-1]) -\n (m-1.)*math.sqrt(v(self.a[m-1])**2 - 2.*(1+1./(m-1))*v(self.a[m-2]))) for v in self.Foo]\n if k < m-2:\n bminmax = [lagrange_degree_3(n,v(self.a[m-1]),v(self.a[m-2]),v(self.a[m-3])) for v in 
self.Foo]\n self.b_lower = bminmax[0]\n self.b_upper = bminmax[1]\n\n # Annoying, but must reverse coefficients for numpy.\n gnk = [binomial(j,k+2)*a[j] for j in range(k+2,n+1)]\n self.beta[k+1] = [[self.b_lower] + numpy.roots([v(gnk[i]) for i in range(len(gnk))].reverse()).tolist().sort() + [self.b_upper] for v in self.Foo]\n\n # Now to really initialize gnk.\n self.gnk[k+1] = [[0] + [binomial(j,k+1)*v(a[j]) for j in range (k+2,m+1)] for v in self.Foo]\n else:\n # Bad input!\n raise ValueError(\"a has length %s > m+1\"%len(a))", "def front_column_model_p_gain():", "def fA(self):\n pass", "def part2a_0():\n xs = exampleInput\n phi = Counter({('-BEGIN-', '-FEAT-'): 1.0, ('-FEAT-', 'Beautiful'): 1.0, ('-FEAT-', 'PREV:-BEGIN-'): 1.0, ('-FEAT-', 'NEXT:2'): 1.0, ('-FEAT-', '-CAPITALIZED-'): 1.0, ('-FEAT-', '-POST-CAPITALIZED-'): 0.0})\n phi_ = submission.nerFeatureFunction(0, '-BEGIN-', '-FEAT-', xs)\n grader.requireIsTrue( Counters.approximateEquals(phi, phi_) )\n\n phi = Counter({('-FEAT-', '-SIZE-'): 1.0, ('-SIZE-', 'PREV:Beautiful'): 1.0, ('-SIZE-', 'NEXT:bedroom'): 1.0, ('-SIZE-', '-PRE-CAPITALIZED-'): 1.0, ('-SIZE-', '2'): 1.0, ('-SIZE-', '-POST-CAPITALIZED-'): 0.0, ('-SIZE-', '-CAPITALIZED-'): 0.0})\n phi_ = submission.nerFeatureFunction(1, '-FEAT-', '-SIZE-', xs)\n grader.requireIsTrue( Counters.approximateEquals(phi, phi_) )\n \n phi = Counter({('-SIZE-', '-SIZE-'): 1.0, ('-SIZE-', 'PREV:2'): 1.0, ('-SIZE-', 'bedroom'): 1.0, ('-SIZE-', 'NEXT:-END-'): 1.0, ('-SIZE-', '-CAPITALIZED-'): 0.0, ('-SIZE-', '-PRE-CAPITALIZED-'): 0.0})\n phi_ = submission.nerFeatureFunction(2, '-SIZE-', '-SIZE-', xs)\n grader.requireIsTrue( Counters.approximateEquals(phi, phi_) )", "def test_vec_func2():\n\n c = [1,2]\n p = [1,1]\n def myfunc(x,y):\n a = EF.exp_base(2,x) #base 2 and exponent x\n b = EF.logistic(y)\n c = EF.log(y,2) #log with base 2\n return a + b + c\n\n f_obj=ADiff(myfunc)\n res=f_obj.pJac(c,p)\n\n expectAns={'diff': math.pow(2,c[0])+1/(1 + math.exp(-c[1]))*(1-(1/(1 + math.exp(-c[1]))))+1/((c[1])*math.log(2)), 'value': math.pow(2,c[0])+(1 / (1 + math.exp(-c[1])))+math.log(c[1],2)}\n\n assert res==expectAns", "def __init__(self, folder):\n print \"folder passed is \", folder\n self.folder = folder\n self.geometry = gf.geometry(self.folder)\n self.elements = gf.dictionary_set()\n self.area = np.zeros(shape = (8))\n self.Vol = (self.geometry.properties['span_number']*(self.geometry.properties['span_width']*\n self.geometry.properties['span_height'] + self.geometry.properties['cover_height']\n *self.geometry.properties['span_width']/2))\n self.F = np.zeros(shape = (8, 8))\n of.view_factor(self.geometry, self.F, self.area, self.Vol)\n tran = [self.geometry.properties['tra_cover_out'],0.0,0.0,\n self.geometry.properties['tra_sidewall_out'],\n self.geometry.properties['tra_cover_in'],\n self.geometry.properties['tra_sidewall_in'],0.0,0.0]\n emi = [self.geometry.properties['emi_cover_out'],1.0,1.0,\n self.geometry.properties['emi_sidewall_out'],\n self.geometry.properties['emi_cover_in'],\n self.geometry.properties['emi_sidewall_in'],1.0,1.0] \n self.tr, self.em, self.re = of.optictal_prop(tran,emi)\n if ((self.tr + self.em).any() > 1.0):\n print \"error in optical properties\"\n self.T = np.zeros(shape = (2,10))\n self.RH = np.zeros(shape = (2,10))\n # 8 inside,9 outside \n self.qcond = np.zeros(shape = (2,8))\n self.qconv = np.zeros(shape = (2,8))\n self.qrad = np.zeros(shape = (2,8))\n self.j = np.zeros(shape = (2,8))\n self.g = np.zeros(shape = (2,8))\n self.alpha = np.zeros(shape = (2,8))\n 
deltaT = 300\n RH_in = 0.6\n fg.set_initial_conditions(self.geometry.properties['t_air_inside'],\n 278,\n RH_in,self.T,self.RH , self.geometry.properties['t_air'],self.g,\n self.geometry.properties['sky_temp'])\n self.T, self.j, self.g, self.alpha, self.qrad, self.qconv = fg.solver_T(self.T,self.qrad,self.qconv,self.alpha,self.j,self.g,self.em,self.tr,\n self.geometry.properties['wind_speed'],\n self.F,self.geometry.properties['heat_flux'],1,1.0,self.area,\n self.geometry.properties['rho'],self.geometry.properties['cp'],\n self.Vol,self.geometry.properties['degree_window'],deltaT)", "def ce_fit(inp_image, ref_image, mask_image):\n\thist_res = Util.histc(ref_image, inp_image, mask_image)\n\targs = hist_res[\"args\"]\n\tscale = hist_res[\"scale\"]\n\tdata = [hist_res['data'], inp_image, hist_res[\"ref_freq_bin\"], mask_image, int(hist_res['size_img']), hist_res['hist_len']]\n\tres = amoeba(args, scale, hist_func, 1.e-4, 1.e-4, 500, data)\n\tresu = [\"Final Parameter [A,B]:\", res[0], \"Final Chi-square :\", -1*res[1], \"Number of Iteration :\", res[2]]\n\tcorrected_image = inp_image*res[0][0] + res[0][1]\n\tresult = [resu,\"Corrected Image :\",corrected_image]\n\tdel data[:], args[:], scale[:]\n\treturn result", "def run(self, function, beta):\n if self.info_requested(Info.ok):\n self.info_set(Info.ok, False)\n\n# step = function.step(beta)\n\n z = betanew = betaold = beta\n\n if self.info_requested(Info.time):\n t = []\n if self.info_requested(Info.fvalue):\n f = []\n if self.info_requested(Info.converged):\n self.info_set(Info.converged, False)\n\n for i in xrange(1, max(self.min_iter, self.max_iter) + 1):\n\n if self.info_requested(Info.time):\n tm = utils.time_cpu()\n\n z = betanew + ((i - 2.0) / (i + 1.0)) * (betanew - betaold)\n\n step = function.step(z)\n\n betaold = betanew\n betanew = function.prox(z - step * function.grad(z),\n step)\n\n if self.info_requested(Info.time):\n t.append(utils.time_cpu() - tm)\n if self.info_requested(Info.fvalue):\n f.append(function.f(betanew))\n\n if self.conesta_stop is not None:\n mu_min = self.conesta_stop[0]\n# print \"mu_min:\", mu_min\n mu_old = function.set_mu(mu_min)\n# print \"mu_old:\", mu_old\n stop_step = function.step(betanew)\n# print \"step :\", step\n # Take one ISTA step for use in the stopping criterion.\n stop_z = function.prox(betanew - stop_step \\\n * function.grad(betanew),\n stop_step)\n function.set_mu(mu_old)\n# print \"err :\", maths.norm(betanew - z)\n# print \"sc err:\", (1.0 / step) * maths.norm(betanew - z)\n# print \"eps :\", self.eps\n\n if (1. 
/ stop_step) * maths.norm(betanew - stop_z) < self.eps \\\n and i >= self.min_iter:\n\n if self.info_requested(Info.converged):\n self.info_set(Info.converged, True)\n\n break\n\n else:\n if step > 0.0:\n if (1.0 / step) * maths.norm(betanew - z) < self.eps \\\n and i >= self.min_iter:\n\n if self.info_requested(Info.converged):\n self.info_set(Info.converged, True)\n\n break\n\n else: # TODO: Fix this!\n if maths.norm(betanew - z) < self.eps \\\n and i >= self.min_iter:\n\n if self.info_requested(Info.converged):\n self.info_set(Info.converged, True)\n\n break\n\n self.num_iter = i\n\n if self.info_requested(Info.num_iter):\n self.info_set(Info.num_iter, i)\n if self.info_requested(Info.time):\n self.info_set(Info.time, t)\n if self.info_requested(Info.fvalue):\n self.info_set(Info.fvalue, f)\n if self.info_requested(Info.ok):\n self.info_set(Info.ok, True)\n\n return betanew", "def auxmax_f1_part_i(x,m_ind):\n \n tmp1 = 2.0*auxmaxrho1(x,m_ind)-cfg.a[m_ind,cfg.nfea-1] \n tmp2 = 2.0*auxmaxrho2(x,m_ind)+cfg.a[m_ind,cfg.nfea-1]\n\n # checking the maximum used in auxmaxrho1 \n if (tmp1 > tmp2):\n f = tmp1\n cfg.alpha1[m_ind] = 1 # alpha1 should be ok here. We do not solve aux and real problems at the same time. \n elif (tmp1 < tmp2):\n f = tmp2\n cfg.alpha1[m_ind] = 0 \n else:\n f = tmp2\n cfg.alpha1[m_ind] = 2 \n\n return f", "def u_update(eta_0, eta, eta_lin, w_0, w, w_lin, eta_T_H_L_stacked, premultiplied_lhs = None, nnls_max_iter=50): \n # PREMULTIPLIED LHS IS AN EXTRA ARGUMENT! Set it to None and add solver! \n \"\"\"In the following +[[]] and [:-1] are added to keep thing 1dim array of objects and still multiply it elemtwisely\"\"\" \n# #B.append([]) #THIS IS WRONG, CHANGES THE LIST \n# B_concat = np.concatenate((1/np.sqrt(2*eta))*np.array(B+[[]])[:-1], axis = 0) \n# A_ls = np.concatenate([(1/np.sqrt(2*eta0))*A, B_concat], axis = 0) \n# #print(np.array(B).shape) \n# #print(w[0].shape) \n# #print(w, eta) \n# #w.append([]) THIS IS WRONG, CHANGES THE LIST \n# w_concat = np.concatenate((1/np.sqrt(2*eta))*np.array(w+[[]])[:-1], axis = 0) #[:-1] Added as a hack to keep it one-dim array of objects \n# eta_w = np.expand_dims(1/np.sqrt(2*eta),1)*np.array(w) \n# print(eta_w.shape) \n# b_ls = np.concatenate([(1/np.sqrt(2*eta_0))*w_0, eta_w.flatten()], axis = 0) \n #Use correct broadcasting?\n w_concat = np.concatenate((1/np.sqrt(2*eta))*np.array(w+[[]])[:-1], axis = 0) #[:-1] Added as a hack to keep it one-dim array of objects \n b_ls = np.concatenate([(1/np.sqrt(2*eta_0))*w_0, w_concat, (1/np.sqrt(2*eta_lin))*w_lin], axis = 0) \n# print(np.sum(eta_w.flatten() != w_concat)) \n# premultiplied_time_start = time.time() \n# premultiplied_lhs = eta_T_H_stacked.T.dot(eta_T_H_stacked).toarray() \n# premultiplied_time_end = time.time() \n# print('premultiplying took {}'.format(premultiplied_time_end - premultiplied_time_start)) \n# premultiplied_rhs = eta_T_H_stacked.T.dot(b_ls) \n# u_next = nnls_predotted(premultiplied_lhs, premultiplied_rhs, tol=1e-5) \n# print(eta_T_H_stacked.shape, b_ls.shape) \n# A_ls_t_b = eta_T_H_stacked.T.dot(b_ls) \n# w =scipy.sparse.linalg.spsolve_triangular(RT, A_ls_t_b, lower = True) \n# x = scipy.sparse.linalg.spsolve_triangular(R, w, lower = False) \n# u_next = x \n u_next = scipy.optimize.lsq_linear(eta_T_H_L_stacked, b_ls, bounds = (0, np.inf), tol=1e-3, lsmr_tol=1e-3, max_iter=nnls_max_iter, verbose=1).x \n# u = scipy.optimize.lsq_linear(premultiplied_lhs, premultiplied_rhs, bounds = (0, np.inf), tol=1e-5).x \n return u_next", "def idealOpAmp():", "def __init__(self, 
mesh, bndry, interface, dt, theta, v_max, lambda_s, mu_s, rho_s, \n mu_f, rho_f, result, *args, **kwargs):\n\n self.mesh = mesh\n self.dt = Constant(dt)\n self.theta = theta\n self.t = 0.0\n self.v_max = v_max\n\n self.mu_f = mu_f\n self.rho_f = rho_f\n self.lambda_s = lambda_s\n self.mu_s = mu_s\n self.rho_s = rho_s\n \n self.bndry = bndry\n self.interface = interface\n\n # bounding box tree\n self.bb = BoundingBoxTree()\n self.bb.build(self.mesh)\n\n # Define finite elements\n eV = VectorElement(\"CG\", mesh.ufl_cell(), 2)\t\t# velocity element\n eB = VectorElement(\"Bubble\", mesh.ufl_cell(), mesh.geometry().dim()+1) # Bubble element\n eU = VectorElement(\"CG\", mesh.ufl_cell(), 2)\t\t# displacement element\n eP = FiniteElement(\"DG\", mesh.ufl_cell(), 1)\t\t# pressure element\n\n eW = MixedElement([eV, eB, eU, eB, eP]) # final mixed element\n W = FunctionSpace(self.mesh, eW) # mixed space\n self.W = W\n self.V = FunctionSpace(self.mesh, eV)\n\n # Set boundary conditions\n self.v_in = Expression((\"t<2.0? 0.5*(1.0 - cos(0.5*pi*t))*v_max*4/(gW*gW)*(x[1]*(gW - x[1])): \\\n v_max*4/(gW*gW)*(x[1]*(gW - x[1]))\", \"0.0\"),\n degree = 2, v_max = Constant(self.v_max), gW = Constant(gW), t = self.t)\n\n #info(\"Expression set.\")\n bc_v_in = DirichletBC(self.W.sub(0), self.v_in, bndry, _INFLOW)\n bc_v_walls = DirichletBC(self.W.sub(0), Constant((0.0, 0.0)), bndry, _WALLS)\n bc_v_circle = DirichletBC(self.W.sub(0), Constant((0.0, 0.0)), bndry, _CIRCLE)\n bc_u_in = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), bndry, _INFLOW)\n bc_u_circle = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), bndry, _CIRCLE)\n bc_u_walls = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), bndry, _WALLS)\n bc_u_out = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), bndry, _OUTFLOW)\n self.bcs = [bc_v_in, bc_v_walls, bc_v_circle, bc_u_in, bc_u_walls, bc_u_circle, bc_u_out]\n\n #info(\"Mesh BC.\")\n bc_mesh = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), interface, _FSI)\n self.bcs_mesh = [bc_mesh]\n\n\n #info(\"Normal and Circumradius.\")\n self.n = FacetNormal(self.mesh)\n self.h = Circumradius(self.mesh)\n I = Identity(self.W.mesh().geometry().dim())\n\n # Define functions\n self.w = Function(self.W) # solution to current time step\n self.w0 = Function(self.W) # solution from previous time step\n\n (v__, bv_, u__, bu_, p_) = TestFunctions(self.W)\n\n # sum bubble elements with corresponding Lagrange elements\n v_ = v__ + bv_\n u_ = u__ + bu_\n (v, bv, u, bu, self.p) = split(self.w)\n self.v = v + bv\n self.u = u + bu\n (v0, bv0, u0, bu0, self.p0) = split(self.w0)\n self.v0 = v0 + bv0\n self.u0 = u0 + bu0\n\n\n # define deformation gradient, Jacobian\n self.FF = I + grad(self.u)\n self.FF0 = I + grad(self.u0)\n self.JJ = det(self.FF)\n self.JJ0 = det(self.FF0)\n\n # write ALE mesh movement \n self.gamma = 9.0/8.0\n h = CellVolume(self.mesh)**(self.gamma)\n E = Constant(1.0)\n\n E_mesh = E/h\n nu_mesh = Constant(-0.02)\n\n mu_mesh = E_mesh/(2*(1.0+nu_mesh))\n lambda_mesh = (nu_mesh*E_mesh)/((1+nu_mesh)*(1-2*nu_mesh))\n\n F_mesh = inner(mu_mesh*2*sym(grad(self.u)), grad(u_))*dx(0) \\\n + lambda_mesh*inner(div(self.u), div(u_))*dx(0)\n\n\n # define referential Grad and Div shortcuts\n def Grad(f, F): return dot( grad(f), inv(F) )\n def Div(f, F): return tr( Grad(f, F) )\n\n # approximate time derivatives\n du = (1.0/self.dt)*(self.u - self.u0)\n dv = (1.0/self.dt)*(self.v - self.v0)\n\n # compute velocuty part of Cauchy stress tensor for fluid\n self.T_f = -self.p*I + 2*self.mu_f*sym(Grad(self.v, self.FF))\n 
self.T_f0 = -self.p*I + 2*self.mu_f*sym(Grad(self.v0, self.FF0))\n\n # Compute 1st Piola-Kirhhoff tensro for fluid \n # - for computing surface integrals for forces in postprocessing \n self.S_f = self.JJ *self.T_f*inv(self.FF).T\n \n # write equations for fluid\n a_fluid = inner(self.T_f , Grad(v_, self.FF))*self.JJ*dx(0) \\\n - inner(self.p, Div(v_, self.FF))*self.JJ*dx(0) \\\n + inner(self.rho_f*Grad(self.v, self.FF )*(self.v - du), v_)*self.JJ*dx(0)\n a_fluid0 = inner(self.T_f0, Grad(v_, self.FF0))*self.JJ0*dx(0) \\\n - inner(self.p, Div(v_, self.FF))*self.JJ*dx(0) \\\n + inner(self.rho_f*Grad(self.v0, self.FF0)*(self.v0 - du), v_)*self.JJ0*dx(0)\n\n b_fluid = inner(Div( self.v, self.FF ), p_)*self.JJ*dx(0)\n b_fluid0 = inner(Div( self.v, self.FF ), p_)*self.JJ*dx(0)\n\n self.F_fluid = (self.theta*self.JJ+(1.0 - self.theta)*self.JJ0)*self.rho_f*inner(dv, v_)*dx(0)\\\n + self.theta*(a_fluid + b_fluid) + (1.0 - self.theta)*(a_fluid0 + b_fluid0) \\\n + F_mesh\n\n # compute 1st Piola-Kirchhoff tensor for solid (St. Vennant - Kirchhoff model)\n B_s = self.FF.T *self.FF\n B_s0 = self.FF0.T*self.FF0\n S_s = self.FF *(0.5*self.lambda_s*tr(B_s - I)*I + self.mu_s*(B_s - I))\n S_s0 = self.FF0*(0.5*self.lambda_s*tr(B_s0 - I)*I + self.mu_s*(B_s0 - I))\n\n # write equation for solid\n alpha = Constant(1.0) # Constant(1e10) #\n self.F_solid = rho_s*inner(dv, v_)*dx(1) \\\n + self.theta*inner(S_s , grad(v_))*dx(1) + (1.0 - self.theta)*inner(S_s0, grad(v_))*dx(1) \\\n + alpha*inner(du - (self.theta*self.v + (1.0 - self.theta)*self.v0), u_)*dx(1)\n\n\n dF_solid = derivative(self.F_solid, self.w)\n dF_fluid = derivative(self.F_fluid, self.w)\n\n self.problem = Problem(self.F_fluid, self.F_solid, dF_fluid, dF_solid, self.bcs_mesh, self.bcs)\n self.solver = NewtonSolver()\n\n # configure solver parameters\n self.solver.parameters['relative_tolerance'] = 1e-6\n self.solver.parameters['maximum_iterations'] = 15\n self.solver.parameters['linear_solver'] = 'mumps'\n\n # create files for saving\n if my_rank == 0:\n if not os.path.exists(result):\n os.makedirs(result)\n self.vfile = XDMFFile(\"%s/velocity.xdmf\" % result)\n self.ufile = XDMFFile(\"%s/displacement.xdmf\" % result)\n self.pfile = XDMFFile(\"%s/pressure.xdmf\" % result)\n self.sfile = XDMFFile(\"%s/stress.xdmf\" % result)\n self.vfile.parameters[\"flush_output\"] = True\n self.ufile.parameters[\"flush_output\"] = True\n self.pfile.parameters[\"flush_output\"] = True\n self.sfile.parameters[\"flush_output\"] = True\n with open(result+'/data.csv', 'w') as data_file:\n writer = csv.writer(data_file, delimiter=';', lineterminator='\\n')\n writer.writerow(['time', 'mean pressure on outflow', 'pressure_jump', \n 'x-coordinate of end of beam', 'y-coordinate of end of beam',\n 'pressure difference', \n 'drag_circle', 'drag_fluid', 'drag_solid', 'drag_fullfluid',\n 'lift_circle', 'lift_fluid', 'lift_solid', 'lift_fullfluid'])", "def objective(trial):\n # The parameters that we will calibrate the model for are shown here.\n # Optuna trial i\n BOD = trial.suggest_uniform(\"BOD\", 0, 1) #Review ranges here\n k_r = trial.suggest_uniform(\"k_r\", 0, 1) #Review Ranges here \n \n def ChLa(t):\n return 1 # Need to link to data\n\n def I(x):\n return 1 # Need to link to data\n\n K_z = 2 * 10**(-5) # p.51\n a = K_z\n k_b = 0.1 # Table 5\n th_b = 1.047 # Table 5\n k_r = 0.1 # Table 5\n YCHO2 = 0.0083 # Table 5\n th_p = 1.036 # Table 5\n th_s = 1.065 # Table 5\n th_r = 1.047 # Table 5\n\n def Temp(t):\n \"\"\"\n Function that maps time to temperature\n \"\"\"\n return 
20 # Need to link to data\n\n def P_max(t):\n return 9.6 * 1.036 **(Temp(t) - 20) # Eq. 4\n\n def L_min(t):\n I = 1 # Need to link to PAR data\n K_1 = 0.687 * 1.086**(Temp(t) - 20)\n K_2 = 15\n return I * (1 + 2 * np.sqrt(K_1 / K_2)) / (I + K_1 + I**2 / K_2) # Eq. 5\n \n # f deals with sink and source terms \n def f(x, t):\n return -1 / YCHO2 * k_r * th_r**(Temp(t) - 20) * ChLa(t) + P_max(t) * L_min(t) * ChLa(t) - k_b * th_b**(Temp(t)-20) * BOD \n\n L = 200 # Length of domain\n dt = 1 / 48 # Mesh spacing in t\n F = a * dt # a * dt / dx**2\n T = 100 # Simulation time stop\n\n # Solving the PDE\n DO, x, t, _ = solver_FE_simple(I, a, f, L, dt, F, T)\n \n # Creating some bogus targets while database errors are happening\n DO_data = DO + np.random.random(len(DO))\n\n # Using mean squared error as the measure of fit, where we want\n # to minimize this number\n return ((DO - DO_data)**2).mean()", "def calc_eta_FC(Q_load_W, Q_design_W, phi_threshold, approach_call):\n phi = 0.0\n\n ## Approach A - NREL Approach\n if approach_call == \"A\":\n\n phi = float(Q_load_W) / float(Q_design_W)\n eta_max = 0.425 # from energy.gov\n\n if phi >= phi_threshold: # from NREL-Shape\n eta_el = eta_max - ((1 / 6.0 * eta_max) / (1.0 - phi_threshold)) * abs(phi - phi_threshold)\n\n if phi < phi_threshold:\n if phi <= 118 / 520.0 * phi_threshold:\n eta_el = eta_max * 2 / 3 * (phi / (phi_threshold * 118 / 520.0))\n\n if phi < 0.5 * phi_threshold and phi >= 118 / 520.0 * phi_threshold:\n eta_el = eta_max * 2 / 3.0 + \\\n eta_max * 0.25 * (phi - phi_threshold * 118 / 520.0) / (phi_threshold * (0.5 - 118 / 520.0))\n\n if phi > 0.5 * phi_threshold and phi < phi_threshold:\n eta_el = eta_max * (2 / 3.0 + 0.25) + \\\n 1 / 12.0 * eta_max * (phi - phi_threshold * 0.5) / (phi_threshold * (1 - 0.5))\n\n eta_therm_max = 0.45 # constant, after energy.gov\n\n if phi < phi_threshold:\n eta_therm = 0.5 * eta_therm_max * (phi / phi_threshold)\n\n else:\n eta_therm = 0.5 * eta_therm_max * (1 + eta_therm_max * ((phi - phi_threshold) / (1 - phi_threshold)))\n\n ## Approach B - Empiric Approach\n if approach_call == \"B\":\n\n if Q_design_W > 0:\n phi = float(Q_load_W) / float(Q_design_W)\n\n else:\n phi = 0\n\n eta_el_max = 0.39\n eta_therm_max = 0.58 # * 1.11 as this source gives eff. 
of HHV\n eta_el_score = -0.220 + 5.277 * phi - 9.127 * phi ** 2 + 7.172 * phi ** 3 - 2.103 * phi ** 4\n eta_therm_score = 0.9 - 0.07 * phi + 0.17 * phi ** 2\n\n eta_el = eta_el_max * eta_el_score\n eta_therm = eta_therm_max * eta_therm_score\n\n if phi < 0.2:\n eta_el = 0\n\n return eta_el, eta_therm", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def optimizeposition(areas, omegas, x0, x1, z0, z1):\n\n # initial position of each quadpoint is at the center\n # of the edge connecting the midpoint and a corner point\n rhos = 0.5 * ones(4)\n a = 1 / sqrt(3)\n deltarhos = 0.25 * ones(4) # delta for finite differences\n\n while True: # while method has not converged\n # print(\"################## new iteration #############\")\n rhs = f(rhos, omegas, a, x0, x1, z0, z1, areas)\n print(\"##\")\n print(rhs)\n print(rhos)\n if norm(rhs) < 1e-5:\n break\n mat = df(rhos, omegas, a, x0, x1, z0, z1, areas, deltarhos)\n update = solve(mat, rhs)\n\n rhos += update\n # for i in range(4):\n # rhos[i] = max(0,min(1,rhos[i]))\n \"\"\"\n print(\"the norm of the rhs is \")\n print(norm(rhs))\n print(mat)\n print(\"rhs\")\n print(rhs)\n print(update)\n print(\"rhos\")\n print(rhos)\n \"\"\"\n # print(alpha)\n return rhos", "def calc_chasa_solv2(mol, atoms, fai, residue_names, numint_loos, numvirt_loos, bbtot, numbb,ext_atoms, solv_list):\n #atoms = mol.atoms\n #fai = mol.residue_first_atom_indices\n #residue_names = mol.residue_names\n minres, maxres = construct.get_res_extents(mol)\n\n use_ext = 0\n ext_coords = None\n use_data = 1\n data = zeros(len(atoms), 'd')\n \n if ext_atoms:\n ext_coords = []\n map(lambda x: map(lambda y: ext_coords.append(y), x), ext_atoms)\n ext_coords = array(ext_coords, 'd')\n use_ext = len(ext_atoms)\n\n flags = construct.make_asa_list(mol)\n probe = 1.4\n ndiv = 3\n ext_radius = 1.4\n tot_asa = asa_evaluate(atoms, data, ext_coords, flags, probe,\n ext_radius, use_data, use_ext, ndiv)\n p_solv_nrg = 0.0\n ap_solv_nrg = 0.0\n Gamma_p = 3.0/5.0\n Gamma_hb_oxy = 0.6\n Gamma_ap = 0.03\n CHASA = 0.0\n for i in xrange(minres,maxres):\n rname = residue_names[i]\n start = fai[i]\n end = fai[i+1]\n occ = 0.0\n for j in range(start, end):\n atom = atoms[j]\n residue_num = int(mol.res_pdb_number[atom.resnum])\n if atom.name == ' N ':\n if solv_list[i][0][2] > 0:\n p_solv_nrg = p_solv_nrg - (Gamma_p *(solv_list[i][0][2]))\n# elif solv_list[i][0][2] < 0:\n# p_solv_nrg = p_solv_nrg + non_hbd_score\n\n elif atom.name == ' O ':\n if solv_list[i][1][2] > 0:\n if solv_list[i][1][3] == 0:\n p_solv_nrg = p_solv_nrg - (Gamma_p *(solv_list[i][1][2]))\n elif solv_list[i][1][3] > 0:\n p_solv_nrg = p_solv_nrg - (Gamma_hb_oxy)\n# elif solv_list[i][1][2] < 0:\n# p_solv_nrg = p_solv_nrg + non_hbd_score\n\n elif 'C' in atom.name:\n ap_solv_nrg = ap_solv_nrg + (Gamma_ap * data[j])\n# CHASA = CHASA + data[j]\n\n tot_solv_nrg = ap_solv_nrg + p_solv_nrg\n# print ap_solv_nrg, p_solv_nrg\n\n return tot_solv_nrg", "def errAFunc(a,aexpt,name):\n\tif 'hcp' in name: multFactor = 1\n\telif 'bcc' in name: multFactor = 
3**(0.5)/2.\n\telse:\t\t\t\tmultFactor = 2**(-0.5) #trigonal-shaped primitive cells\n\treturn a - multFactor*aexpt", "def mainsens_exact(model,func_params):\n if (model=='sobol'):\n dim=func_params.shape[0]\n mainsens=np.empty((dim,))\n var=1.0\n for i in range(dim):\n mainsens[i]=1./(3.*(1.+func_params[i])**2)\n var*=(mainsens[i]+1.)\n var-=1.0\n mainsens/=var\n\n elif (model=='ishigami'):\n a=func_params[0]\n b=func_params[1]\n var=a**2/8.+b*np.pi**4/5.+b**2*np.pi**8/18.+0.5\n mainsens=np.empty((3,))\n mainsens[0]=b*np.pi**4/5.+b**2*np.pi**8/50.+0.5\n mainsens[1]=a**2/8.\n mainsens[2]=0.0\n mainsens/=var\n\n elif (model=='poly_exsens'):\n dim=func_params[0]\n mainsens=(0.2/(1.2**dim-1))*np.ones((dim,))\n\n else:\n print('No exact sensitivity available for this function. Exiting.')\n sys.exit(1)\n\n\n return mainsens", "def _autolag(mod, endog, exog, startlag, maxlag, method, modargs=(),\n fitargs=(), regresults=True):\n if not regresults: # pragma: no cover\n # TODO: update docstring\n raise NotImplementedError(\"option `regresults=False` not ported \"\n \"from upstream. _autolag always returns \"\n \"a tuple (icbest, bestlag, results)\")\n\n # TODO: can tcol be replaced by maxlag + 2?\n # TODO: This could be changed to laggedRHS and exog keyword arguments if\n # this will be more general.\n\n results = {}\n method = method.lower()\n for lag in range(startlag, startlag + maxlag + 1):\n mod_instance = mod(endog, exog[:, :lag], *modargs)\n results[lag] = mod_instance.fit()\n\n if method == \"aic\":\n icbest, bestlag = min((v.aic, k) for k, v in results.items())\n elif method == \"bic\":\n icbest, bestlag = min((v.bic, k) for k, v in results.items())\n elif method == \"t-stat\":\n # stop = stats.norm.ppf(.95)\n stop = 1.6448536269514722\n for lag in range(startlag + maxlag, startlag - 1, -1):\n icbest = np.abs(results[lag].tvalues[-1])\n if np.abs(icbest) >= stop:\n bestlag = lag\n icbest = icbest\n break\n else: # pragma: no cover\n raise ValueError(\"Information Criterion %s not understood.\" % method)\n\n return icbest, bestlag, results", "def getEG(n,int_method,func) :\n m = np.asarray([0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,2,2.5,3,3.5,4,\n 4.5,5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10])\n bes = np.asarray([0.5,0.47768,0.44879,0.39831,0.25858,0,0.15502,0.25699,\n 0.30896,0.35245,0.39119,0.51822,0.53678,0.54984,0.55847,\n 0.56395,0.57054,0.57950,0.58402,0.58765,0.59512,0.60214,\n 0.60469,0.61143,0.61789,0.62443,0.63097,0.63694])\n p = np.asarray([1,0.85417,0.94685,1.04467,2.55052,0,1.59086,\n 1.00670,0.88866,0.83763,0.81030,0.76108,0.83093,0.86863,\n 0.89233,0.90909,0.92097,0.93007,0.93735,0.94332,0.94813,\n 0.95193,0.95557,0.95864,0.96107,0.96360,0.96570,\n 0.96788])\n h0 = np.asarray([0,-0.03567,-0.04808,-0.04315,-0.01879,0,0.00041,0.00069,\n 0.00639,0.01405,0.02294,0.07814,0.13994,0.19278,0.23793,\n 0.27678,0.31039,0.33974,0.36585,0.38917,0.41003,0.42891,\n 0.44621,0.46195,0.47644,0.48982,0.50223,0.51379])\n h1 = np.asarray([0,0.26899, 0.10571,0.01763,-0.39382,0,0.15211,0.05665,\n 0.00933,-0.02791,-0.05876,-0.16720,-0.13033,-0.10455 ,\n -0.08618,-0.07208,-0.06179,-0.05369,-0.04715,-0.04176,\n -0.03742,-0.03408,-0.03081,-0.02808,-0.02599,-0.02375,\n -0.02194,-0.02004])\n h2 = np.asarray([0,-0.09016,-0.06893,-0.04971,-0.08828,0,-0.03341,\n -0.03964,-0.04456,-0.04775,-0.04984,-0.05381,-0.03570,\n -0.02476,-0.01789,-0.01333,-0.01028,-0.00812,-0.00653,\n -0.00534,-0.00444,-0.00376,-0.00319,-0.00274,-0.00238,\n -0.00207,-0.00182,-0.00160])\n h3 = 
np.asarray([0,0.03993,0.03363,0.02216,-0.00797,0,0.00899,0.01172,\n 0.01150,0.01026,0.00860,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,\n 0])\n \n if (func == True) :\n int_bes = splev(n,splrep(m, bes))\n int_p = splev(n,splrep(m, p))\n int_h0 = splev(n,splrep(m, h0))\n int_h1 = splev(n,splrep(m, h1))\n int_h2 = splev(n,splrep(m, h2))\n int_h3 = splev(n,splrep(m, h3))\n else :\n int_bes = griddata(m, bes, n, method=int_method)\n int_p = griddata(m, p, n, method=int_method)\n int_h0 = griddata(m, h0, n, method=int_method)\n int_h1 = griddata(m, h1, n, method=int_method)\n int_h2 = griddata(m, h2, n, method=int_method)\n int_h3 = griddata(m, h3, n, method=int_method)\n \n return np.asarray([int_bes,int_p,int_h0,int_h1,int_h2,int_h3])", "def apply(self) -> None:", "def apply(self) -> None:", "def refugia_adj_5_full(params, ns):\n #22 parameters \n nu1_1a, nu1_1b, nu1_2, nu1_3, nuA_a, nuA_b, nu2_2, nu2_3, nu3_2, nu3_3, m1_12, m1_21, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T1a, T1b, T2, T3 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T1a\n nu_T1a = [nu1_1a, nuA_a]\n fs.integrate(nu_T1a, T1a)\n ## Population function and migration matrix for T1b\n nu_T1b = [nu1_1b, nuA_b] \n mig1 = numpy.array([[0, m1_12],[m1_21, 0]])\n fs.integrate(nu_T1b, T1b, m=mig1) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T2\n nu_T2 = [nu1_2, nu2_2, nu3_2]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3\n nu_T3 = [nu1_3, nu2_3, nu3_3]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3) \n return fs", "def fun_a(self):\n pass", "def func():", "def exo2():", "def beam_search(X, u, w, b, relLabels):\n\n candidate_paths = [[] for _ in range(10)] # contains the candidate label sets\n candidate_vals =[[] for _ in range(10)] # contains the label values (-1/1) for each candidate set\n candidate_scores = [0. 
for _ in range(10)]\n min_score = -1000\n\n iter = 0\n start = 0\n while True:\n # print(\"Iter: \", iter)\n intermediate_paths = {}\n # intermediate_paths_val = []\n interim_scores = []\n hash_table = {}\n\n cnt_paths = 0\n for cp in range(5):\n labels_curr = candidate_paths[cp]\n labels_val_curr = candidate_vals[cp]\n scores_curr = candidate_scores[cp]\n Y = -np.ones((10, 1))\n for lv in range(len(labels_val_curr)):\n Y[labels_curr[lv]] = labels_val_curr[lv]\n\n for l in range(10):\n candidate_interim = labels_curr[:]\n candidate_vals_interim = labels_val_curr[:]\n # if l in labels_curr:\n # continue\n\n temp_relLabels = []\n for lc in range(len(labels_curr)):\n temp_relLabels.extend(relLabels[labels_curr[lc]])\n\n # temp_relLabels = np.array(list(set(temp_relLabels)))\n temp_relLabels = np.array(list(set(relLabels[l]).intersection(set(labels_curr))))\n model_pos = returnModelVal(X, Y, 1.0, u[l], u[l], b[l][0], np.array(temp_relLabels))\n candidate_interim.append(l)\n\n if model_pos < 0:\n # print('hello')\n candidate_vals_interim.append(-1)\n interim_scores.append(-model_pos)\n else:\n candidate_vals_interim.append(1)\n interim_scores.append(model_pos)\n\n hash_table[cnt_paths] = candidate_interim\n intermediate_paths[cnt_paths] = candidate_vals_interim\n cnt_paths += 1\n # For the first iteration, just iterate once - all labels in one iteration\n if start == 0:\n start = 1\n break\n\n temp_paths = intermediate_paths\n interim_zip = zip(intermediate_paths, interim_scores)\n sorted_scores = sorted(interim_zip, key=lambda x: x[1], reverse=True)[:5]\n intermediate_paths, scores = zip(*sorted_scores)\n\n temp_cand = []\n temp_val = []\n for i in range(len(intermediate_paths)):\n temp_cand.append(hash_table[intermediate_paths[i]])\n temp_val.append(temp_paths[intermediate_paths[i]])\n # candidate_scores[i] += scores[i]\n\n candidate_paths = temp_cand\n candidate_vals = temp_val\n print(candidate_paths)\n print(candidate_vals)\n # print(scores)\n # candidate_scores = scores\n\n # Exit condition from loop\n # if max(interim_scores) < min_score:\n # break\n #\n # min_score = min(interim_scores)\n\n iter += 1\n if iter > 5:\n break\n\n candidate_dict = {}\n for i in range(5):\n for c in range(len(candidate_paths[i])):\n if candidate_paths[i][c] not in candidate_dict:\n candidate_dict[candidate_paths[i][c]] = candidate_vals[i][c]\n elif candidate_dict[candidate_paths[i][c]] != 2:\n if candidate_dict[candidate_paths[i][c]] != candidate_vals[i][c]:\n candidate_dict[candidate_paths[i][c]] = 2.\n\n print(candidate_dict)\n exit()\n return candidate_dict", "def get_sol(self):", "def _advanced(args, config):\n info('running on advanced function')\n\n config['gene'] = args.gene_name\n config['adj_gene'] = args.adjust\n config['refer_normal'] = args.refer_normal\n config[\"protein_gene\"] = _gene_ann(args.gene_annotation)\n\n if config['gene']:\n exp_mat = _read_in(config)\n config['exp_mat'] = exp_mat\n config[\"protein_gene\"] = list(\n set(config[\"protein_gene\"]) & set(exp_mat.columns.tolist()))\n else:\n info('gene not given')\n sys.exit(1)\n advanced._run_advanced(config)", "def __call__(self, E, a):\n # get the nearest grid locations for the energy -> masked array\n E_idx = np.searchsorted(self.energy.to('eV').flat,\n E.to('eV').flat)[:, None]\n mE_idx = np.ma.array(\n E_idx - 1,\n mask=np.logical_or(E_idx == 0,\n E_idx == self.energy.size))\n # compute the weight factor\n E_w = np.log(E / np.ma.take(self.energy, mE_idx) / E.units) \\\n / np.ma.take(self._E_log_steps, mE_idx)\n\n # get the nearest 
grid locations for the angle -> masked array\n qu = self.q.units\n search_a = ur.wraps(None, [qu, qu])(np.searchsorted)\n a_idx = search_a(self.q, a)\n ma_idx = np.ma.array(\n a_idx - 1,\n mask=np.logical_or(a_idx == 0,\n a_idx == self.q.size))\n # compute the weight factor\n a_w = (a - np.ma.take(self.q, ma_idx)) \\\n / np.ma.take(self._q_steps, ma_idx)\n\n # take elements from a masked NdArray\n def take(a, *ix):\n i = np.meshgrid(*ix[::-1])[::-1]\n m = reduce(np.logical_or, [j.mask for j in i])\n return np.ma.array(a[[j.filled(0) for j in i]], mask=m)\n\n new_cs = (1 - E_w) * (1 - a_w) * take(self.cs, mE_idx, ma_idx) \\\n + E_w * (1 - a_w) * take(self.cs, mE_idx + 1, ma_idx) \\\n + (1 - E_w) * a_w * take(self.cs, mE_idx, ma_idx + 1) \\\n + E_w * a_w * take(self.cs, mE_idx + 1, ma_idx + 1)\n\n # set values outside the range to zero\n return new_cs.filled(0.0) * self.cs.units", "def auxmax_cc_piece(x,k_ind,m_ind):\n \n # Adding new linear function as a last function:\n # The first line. If k_ind = nomax-1, this is a new line, otherwise an old one.\n line_start=cfg.nfea*sum(cfg.jk[i] for i in range(k_ind))\n if cfg.jk[k_ind]==1 and k_ind==cfg.nomax-1: #\n print \"hihu0\"\n f_cc=np.dot(x[0:cfg.nfea-1],cfg.a[m_ind,:cfg.nfea-1])+x[cfg.nfea-1]\n return f_cc\n else:\n print \"hihu1\",line_start\n f_cc=np.dot(cfg.xprev[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+cfg.xprev[line_start+cfg.nfea-1]\n \n cfg.min_line[k_ind,m_ind] = 0 # a global variable to save the smallest value.\n \n # Next lines\n line_start += cfg.nfea\n for j in range(1,cfg.jk[k_ind]-1): # Everything but the first and last.\n \n f_tmp = np.dot(cfg.xprev[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+cfg.xprev[line_start+cfg.nfea-1]\n \n # Minimum of lines\n if f_tmp <= f_cc:\n f_cc = f_tmp\n cfg.min_line[k_ind,m_ind] = j\n line_start += cfg.nfea\n \n \n # The last line.\n if k_ind==cfg.nomax-1:\n \n f_tmp = np.dot(x[0:cfg.nfea-1],cfg.a[m_ind,:cfg.nfea-1])+x[cfg.nfea-1]\n else: \n \n f_tmp = np.dot(cfg.xprev[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+cfg.xprev[line_start+cfg.nfea-1]\n \n # Minimum of lines\n if f_tmp <= f_cc:\n f_cc = f_tmp\n cfg.min_line[k_ind,m_ind] = cfg.jk[k_ind]-1 \n \n \n return f_cc", "def extension_func(roi,name,nb,gradient=True,update=True,bandfits=True):\n roi.fit(method=\"minuit\",use_gradient=True)\n if nb>1:\n for i in range(nb):\n roi.fit_extension(which=name,use_gradient=True)\n roi.fit(method=\"minuit\",use_gradient=True)\n else :\n roi.fit_extension(which=name,update=update,bandfits=bandfits)\n roi.fit(method=\"minuit\",use_gradient=True)\n \n ll=-1.0*roi.logLikelihood(roi.parameters())\n return ll,roi.TS(which=name,quick=False,method=\"minuit\")", "def solver(output_folder, prior_filename, data_filename, Lpost, dpost, resume=True, test_plot=False):\n\n def log_prior(cube, ndim, nparams):\n cube[0] = cube[0]*(F_lim[1] - F_lim[0]) + F_lim[0]\n cube[1] = cube[1]*(A_lim[1] - A_lim[0]) + A_lim[0]\n cube[2] = cube[2]*(Arel_lim[1] - Arel_lim[0]) + Arel_lim[0]\n cube[3] = cube[3]*(Ti_lim[1] - Ti_lim[0]) + Ti_lim[0]\n\n for idx, (w, amp_lim) in enumerate(zip(w_extra, Arel_extra), 4):\n cube[idx] = cube[idx]*(amp_lim[1] - amp_lim[0]) + amp_lim[0]\n\n def log_likelihood(cube, ndim, nparams):\n # I want to fix this at some point\n # i = random.randint(0, nL-1)\n i = np.random.choice(nL)\n L = Lpost[i]\n d = dpost[i]\n # L = 0.380173301412519577E+05\n # d = 0.883628502371783142E+00\n # amps, w, mass, V, Ti = build_function_parameters(cube, nparams)\n\n amps 
= [cube[1]*cube[2], cube[1]]\n w = list(w0)\n mass = list(mu)\n Ti = [0.025, cube[3]]\n V = [0.0, 0.0]\n\n vals = forward_model(r, L, d, cube[0], w, mass, amps, Ti,\n V, nlambda=2000)\n #vals = offset_forward_model(r, L, d, cube[0], w, mass, amps, Ti,\n # V, sm_ang=False, nlambda=2000, coeff=0.4)\n # trying to q offset here\n #vals += cube[1] * 0.15 / (1.0 + cube[0])\n\n chisq = np.sum((vals - sig)**2 / error**2)\n return -chisq / 2.0\n\n def build_function_parameters(cube, nparams):\n \"\"\"\n Helper function for building some intermediate lists of parameters\n needed for the forward q.\n\n Note that you need to be careful with the cube parameter. It is not a\n python list! I believe it is some kind of fortran array. For example,\n you cannot call len() on it.\n \"\"\"\n amps = [0.0 for _ in range(nparams-4+2)]\n amps[0] = cube[2]\n amps[1] = 1.0\n for idx, x in enumerate(amps[2:], 2):\n amps[idx] = cube[idx+2]\n #amps.extend([x for x in list(cube[4:])])\n amps = [x * cube[1] for x in amps]\n\n w = [x for x in w0]\n w += w_extra\n\n mass = [x for x in mu]\n mass += [mu[0] for _ in w_extra]\n\n V = [0.0 for _ in mass]\n\n #Ti = [0.025*1000.0/300.0, cube[3]]\n #Ti += [0.025*1000.0/300.0 for _ in w_extra]\n\n Ti = [0.025 for _ in w]\n Ti[1] = cube[3]\n\n return amps, w, mass, V, Ti\n\n with open(prior_filename, 'r') as infile:\n prior = json.load(infile, parse_float=np.float64)\n\n data = io.h5_2_dict(data_filename)\n\n nL = len(Lpost)\n ix = data['fit_ix']['0'][0:-1:3]\n r = data['r'][ix]\n sig = data['sig'][ix]\n error = data['sig_sd'][ix]\n\n F_lim = prior['F_lim']\n A_lim = (0.6*np.max(sig), 1.4*np.max(sig))\n Arel_lim = prior['Arel_lim']\n Ti_lim = prior['Ti_lim']\n w_extra = prior['w_extra']\n Arel_extra = prior['Arel_extra']\n\n assert len(w_extra) == len(Arel_extra)\n\n n_params = 4 + len(w_extra)\n folder = abspath(output_folder)\n\n print('There are {0:d} paremeters for MultiNest'.format(n_params))\n\n if test_plot:\n npts = 30\n test_sig = np.zeros((npts, len(r)))\n for i in range(npts):\n j = random.randint(0, nL-1)\n L = Lpost[j]\n d = dpost[j]\n cube = [random.random() for _ in range(n_params)]\n log_prior(cube, None, None)\n amps, w, mass, V, Ti = build_function_parameters(cube, n_params)\n test_sig[i, :] = forward_model(r, L, d, cube[0], w, mass, amps, Ti,\n V, sm_ang=False, nlambda=2000)\n\n # fig, ax = plt.subplots()\n # for i in xrange(npts):\n # ax.plot(r, test_sig[i, :], 'C0')\n # ax.errorbar(r, sig, yerr=error, fmt='', ecolor='C2', color='C1')\n # plt.show()\n else:\n pymultinest.run(log_likelihood, log_prior, n_params, importance_nested_sampling=False,\n resume=resume, verbose=True, sampling_efficiency='model', n_live_points=100,\n outputfiles_basename=join(folder, 'finesse_'))", "def MidpointFnBuilder(max_speed = 26.8, gain = 0.1, beta = 0.5, duration = 500, bias = 1.0, ratio = 0.5):\n\n def MidpointFn((idx, car), sim, step):\n \"\"\"\n :param idx:\n :param car:\n :param sim:\n :param step:\n :return:\n \"\"\"\n vehID = car[\"id\"]\n\n try:\n [back_car, front_car] = sim.getCars(idx, numBack=1, numForward=1, lane=car[\"lane\"])\n except ValueError:\n # Not enough cars on lane\n return\n\n front_dist = (front_car[\"x\"] - car[\"x\"]) % sim.length\n back_dist = (car[\"x\"] - back_car[\"x\"]) % sim.length\n\n curr_speed = car[\"v\"]\n front_speed = front_car[\"v\"]\n follow_dist = (front_dist + back_dist) * ratio\n delta = front_dist - follow_dist\n # print delta, curr_speed, front_speed, curr_speed-front_speed\n if follow_dist < front_dist and curr_speed < max_speed:\n # 
speed up\n new_speed = min(curr_speed + beta * (front_speed-curr_speed) + gain * delta + bias, max_speed)\n traci.vehicle.slowDown(vehID, new_speed, duration) # 2.5 sec\n # print \"t=%d, FASTER, %0.1f -> %0.1f (%0.1f) | d=%0.2f = %0.2f vs %0.2f\" % \\\n # (step, curr_speed, new_speed, front_speed, delta, front_dist, follow_dist)\n elif follow_dist > front_dist:\n # slow down\n new_speed = max(curr_speed + beta * (front_speed-curr_speed) + gain * delta + bias, 0)\n traci.vehicle.slowDown(vehID, new_speed, duration) # 2.5 sec\n # print \"t=%d, SLOWER, %0.1f -> %0.1f (%0.1f) | d=%0.2f = %0.2f vs %0.2f\" % \\\n # (step, curr_speed, new_speed, front_speed, delta, front_dist, follow_dist)\n\n return MidpointFn", "def fol_fc_ask(KB, alpha):\n while True:\n new = {}\n for r in KB.clauses:\n ps, q = parse_definite_clause(standardize_variables(r))\n raise NotImplementedError", "def run_qae_optimization(training_states, n_repetitions, exact=no_noise, noisy=gate_error):\n result_list = []\n def proxy(params, training_states, n_repetitions, exact=no_noise, noisy=gate_error):\n \"\"\"Embedded function version\n \"\"\"\n input_list = fix_list(params, all_param_array=all_param, var_param_array=var_param, fixed_vals_array=fixed_vals)\n fidelities = []\n for training_state in training_states:\n fid = cusp_stage2.compute_stage2_cost_function(*input_list, alpha=training_state, n_repetitions=n_repetitions,\n exact=exact, noisy=noisy)\n fidelities.append(fid)\n avg_fid = np.mean(fidelities)\n result_list.append(1-avg_fid)\n print(1-avg_fid)\n return 1. - avg_fid\n\n \n # Initialize parameters\n half_turn_min = 0\n half_turn_max = 2\n init_params = np.random.uniform(low=half_turn_min, high=half_turn_max,\n size=num_param)\n\n # Optimization using Nelder-Mead.\n h2_qae_wrap = lambda params: proxy(params, training_states=training_states,\n n_repetitions=n_repetitions, exact=exact, noisy=noisy)\n \n if noisy:\n maxiter = 60\n else:\n maxiter = None\n \n res = minimize(h2_qae_wrap, init_params, args=(),\n method='Nelder-Mead', tol=None, \n options={'disp': False, 'maxiter': maxiter, 'xatol': 0.001,\n 'return_all': False, 'fatol': 0.001})\n np.savetxt('stage2_data.csv',result_list, delimiter=',')\n return res.x", "def execMaxpTabu(y, w, threshold=100.0, maxit=2, tabuLength=5, typeTabu=\"exact\"):\n print(\"Running max-p-regions model (Duque, Anselin and Rey, 2010)\")\n print(\"Local search method: Tabu Search\")\n print(\"Number of areas: \", len(y))\n print(\"threshold value: \", threshold)\n distanceType = \"EuclideanSquared\"\n distanceStat = \"Centroid\";\n objectiveFunctionType = \"SS\";\n selectionType = \"Minimum\";\n numRegionsType = \"EndogenousThreshold\";\n\n # CONSTRUCTION PHASE 1: GROWING FEASIBLE REGIONS\n\n start = tm.time()\n\n # print w\n # print y\n\n am = AreaManager(w, y, distanceType)\n maxP = 0\n bestCandidates = {}\n for i in range(maxit):\n\n # print \"**** Iteration %d of %d ...\"%(i+1,maxit)\n\n rm = RegionMaker(am,\n distanceType = distanceType,\n distanceStat = distanceStat,\n selectionType = selectionType,\n objectiveFunctionType = objectiveFunctionType,\n numRegionsType = numRegionsType,\n threshold = threshold)\n numRegions = len(rm.feasibleRegions)\n rm.getObj()\n\n # print \"rm.feasibleRegions\",rm.feasibleRegions\n # print \"obj\",rm.getObj()\n\n if numRegions > maxP:\n bestCandidates = {}\n maxP = numRegions\n obj = rm.objInfo\n bestCandidates[obj] = rm.feasibleRegions\n if numRegions == maxP:\n obj = rm.objInfo\n if obj in bestCandidates:\n pass\n else:\n bestCandidates[obj] = 
rm.feasibleRegions\n else:\n pass\n\n # print \"bestCandidates\", bestCandidates\n\n ofValues = list(bestCandidates.keys())\n basicMemory = BasicMemory()\n while len(ofValues) >= 1:\n\n # RECREATE SOLUTION\n\n rm.resetNow()\n minOfValue = min(ofValues)\n ofValues.remove(minOfValue)\n partialSolution = bestCandidates[minOfValue]\n\n # print \"ASSIGNING ENCLAVES\"\n # print partialSolution\n\n regionId = 0\n for growReg in partialSolution:\n seedGrowReg = partialSolution[growReg][0]\n rm.assignSeeds(seedGrowReg, regionId)\n partialSolution[growReg].remove(seedGrowReg)\n if len(partialSolution[growReg]) >= 1:\n for areaInGrow in partialSolution[growReg]:\n rm.assignArea(areaInGrow, regionId)\n regionId += 1\n\n # CONSTRUCTION PHASE 2: ENCLAVES ASSIGNATION\n\n rm.feasibleRegions = copy.deepcopy(rm.region2Area)\n rm.getIntraBorderingAreas()\n rm.newExternal = set(rm.unassignedAreas)\n if len(rm.unassignedAreas) != 0:\n rm.constructionStage = \"enclaves\"\n while len(rm.unassignedAreas) != 0:\n rm.constructRegions()\n rm.objInfo = rm.getObjective(rm.region2Area)\n rm.feasibleRegions = copy.deepcopy(rm.region2Area)\n rm.getIntraBorderingAreas()\n\n # print \"ASSIGNED SOLUTION\"\n # print \"OBJ: \", rm.getObjective(rm.region2Area), rm.returnRegions()\n\n rm.calculateRegionValueThreshold()\n\n # LOCAL SEARCH\n\n rm.calcObj()\n convTabu = min(10,old_div(len(y),maxP)) # convTabu=230*numpy.sqrt(maxP)\n\n # print \"###ENTERING TABU\",rm.objInfo,rm.returnRegions()\n\n rm.tabuMove(tabuLength, convTabu = convTabu, typeTabu=typeTabu)\n rm.calcObj()\n\n # print \"***** AFTER TABU\",rm.objInfo,rm.returnRegions()\n # EVALUATE SOLUTION\n\n if rm.objInfo < basicMemory.objInfo:\n basicMemory.updateBasicMemory(rm)\n time = tm.time() - start\n Sol = basicMemory.regions\n Of = basicMemory.objInfo\n print(\"FINAL SOLUTION: \", Sol)\n print(\"FINAL OF: \", Of)\n output = { \"objectiveFunction\": Of,\n \"runningTime\": time,\n \"algorithm\": \"maxpTabu\",\n \"regions\": len(Sol),\n \"r2a\": Sol,\n \"distanceType\": distanceType,\n \"distanceStat\": distanceStat,\n \"selectionType\": selectionType,\n \"ObjectiveFuncionType\": objectiveFunctionType}\n print(\"Done\")\n return output", "def f1_score(self):", "def process_solve_kwargs(**kwargs):\n\n tol = kwargs.get('tol', DEFAULT_TOL)\n maxiter = kwargs.get('maxiter', MAX_ITER)\n Ainv = kwargs.get('Ainv', None)\n verbose = kwargs.get('verbose', False)\n\n if VERBOSE:\n print(\"tol:\", tol)\n print(\"maxiter:\", maxiter)\n print(\"Ainv:\", Ainv)\n\n return tol, int(maxiter), Ainv, verbose", "def _evalfunc_nonlin(self, ai_patch, apr_points, elts=None):\n # loop over fields involved in the function\n# li_nfields = len(self.func_arguments)\n# print \"li_nfields=\", li_nfields\n\n list_values_F = []\n for F in self.func_arguments:\n list_val = F.eval(ai_patch, elts)\n# print \"argfunc id : \", F.id\n# print \"argfunc coefs : \", F.get()\n# print \"eval on grids \", list_val\n\n list_values_F.append(list_val)\n\n # TODO to change when passed to ndof > 1\n lpr_val = self.func(list_values_F, apr_points)\n# print \"current Field : \", self.id\n# print \"lpr_val=\", lpr_val\n# print \"lpr_val.shape = \", lpr_val.shape\n return lpr_val", "def get_likelihood(\n self,\n qb,\n inv_fish,\n map_tag=None,\n null_first_cmb=False,\n lmin=33,\n lmax=250,\n mcmc=True,\n alpha_tags=[\"95\", \"150\"],\n beam_tags=[\"95\", \"150\"],\n r_prior=[0, np.inf],\n alpha_prior=[0, np.inf],\n res_prior=None,\n beam_prior=[0, 1],\n betad_prior=[0, 1],\n dust_amp_prior=[0, np.inf],\n dust_ellind_prior=[0, 
1],\n num_walkers=50,\n num_steps=20000,\n converge_criteria=0.01,\n reset_backend=None,\n file_tag=None,\n ):\n\n for x in [\n r_prior,\n alpha_prior,\n res_prior,\n beam_prior,\n betad_prior,\n dust_amp_prior,\n dust_ellind_prior,\n ]:\n if x is not None:\n x[:] = [float(x[0]), float(x[1])]\n\n save_name = \"like_mcmc\"\n if not mcmc:\n alpha_prior = None\n res_prior = None\n beam_prior = None\n betad_prior = None\n dust_amp_prior = None\n dust_ellind_prior = None\n\n # no template cleaning if there aren't any templates specified\n if not getattr(self, \"template_cleaned\", False):\n alpha_prior = None\n\n # null out unused priors\n self.template_alpha = getattr(self, \"template_alpha\", None)\n if self.template_alpha is None or all(\n [x is None for x in self.template_alpha.values()]\n ):\n alpha_prior = None\n\n # count alpha parameters to fit\n alpha_tags = [x for x in alpha_tags if x in self.map_tags_orig]\n if not len(alpha_tags):\n alpha_prior = None\n\n num_alpha = 0\n if alpha_prior is not None:\n num_alpha = len(alpha_tags)\n\n # count beam parameters to fit\n beam_tags = [x for x in beam_tags if x in self.map_tags_orig]\n if not len(beam_tags):\n beam_prior = None\n\n num_beam = 0\n if beam_prior is not None:\n num_beam = len(beam_tags)\n\n if not any([k.startswith(\"res_\") for k in qb]):\n res_prior = None\n\n if np.any(\n [\n betad_prior is not None,\n dust_amp_prior is not None,\n dust_ellind_prior is not None,\n ]\n ):\n dust_ell_fit = True\n else:\n dust_ell_fit = False\n\n # bookkeeping: ordered priors\n priors = {\n \"r_prior\": r_prior,\n \"alpha_prior\": alpha_prior,\n \"res_prior\": res_prior,\n \"beam_prior\": beam_prior,\n \"betad_prior\": betad_prior,\n \"dust_amp_prior\": dust_amp_prior,\n \"dust_ellind_prior\": dust_ellind_prior,\n }\n # priors on quantities that affect Dmat_obs or gmat (precalculated)\n obs_priors = [alpha_prior]\n\n # check parameter space\n if all([x is None for x in priors.values()]):\n raise RuntimeError(\"Empty parameter space\")\n\n out = dict(\n r_prior=r_prior,\n alpha_prior=alpha_prior,\n res_prior=res_prior,\n beam_prior=beam_prior,\n betad_prior=betad_prior,\n dust_amp_prior=dust_amp_prior,\n dust_ellind_prior=dust_ellind_prior,\n alpha_tags=alpha_tags,\n num_walkers=num_walkers,\n null_first_cmb=null_first_cmb,\n apply_gcorr=self.apply_gcorr,\n weighted_bins=self.weighted_bins,\n lmin=lmin,\n lmax=lmax,\n )\n\n if mcmc and reset_backend is None:\n ret = self.load_data(\n save_name,\n \"likelihood\",\n bp_opts=True,\n to_attrs=False,\n map_tag=map_tag,\n value_ref=out,\n extra_tag=file_tag,\n )\n if ret is not None and ret.get(\"converged\", False):\n if converge_criteria >= ret.get(\"converge_criteria\", 0.01):\n return ret\n if ret is not None:\n for pname, pval in priors.items():\n if np.all(pval != ret.get(pname, None)):\n ret = None\n # clear chain cache if rerunning, otherwise append to chain by default\n reset_backend = ret is None\n\n out.update(converge_criteria=converge_criteria)\n\n # save state\n if mcmc and reset_backend:\n self.save_data(\n save_name, map_tag=map_tag, extra_tag=file_tag, bp_opts=True, **out\n )\n\n # clear pre-computed quantities\n self.clear_precalc()\n use_precalc = all([x is None for x in obs_priors])\n\n cls_input, cls_noise, cls_debias = self.get_data_spectra()\n\n # extract residual bins, ignoring bins outside of lmin/lmax\n if res_prior is not None:\n bin_def_orig = copy.deepcopy(self.bin_def)\n nbins_res_orig = self.nbins_res\n qb_res = OrderedDict()\n num_res = 0\n for k in list(qb):\n if 
k.startswith(\"res_\"):\n bd = self.bin_def[k]\n good = np.where((bd[:, 1] > lmin) & (bd[:, 0] < lmax))[0]\n # use all qb res in range lmin, lmax\n self.bin_def[k] = bd[good]\n v = qb.pop(k)[good]\n num_res += len(v)\n\n # use average qb res in good range per map\n # self.bin_def[k] = np.array([[lmin, lmax + 1]])\n # v = np.array([(qb.pop(k)[good]).mean()])\n # num_res += 1\n qb_res[k] = v\n self.nbins_res = num_res\n\n # set CMB model bandpowers to unity, since we are computing\n # the likelihood of this model given the data\n if r_prior is None:\n self.log(\"Computing model spectrum\", \"debug\")\n self.warn(\"Beam variation not implemented for case of no r fit\")\n cbl = self.bin_cl_template(map_tag=map_tag)\n cls_model = self.get_model_spectra(qb, cbl, delta=True, cls_noise=cls_noise)\n else:\n qb = copy.deepcopy(qb)\n for spec in self.specs:\n stags = [\"cmb_{}\".format(spec), \"fg_{}\".format(spec)]\n for stag in stags:\n if stag not in qb:\n continue\n qb[stag] = np.ones_like(qb[stag])\n\n self.log(\"Computing r model spectrum\", \"debug\")\n cls_shape_scalar = self.get_signal_shape(\n r=1.0, save=False, component=\"scalar\"\n )\n\n cls_shape_tensor = self.get_signal_shape(\n r=1.0, save=False, component=\"tensor\"\n )\n\n # load tensor and scalar terms separately\n cbl_scalar = self.bin_cl_template(cls_shape_scalar, map_tag)\n cls_model_scalar = self.get_model_spectra(\n qb, cbl_scalar, delta=True, cls_noise=cls_noise\n )\n cbl_tensor = self.bin_cl_template(cls_shape_tensor, map_tag)\n cls_model_tensor = self.get_model_spectra(\n qb, cbl_tensor, delta=False, res=False\n )\n if beam_prior is not None:\n # load beam error term for tensor and scalar\n cbl_scalar_beam = self.bin_cl_template(\n cls_shape_scalar, map_tag, beam_error=True\n )\n cls_mod_scal_beam = self.get_model_spectra(\n qb, cbl_scalar_beam, delta=True, res=False\n )\n cbl_tensor_beam = self.bin_cl_template(\n cls_shape_tensor, map_tag, beam_error=True\n )\n cls_mod_tens_beam = self.get_model_spectra(\n qb, cbl_tensor_beam, delta=False, res=False\n )\n\n # load foreground shape\n if dust_ell_fit:\n cls_shape_dust = self.get_signal_shape(save=False, component=\"fg\")\n # if dust_ellind_prior is None:\n # # can preload shape since not varying ell index\n cbl_fg = self.bin_cl_template(cls_shape_dust, map_tag=map_tag)\n if beam_prior is not None:\n cbl_fg_beam = self.bin_cl_template(\n cls_shape_dust, map_tag, beam_error=True\n )\n\n cbl = copy.deepcopy(cbl_scalar)\n cls_model = copy.deepcopy(cls_model_scalar)\n\n # XXX TODO\n # how to marginalize over the garbage bin?\n\n def parse_params(theta):\n \"\"\"\n Parse array of parameters into a dict\n \"\"\"\n params = {}\n if r_prior is not None:\n params[\"r\"] = theta[0]\n theta = theta[1:]\n if alpha_prior is not None:\n params[\"alpha\"] = theta[:num_alpha]\n theta = theta[num_alpha:]\n if res_prior is not None:\n params[\"res\"] = theta[:num_res]\n theta = theta[num_res:]\n if beam_prior is not None:\n params[\"beam\"] = theta[:num_beam]\n theta = theta[num_beam:]\n if betad_prior is not None:\n params[\"betad\"] = theta[0]\n theta = theta[1:]\n if dust_amp_prior is not None:\n # param for ee and bb\n params[\"dust_amp\"] = theta[:2]\n theta = theta[2:]\n if dust_ellind_prior is not None:\n params[\"dust_ellind\"] = theta[0]\n theta = theta[1:]\n if len(theta):\n raise ValueError(\"Too many parameters to parse\")\n return params\n\n def log_prior(\n r=None,\n alpha=None,\n res=None,\n beam=None,\n betad=None,\n dust_amp=None,\n dust_ellind=None,\n ):\n \"\"\"\n Log prior 
function constructed from input options\n \"\"\"\n values = {\n \"r_prior\": r,\n \"alpha_prior\": alpha,\n \"res_prior\": res,\n \"dust_amp_prior\": dust_amp,\n }\n for v, pval in values.items():\n prior = priors[v]\n if pval is not None and prior is not None:\n if np.any(pval < prior[0]) or np.any(pval > prior[1]):\n return -np.inf\n\n values_gauss = {\n \"beam_prior\": beam,\n \"betad_prior\": betad,\n \"dust_ellind_prior\": dust_ellind,\n }\n # for beam and betad, use gaussian prior\n log_prob = 0.0\n for v, pval in values_gauss.items():\n prior = priors[v]\n if pval is not None and prior is not None:\n pval = np.atleast_1d(pval)\n norm = np.log(1.0 / (prior[1] * np.sqrt(2 * np.pi)))\n chi = (pval - prior[0]) / prior[1]\n log_prob += np.sum(norm - chi ** 2 / 2.0)\n\n return log_prob\n\n def log_like(\n r=None,\n alpha=None,\n res=None,\n beam=None,\n betad=None,\n dust_amp=None,\n dust_ellind=None,\n ):\n \"\"\"\n Log likelihood function constructed from input options\n \"\"\"\n cls_model0 = copy.deepcopy(cls_model)\n\n # compute new template subtracted data spectra\n if alpha is None:\n clsi = cls_input\n else:\n self.get_masked_data(template_alpha=OrderedDict(zip(alpha_tags, alpha)))\n clsi = self.get_data_spectra(do_noise=False)\n\n if beam is not None:\n beam = dict(zip(beam_tags, beam))\n beam_coeffs = dict()\n for xname, (m0, m1) in self.map_pairs_orig.items():\n d = {}\n b0, b1 = [beam.get(m, None) for m in (m0, m1)]\n if b0 is not None:\n d[\"b1\"] = b0\n if b1 is not None:\n d[\"b2\"] = b1\n if b0 is not None:\n d[\"b3\"] = b0 * b1\n beam_coeffs[xname] = d\n\n # compute new signal shape by scaling tensor component by r\n if r is not None:\n for stag, d in cls_model0.items():\n comp, spec = stag.split(\"_\", 1)\n if spec not in [\"ee\", \"bb\"] or comp not in [\"cmb\", \"total\"]:\n continue\n ctag = \"cmb_{}\".format(spec)\n for xname, dd in d.items():\n dd[:] = (\n cls_model_scalar[stag][xname]\n + r * cls_model_tensor[ctag][xname]\n )\n\n if beam is None:\n continue\n beam_term = 0\n for bn, bc in beam_coeffs[xname].items():\n beam_term += bc * (\n cls_mod_scal_beam[ctag][xname][bn]\n + r * cls_mod_tens_beam[ctag][xname][bn]\n )\n dd[:] += beam_term\n\n elif beam is not None:\n for stag, d in cls_model0.items():\n comp, spec = stag.split(\"_\", 1)\n if spec not in [\"ee\", \"bb\"] or comp not in [\"cmb\", \"total\"]:\n continue\n ctag = \"cmb_{}\".format(spec)\n for xname, dd in d.items():\n beam_term = 0\n for bn, bc in beam_coeffs[xname].items():\n beam_term += bc * cls_mod_scal_beam[ctag][xname][bn]\n dd[:] = cls_model_scalar[stag][xname] + beam_term\n\n # fg term, including beam modifications. 
Because mix terms are\n # dependent on dust amp, get model specs here.\n if dust_ell_fit:\n if dust_amp is None:\n qb[\"fg_ee\"][:] = 1\n qb[\"fg_bb\"][:] = 1\n else:\n qb[\"fg_ee\"][:] = dust_amp[0]\n qb[\"fg_bb\"][:] = dust_amp[1]\n if betad is None:\n qb[\"delta_beta\"][:] = 0\n else:\n qb[\"delta_beta\"][:] = betad\n if dust_ellind is not None:\n cbl_fg0 = self.bin_cl_template(\n cls_shape_dust, map_tag=map_tag, fg_ell_ind=dust_ellind\n )\n if beam is not None:\n cbl_fg_beam0 = self.bin_cl_template(\n cls_shape_dust,\n map_tag,\n fg_ell_ind=dust_ellind,\n beam_error=True,\n )\n else:\n cbl_fg0 = cbl_fg\n if beam is not None:\n cbl_fg_beam0 = cbl_fg_beam\n\n cls_model_fg = self.get_model_spectra(\n qb, cbl_fg0, delta=True, res=False\n )\n if beam is not None:\n cls_mod_fg_beam = self.get_model_spectra(\n qb, cbl_fg_beam0, delta=True, res=False\n )\n # add fg field to model, and add fg to total model\n for stag, d in cls_model_fg.items():\n comp, spec = stag.split(\"_\", 1)\n if spec not in [\"ee\", \"bb\"] or comp not in [\"fg\", \"total\"]:\n continue\n ftag = \"fg_{}\".format(spec)\n if stag not in cls_model0:\n cls_model0[stag] = OrderedDict()\n for xname, dd in d.items():\n if xname not in cls_model0[stag]:\n cls_model0[stag][xname] = cls_model_fg[ftag][xname]\n else:\n cls_model0[stag][xname] += cls_model_fg[ftag][xname]\n\n # add beam terms to fg and total fields\n if beam is not None:\n beam_term = 0\n for bn, bc in beam_coeffs[xname].items():\n beam_term += bc * cls_mod_fg_beam[ftag][xname][bn]\n cls_model0[stag][xname] += beam_term\n\n # compute noise model terms\n if res is None:\n clsm = cls_model0\n else:\n res = pt.arr_to_dict(res, qb_res)\n clsm = copy.deepcopy(cls_model0)\n cls_res = self.get_model_spectra(res, cbl)\n for stag, d in cls_res.items():\n if stag not in clsm:\n clsm[stag] = OrderedDict()\n for xname, dd in d.items():\n if xname not in clsm[stag]:\n clsm[stag][xname] = dd\n else:\n clsm[stag][xname] += dd\n\n # compute likelihood\n like = self.fisher_calc(\n qb,\n cbl,\n clsi,\n cls_noise=cls_noise,\n cls_debias=cls_debias,\n cls_model=clsm,\n null_first_cmb=null_first_cmb,\n likelihood=True,\n use_precalc=use_precalc,\n like_lmin=lmin,\n like_lmax=lmax,\n )\n return like\n\n def log_prob(theta):\n \"\"\"\n Log posterior probability from prior and likelihood\n\n Returns log_prior with each step\n \"\"\"\n params = parse_params(theta)\n prior = log_prior(**params)\n if not np.isfinite(prior):\n return -np.inf, -np.inf\n like = log_like(**params)\n if not np.isfinite(like):\n return -np.inf, prior\n return prior + like, prior\n\n # initial values\n x0 = []\n brute_force = True if not mcmc else False # only vary r\n if r_prior is not None:\n x0 += [0.01]\n if alpha_prior is not None:\n alphas = [self.template_alpha[tag] for tag in alpha_tags]\n x0 += [0.01 if a == 0 else a for a in alphas]\n brute_force = False\n if res_prior is not None:\n x0 += list(pt.dict_to_arr(qb_res, flatten=True))\n brute_force = False\n if beam_prior is not None:\n # add a beam term for each frequency\n x0 += [0.01] * len(beam_tags)\n brute_force = False\n if betad_prior is not None:\n x0 += [0.01]\n brute_force = False\n if dust_amp_prior is not None:\n x0 += [1, 1]\n brute_force = False\n if dust_ellind_prior is not None:\n x0 += [0.01]\n brute_force = False\n\n ndim = len(x0)\n if ndim * 2 > num_walkers:\n num_walkers = int(np.round(ndim / float(num_walkers)) * num_walkers * 2)\n self.warn(\n \"Found {} parameters, increasing number of MCMC walkers to {}\".format(\n ndim, 
num_walkers\n )\n )\n x0 = np.array(x0)[None, :] * (1 + 1e-4 * np.random.randn(num_walkers, len(x0)))\n\n if brute_force or (r_prior is not None and ndim == 1):\n self.log(\"Computing brute-force r profile likelihood\", \"info\")\n likefile = self.get_filename(\n save_name, ext=\".txt\", map_tag=map_tag, extra_tag=file_tag, bp_opts=True\n )\n rs = np.linspace(0, 3, 500)\n likes = np.zeros_like(rs)\n for idx, r in enumerate(rs):\n like = log_like(r=r)\n if idx % 20 == 0:\n self.log(\"r = {:.3f}, loglike = {:.2f}\".format(r, like), \"debug\")\n likes[idx] = like\n header = \"{} r likelihood\\nColumns: r, loglike\".format(\n \"Multi-map\" if map_tag is None else \"Map {}\".format(map_tag)\n )\n np.savetxt(likefile, np.column_stack((rs, likes)), header=header)\n\n if not mcmc:\n return [rs, likes]\n\n # run chains!\n import emcee\n\n # setup sampler output file\n filename = self.get_filename(\n save_name, ext=\".h5\", map_tag=map_tag, extra_tag=file_tag, bp_opts=True\n )\n backend_exists = os.path.exists(filename)\n backend = emcee.backends.HDFBackend(filename)\n if backend_exists and backend.shape != (num_walkers, ndim):\n self.warn(\n \"Expected backend of shape ({}, {}), found {}. Resetting\".format(\n num_walkers, ndim, backend.shape\n )\n )\n reset_backend = True\n if reset_backend:\n backend.reset(num_walkers, ndim)\n\n # initialize sampler\n self.log(\"Initializing sampler\", \"info\")\n sampler = emcee.EnsembleSampler(num_walkers, ndim, log_prob, backend=backend)\n if not reset_backend and backend_exists:\n # grab the last sample if appending to an existing run\n x0 = sampler.run_mcmc(None, 1)\n\n # track autocorrelation time\n old_tau = np.inf\n converged = False\n\n self.log(\n \"Starting {} iterations with {} parameters\".format(num_steps, ndim), \"info\"\n )\n for sample in sampler.sample(x0, iterations=num_steps):\n if not sampler.iteration % 10:\n self.log(\"MCMC iteration {}\".format(sampler.iteration), \"debug\")\n # check convergence every 100 steps\n if sampler.iteration % 100:\n continue\n\n # compute autocorrelation time\n tau = sampler.get_autocorr_time(tol=0)\n\n # check convergence\n converged = np.all(tau / converge_criteria < sampler.iteration)\n converged &= np.all(np.abs(old_tau - tau) / tau < converge_criteria)\n self.log(\n \"MCMC iteration {} autocorr time: mean {:.1f} min {:.1f} max {:.1f}\".format(\n sampler.iteration, np.mean(tau), np.min(tau), np.max(tau)\n ),\n \"info\",\n )\n if converged:\n break\n old_tau = tau\n\n out.update(converged=converged, num_steps=sampler.iteration)\n\n # converged posterior distribution\n if converged:\n self.log(\n \"MCMC converged in {} iterations\".format(sampler.iteration), \"info\"\n )\n tau = sampler.get_autocorr_time()\n burnin = int(2 * np.max(tau))\n thin = int(0.5 * np.min(tau))\n samples = sampler.get_chain(discard=burnin, thin=thin, flat=True)\n out.update(tau=tau, burnin=burnin, thin=thin, samples=samples)\n else:\n self.warn(\"MCMC not converged in {} iterations\".format(num_steps))\n\n if res_prior is not None:\n self.bin_def = bin_def_orig\n self.nbins_res = nbins_res_orig\n\n # save and return\n return self.save_data(\n save_name, map_tag=map_tag, extra_tag=file_tag, bp_opts=True, **out\n )", "def performFEA(params, aquifer, size, timestep, t1endtime):\n\n # Initialize parameters\n Hpdf = params[0]\n φpdf = params[1]\n Kpdf = params[2]\n ctinvpdf = 1/params[3]\n Qpdf = params[4]\n cspdf = params[5]\n\n # Calculate total number of time steps\n t1 = round(t1endtime / timestep)\n timeperiod = timestep * 
np.linspace(0, 2*t1, 2*t1+1)\n\n # Initialize boundary conditions\n rw = aquifer.rw #0.1\n rmax = aquifer.rmax #1000\n mu = aquifer.mu #0.31e-3\n elems = 25\n\n # Construct empty containers\n pmatrixwell = np.zeros([size, 2*t1+1])\n Tmatrixwell = np.zeros([size, 2*t1+1])\n\n # Run forward model with finite element method\n for index in range(size):\n pmatrixwell[index, :], Tmatrixwell[index, :] = main(aquifer=aquifer, degree=2, btype=\"spline\", elems=elems, rw=rw, rmax=rmax, H=Hpdf[index], mu=mu,\n φ=φpdf[index], ctinv=ctinvpdf[index], k_int=Kpdf[index], Q=Qpdf[index], timestep=timestep,\n t1endtime=t1endtime)\n\n # save array after each timestep for each run, export matrix from main()\n # save seperate runs in csv file, use mean from each timestep, plot 95% CI with seaborn\n # with open('pmatrix.npy', 'wb') as f:\n # np.save(f, pmatrixwell)\n\n # np.savetxt('data.csv', (col1_array, col2_array, col3_array), delimiter=',')\n\n return pmatrixwell, Tmatrixwell", "def _alpha(self):\n return _handle_ab(self.solution, self.use_const)[0]", "def ExecuteInstanceStochasticAdaptiveRefinementAux_Functionality(current_global_index,pickled_coarse_model,pickled_coarse_project_parameters,pickled_custom_metric_refinement_parameters,pickled_custom_remesh_refinement_parameters,random_variable,current_index,current_analysis_stage,previous_computational_time,open_mp_threads,mapping_flag,pickled_mapping_reference_model,print_to_file,filename):\n\n start_time = time.time()\n # unpickle model and build Kratos Model object\n serialized_model = pickle.loads(pickled_coarse_model)\n current_model = KratosMultiphysics.Model()\n serialized_model.Load(\"ModelSerialization\",current_model)\n del(serialized_model)\n # unpickle parameters and build Kratos Parameters object\n serialized_project_parameters = pickle.loads(pickled_coarse_project_parameters)\n current_project_parameters = KratosMultiphysics.Parameters()\n serialized_project_parameters.Load(\"ParametersSerialization\",current_project_parameters)\n del(serialized_project_parameters)\n # refine if current current_global_index > 0, adaptive refinement based on the solution of previous index\n if (current_index > 0):\n # unpickle metric and remesh refinement parameters and build Kratos Parameters objects\n serialized_custom_metric_refinement_parameters = pickle.loads(pickled_custom_metric_refinement_parameters)\n serialized_custom_remesh_refinement_parameters = pickle.loads(pickled_custom_remesh_refinement_parameters)\n current_custom_metric_refinement_parameters = KratosMultiphysics.Parameters()\n current_custom_remesh_refinement_parameters = KratosMultiphysics.Parameters()\n serialized_custom_metric_refinement_parameters.Load(\"MetricRefinementParametersSerialization\",current_custom_metric_refinement_parameters)\n serialized_custom_remesh_refinement_parameters.Load(\"RemeshRefinementParametersSerialization\",current_custom_remesh_refinement_parameters)\n del(serialized_custom_metric_refinement_parameters,serialized_custom_remesh_refinement_parameters)\n # refine the model Kratos object\n adaptive_refinement_manager = AdaptiveRefinement(current_index,current_model,current_project_parameters,current_custom_metric_refinement_parameters,current_custom_remesh_refinement_parameters)\n refined_model,refined_project_parameters = adaptive_refinement_manager.ComputeAdaptiveRefinement()\n current_model = refined_model\n del(refined_model,refined_project_parameters)\n # constructor analysis stage\n simulation = 
current_analysis_stage(current_model,current_project_parameters,random_variable)\n # add filename flag print_to_file is true\n if (print_to_file):\n simulation.filename = filename\n # add flag if current index is maximum index\n if (current_index == current_global_index):\n simulation.is_current_index_maximum_index = True\n else:\n simulation.is_current_index_maximum_index = False\n # mapping if in current finest level and mapping flag is true\n # otherwise standard behavior\n if (mapping_flag is True and current_index == current_global_index):\n # unpickle mapping reference model and build Kratos Model object\n serialized_mapping_reference_model = pickle.loads(pickled_mapping_reference_model)\n mapping_reference_model = KratosMultiphysics.Model()\n serialized_mapping_reference_model.Load(\"ModelSerialization\",mapping_reference_model)\n del(serialized_mapping_reference_model)\n # send reference model to analysis stage for mapping and set mapping flag to true\n simulation.mapping_reference_model = mapping_reference_model\n simulation.mapping = True\n simulation.Run()\n # mapping if in current finest level and mapping flag is true\n # otherwise standard qoi evaluation\n if (mapping_flag is True and current_index == current_global_index):\n qoi = simulation.MappingAndEvaluateQuantityOfInterest()\n else:\n qoi = simulation.EvaluateQuantityOfInterest()\n # save model and parameters as MpiSerializer Kratos objects\n serialized_finer_model = KratosMultiphysics.MpiSerializer()\n serialized_finer_model.Save(\"ModelSerialization\",simulation.model)\n # pickle model and parameters\n pickled_finer_model = pickle.dumps(serialized_finer_model, 2) # second argument is the protocol and is NECESSARY (according to pybind11 docs)\n del(simulation)\n end_time = time.time()\n computational_time = previous_computational_time + open_mp_threads*(end_time-start_time) # multiply by open mp threads to consider real machine cost\n return qoi,pickled_finer_model,computational_time", "def prayerbead(*arg, **kw):\n # 2012-04-30 07:29 IJMC: Created\n # 2012-05-03 16:35 IJMC: Now can impose gaussian priors\n # 2012-09-17 14:08 IJMC: Fixed bug when shifting weights (thanks\n # to P. 
Cubillos)\n \n #from kapteyn import kmpfit\n import phasecurves as pc\n\n if kw.has_key('axis'):\n axis = kw['axis']\n else:\n axis = None\n\n if kw.has_key('parinfo'):\n parinfo = kw.pop('parinfo')\n else:\n parinfo = None\n\n if kw.has_key('verbose'):\n verbose = kw.pop('verbose')\n else:\n verbose = None\n\n if kw.has_key('step'):\n step = kw.pop('step')\n else:\n step = None\n\n if kw.has_key('maxiter'):\n maxiter = kw.pop('maxiter')\n else:\n maxiter = 3000\n\n if kw.has_key('maxfun'):\n maxfun = kw.pop('maxfun')\n else:\n maxfun = 6000\n\n if kw.has_key('xtol'):\n xtol = kw.pop('xtol')\n else:\n xtol = 1e-12\n\n if kw.has_key('ftol'):\n ftol = kw.pop('ftol')\n else:\n ftol = 1e-12\n\n guessparams = arg[0]\n modelfunction = arg[1]\n nparam = len(guessparams)\n\n if isinstance(arg[-1], dict): \n # Surreptiously setting keyword arguments:\n kw2 = arg[-1]\n kw.update(kw2)\n arg = arg[0:-1]\n else:\n pass\n\n narg = len(arg)\n helperargs = arg[2:narg-2]\n data = np.array(arg[-2], copy=False)\n weights = arg[-1]\n\n if data.ndim > 1:\n print \"I haven't implemented 2D multi-dimensional data handling yet!\"\n else:\n ndata = data.size\n\n \n if kw.has_key('npars'):\n print \"I haven't yet dealt with this for prayerbead analyses!\"\n npars = kw['npars']\n ret = []\n # Excise \"npars\" kw for recursive calling:\n lower_kw = kw.copy()\n junk = lower_kw.pop('npars')\n\n # Keep fixed pairs of joint parameters:\n if kw.has_key('jointpars'):\n jointpars = kw['jointpars']\n for jointpar in jointpars:\n params[jointpar[1]] = params[jointpar[0]]\n\n for ii in range(len(npars)):\n i0 = sum(npars[0:ii])\n i1 = i0 + npars[ii]\n these_params = arg[0][i0:i1]\n ret.append(resfunc(these_params, *arg[1][ii], **lower_kw))\n\n return ret\n\n\n \n\n fitter_args = (modelfunction,) + helperargs + (data, weights, kw)\n #fitter = kmpfit.Fitter(residuals=pc.devfunc, data=fitter_args)\n #fitter.parinfo = parinfo\n #fitter.fit(params0=guessparams)\n fmin_fit = fmin(pc.errfunc, guessparams, args=fitter_args, full_output=True, disp=False, maxiter=maxiter, maxfun=maxfun)\n bestparams = np.array(fmin_fit[0], copy=True)\n bestmodel = modelfunction(*((guessparams,) + helperargs))\n residuals = data - bestmodel\n allfits = np.zeros((ndata, nparam), dtype=float)\n allfits[0] = bestparams\n if verbose: print \"Finished prayer bead step \",\n for ii in range(1, ndata):\n shifteddata = bestmodel + np.concatenate((residuals[ii::], residuals[0:ii]))\n shiftedweights = np.concatenate((weights[ii::], weights[0:ii]))\n shifted_args = (modelfunction,) + helperargs + (shifteddata, shiftedweights, kw)\n\n fmin_fit = fmin(pc.errfunc, bestparams, args=shifted_args, full_output=True, disp=False, maxiter=maxiter, maxfun=maxfun, xtol=xtol, ftol=ftol)\n theseparams = fmin_fit[0]\n\n #lsq_fit = optimize.leastsq(pc.devfunc, bestparams, args=shifted_args, full_output=True)\n #theseparams = lsq_fit[0]\n #lsq_fit = lsq_fit[1]\n #bestchisq = pc.errfunc(bestparams, *fitargs)\n\n\n\n #newfitter = kmpfit.Fitter(residuals=pc.devfunc, data=shifted_args)\n #newfitter.parinfo = parinfo\n #newfitter.ftol = 1e-12\n #newfitter.xtol = 1e-12\n\n\n #try:\n # newfitter.fit(params0=bestparams)\n #except:\n # print \"Fitter crashed -- entering debug. 
Enter 'q' to quit.\"\n #theseparams = newfitter.params\n #del newfitter\n\n #bestmodel = modelfunction(*((guessparams,) + helperargs))\n #residuals = data - bestmodel\n #chisq = pc.errfunc(newfitter.params, *shifted_args)\n allfits[ii] = theseparams\n if verbose: print (\"%i of %1.\" % (ii+1, ndata)),\n #pdb.set_trace()\n\n return allfits", "def aF_oneway(*args):\r\n na = len(args) # ANOVA on 'na' groups, each in it's own array\r\n means = [0]*na\r\n vars = [0]*na\r\n ns = [0]*na\r\n alldata = []\r\n tmp = map(N.array,args)\r\n means = map(amean,tmp)\r\n vars = map(avar,tmp)\r\n ns = map(len,args)\r\n alldata = N.concatenate(args)\r\n bign = len(alldata)\r\n sstot = ass(alldata)-(asquare_of_sums(alldata)/float(bign))\r\n ssbn = 0\r\n for a in args:\r\n ssbn = ssbn + asquare_of_sums(N.array(a))/float(len(a))\r\n ssbn = ssbn - (asquare_of_sums(alldata)/float(bign))\r\n sswn = sstot-ssbn\r\n dfbn = na-1\r\n dfwn = bign - na\r\n msb = ssbn/float(dfbn)\r\n msw = sswn/float(dfwn)\r\n f = msb/msw\r\n prob = fprob(dfbn,dfwn,f)\r\n return f, prob", "def get_problem():\n\n #User Defined Terrain Elevation\n #def terr( x_pos, y_pos ):\n #Defines terrain elevation [m] as a function of x and y positions [m]\n # elev=100.0*(np.sin(0.5*(x_pos/1000.0)))**2.0 #User defined elevation map\n # return elev\n\n #User Defined Tunnel Cost\n #def tunnel(depth):\n #Defines additional cost for placing a 1 meter length of track a non-zero\n #depth below the ground.\n # TunnelCost=(50e3)/(1+np.exp(-(depth-5))) #Tunneling Cost (2016 USD)\n # return TunnelCost\n\n #def bridge(height):\n #Defines additional cost for placing a 1 meter length of track a non-zero\n #heigh above the ground.\n # BridgeCost=10e3*(height/10)**2 #Bridge Cost (2016 USD)\n # return BridgeCost\n\n # Rename this and/or move to optim package?\n problem = beluga.optim.Problem('surftest_noinc')\n\n #Define independent variables\n problem.independent('t', 's')\n\n # Define equations of motion\n problem.state('x','V*cos(hdg)','m') \\\n .state('y','V*sin(hdg)','m') \\\n .state('V','amax*sin(thrA) + eps*(cos(thrA)+cos(hdgA))','m/s') \\\n .state('hdg','cmax/V*sin(hdgA)','rad')\n\n # Define controls\n #problem.control('thrA','rad') \\\n # .control('hdgA','rad')\n problem.control('hdgA','rad')\n\n # Define Cost Functional\n problem.cost['path'] = Expression('1','s')\n\n #problem.cost['path'] = Expression('TimeToUSD+trk*V', 'USD')\n\n #+ \\\n #'(50e3)/(1.0+exp(-1.0*(z-0.0*(sin(0.5*(x/1000.0)))**2.0-5.0)))+'+ \\\n #'10e3*((0.0*(sin(0.5*(x/1000.0)))**2.0-z)/10.0)**2.0','USD')\n\n #Define constraints\n problem.constraints().initial('x-x_0','m') \\\n .initial('y-y_0','m') \\\n .initial('V-V_0','m/s') \\\n .initial('hdg-hdg_0','rad') \\\n .terminal('x-x_f','m') \\\n .terminal('y-y_f','m')\n #.terminal('V-V_f','m/s')\n #.initial('hdg-hdg_0','rad') \\\n\n #Define constants\n problem.constant('g',9.81,'m/s^2') #Acceleration due to gravity\n problem.constant('trk',1,'USD/m') #Basic cost of 1 m of track on ground (10k per m)\n problem.constant('amax',1.0,'m/s^2') #Maximum thrust acceleration of vehicle\n problem.constant('cmax',1.0,'m/s^2') #Maximum allowed centripetal acceleration\n problem.constant('eps',10,'m/s^2') #Error constant\n problem.constant('TimeToUSD',1,'USD/s') #Time is Money!!\n problem.constant('thrA',0,'rad')\n\n #Unit scaling\n problem.scale.unit('m','x') \\\n .unit('s','x/V') \\\n .unit('rad',1) \\\n .unit('USD',1)\n\n #Configure solver\n problem.bvp_solver = algorithms.MultipleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=1000, 
verbose = True, cached = False, number_arcs=2)\n\n #Initial Guess\n problem.guess.setup('auto',start=[0.0,0.0,1.0,pi/4-0.2], costate_guess=-0.1) #City A\n\n #Add Continuation Steps\n problem.steps.add_step().num_cases(10) \\\n .terminal('x', 10) \\\n .terminal('y', 0)\n\n problem.steps.add_step().num_cases(10) \\\n .const('eps', 0.2)\n\n #problem.steps.add_step().num_cases(10) \\\n # .terminal('y', 2*pi*1000) \\\n # .terminal('z', 0.0) \\\n # .terminal('inc', 0.0)\n #^ City B\n return problem", "def boolean_func(experiment):", "def build_model_fn(self):", "def solve_l1(y, A_fun, AT_fun, lambda_l1, reshape_img_fun, show_img_progress=False, alpha=0.2, max_iter=100, solver_tol=1e-6):\n\n\n obj_lss = np.zeros(max_iter)\n x_zs = np.zeros(max_iter)\n u_norms = np.zeros(max_iter)\n times = np.zeros(max_iter)\n\n ATy = AT_fun(y)\n x_shape = ATy.shape\n d = np.prod(x_shape)\n\n def A_cgs_fun(x):\n x = np.reshape(x, x_shape, order='F')\n y = AT_fun(A_fun(x)) + alpha * x\n return vec(y)\n A_cgs = LinearOperator((d,d), matvec=A_cgs_fun, dtype='float')\n\n def compute_p_inv_A(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs, vec(b), x0=vec(z0), tol=1e-3, maxiter=100)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = np.reshape(z, x_shape, order='F')\n return z\n\n\n def A_cgs_fun_init(x):\n x = np.reshape(x, x_shape, order='F')\n y = AT_fun(A_fun(x))\n return vec(y)\n A_cgs_init = LinearOperator((d,d), matvec=A_cgs_fun_init, dtype='float')\n\n def compute_init(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs_init, vec(b), x0=vec(z0), tol=1e-2)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = np.reshape(z, x_shape, order='F')\n return z\n\n # initialize z and u\n z = compute_init(ATy, ATy)\n u = np.zeros(x_shape)\n\n\n plot_normalozer = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0, clip=True)\n\n\n start_time = timeit.default_timer()\n\n for iter in range(max_iter):\n\n # x-update\n net_input = z+u\n Wzu, wbook = wavelet_transform(net_input)\n q = soft_threshold(Wzu, lambda_l1/alpha)\n x = inverse_wavelet_transform(q, wbook, x_shape)\n x = np.reshape(x, x_shape)\n\n # z-update\n b = ATy + alpha * (x - u)\n z = compute_p_inv_A(b, z)\n\n # u-update\n u += z - x;\n\n if show_img_progress == True:\n\n fig = plt.figure('current_sol')\n plt.gcf().clear()\n fig.canvas.set_window_title('iter %d' % iter)\n plt.subplot(1,3,1)\n plt.imshow(reshape_img_fun(np.clip(x, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('x')\n plt.subplot(1,3,2)\n plt.imshow(reshape_img_fun(np.clip(z, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('z')\n plt.subplot(1,3,3)\n plt.imshow(reshape_img_fun(np.clip(net_input, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('netin')\n plt.pause(0.00001)\n\n\n obj_ls = 0.5 * np.sum(np.square(y - A_fun(x)))\n x_z = np.sqrt(np.mean(np.square(x-z)))\n u_norm = np.sqrt(np.mean(np.square(u)))\n\n print('iter = %d: obj_ls = %.3e |x-z| = %.3e u_norm = %.3e' % (iter, obj_ls, x_z, u_norm))\n\n\n obj_lss[iter] = obj_ls\n x_zs[iter] = x_z\n u_norms[iter] = u_norm\n times[iter] = timeit.default_timer() - start_time\n\n if x_z < solver_tol:\n break\n\n infos = {'obj_lss': obj_lss, 'x_zs': x_zs, 'u_norms': u_norms,\n 'times': times, 'alpha':alpha, 'lambda_l1':lambda_l1,\n 'max_iter':max_iter, 'solver_tol':solver_tol}\n\n\n return (x, z, u, infos)", "def 
nnObjFunction(params, *args):\r\n \r\n\r\n \r\n n_input, n_hidden, n_class, training_data, training_label, lambdaval = args\r\n w1 = params[0:n_hidden * (n_input + 1)].reshape( (n_hidden, (n_input + 1)))\r\n w2 = params[(n_hidden * (n_input + 1)):].reshape((n_class, (n_hidden + 1)))\r\n\r\n\r\n rowsToChange=xrange(len(training_label))\r\n \r\n oneKEncoding = np.zeros((len(training_label),10))\r\n \r\n for x,y in izip(rowsToChange,training_label):\r\n oneKEncoding[x,int(y)]=1\r\n \r\n training_label=oneKEncoding\r\n\r\n trans_w1=w1.T\r\n trans_w2=w2.T\r\n \r\n #add bias 1\r\n x=np.column_stack((training_data,np.ones(len(training_data))))\r\n #equation1\r\n eq1=np.dot(x,trans_w1)\r\n #equation 2\r\n z=sigmoid(eq1)\r\n #add bias 1\r\n z=np.column_stack((z,np.ones(len(z))))\r\n #equation 3\r\n eq3=np.dot(z,trans_w2)\r\n #equation 4\r\n o=sigmoid(eq3)\r\n\r\n #-----------------------------------------Calculations for gradient weight vector 2---------------------------------------------\r\n \r\n delta=np.subtract(o,training_label)\r\n eq5=np.sum(np.square(delta))\r\n\r\n dabba=(training_label-o)*(1-o)*o\r\n \r\n grad_w2=np.multiply(-1,np.dot(dabba.T,z)) \r\n \r\n\r\n #-----------------------------------------Calculations for gradient weight vector 1---------------------------------------------\r\n\r\n one_minus_z_into_z = (1-z)*z\r\n \r\n \r\n multiply_by_summation = one_minus_z_into_z*np.dot(dabba,w2)\r\n \r\n grad_w1_without_minus_one = np.dot(np.transpose(multiply_by_summation),x)\r\n \r\n\r\n grad_w1=np.multiply(-1,grad_w1_without_minus_one)\r\n \r\n grad_w1 = np.delete(grad_w1, n_hidden,0) \r\n \r\n\r\n #-----------------------------------------Calculations for gradient object value----------------------------------------\r\n\r\n \r\n obj_val=eq5/len(training_data)\r\n \r\n #-----------------------------------------Regularization of gradient val and weight vector-------------------------------\r\n \r\n obj_val = obj_val+ (lambdaval/(2*len(training_data)))*( np.sum(np.square(w1)) + np.sum(np.square(w2)))\r\n grad_w2 = (grad_w2 + lambdaval*w2 )/ len(training_data) \r\n grad_w1 = (grad_w1 + lambdaval*w1 )/ len(training_data) \r\n \r\n \r\n\r\n #-----------------------------------------Concatenate both the weight vectors---------------------------------------------\r\n\r\n obj_grad = np.array([])\r\n obj_grad = np.concatenate((grad_w1.flatten(), grad_w2.flatten()),0)\r\n return (obj_val,obj_grad)", "def abetacf(a,b,x,verbose=1):\r\n ITMAX = 200\r\n EPS = 3.0e-7\r\n\r\n arrayflag = 1\r\n if type(x) == N.ndarray:\r\n frozen = N.ones(x.shape,N.float_) *-1 #start out w/ -1s, should replace all\r\n else:\r\n arrayflag = 0\r\n frozen = N.array([-1])\r\n x = N.array([x])\r\n mask = N.zeros(x.shape)\r\n bm = az = am = 1.0\r\n qab = a+b\r\n qap = a+1.0\r\n qam = a-1.0\r\n bz = 1.0-qab*x/qap\r\n for i in range(ITMAX+1):\r\n if N.sum(N.ravel(N.equal(frozen,-1)))==0:\r\n break\r\n em = float(i+1)\r\n tem = em + em\r\n d = em*(b-em)*x/((qam+tem)*(a+tem))\r\n ap = az + d*am\r\n bp = bz+d*bm\r\n d = -(a+em)*(qab+em)*x/((qap+tem)*(a+tem))\r\n app = ap+d*az\r\n bpp = bp+d*bz\r\n aold = az*1\r\n am = ap/bpp\r\n bm = bp/bpp\r\n az = app/bpp\r\n bz = 1.0\r\n newmask = N.less(abs(az-aold),EPS*abs(az))\r\n frozen = N.where(newmask*N.equal(mask,0), az, frozen)\r\n mask = N.clip(mask+newmask,0,1)\r\n noconverge = asum(N.equal(frozen,-1))\r\n if noconverge <> 0 and verbose:\r\n print 'a or b too big, or ITMAX too small in Betacf for ',noconverge,' elements'\r\n if arrayflag:\r\n return frozen\r\n else:\r\n return 
frozen[0]", "def evaltr(x_solution): \n \n large = 10.0**30\n pred = np.zeros(cfg.ntrain)\n e0 = 0.0 # mean of observed values\n y=0.0\n for i in range(cfg.ntrain): # Computation of correct piece\n e0 += cfg.a_unscaled[i][-1]\n pind = 0\n ipbest = 0\n pbest = -large # for max\n \n for j1 in range(cfg.nomax):\n ipmin=pind\n pmin=large # for min\n for _ in range(cfg.jk[j1]):\n piece=x_solution[(pind+1)*cfg.nfea-1] \n for j3 in range(cfg.nfea-1): #\n piece += x_solution[pind*cfg.nfea+j3]*cfg.a_unscaled[i][j3]\n if piece < pmin:\n ipmin = pind\n pmin = piece\n pind += 1 \n \n if pmin > pbest:\n ipbest = ipmin\n pbest = pmin\n \n pred[i] = x_solution[(ipbest+1)*cfg.nfea-1] # Computation of prediction\n for j1 in range(cfg.nfea-1):\n pred[i] += x_solution[ipbest*cfg.nfea+j1]*cfg.a_unscaled[i][j1]\n y += pred[i]\n \n y = y/cfg.ntrain \n e0 = e0/cfg.ntrain\n \n # Computation of indices\n rmse = 0.0\n mae = 0.0\n e1 = 0.0\n for i in range(cfg.ntrain):\n rmse += (pred[i]-cfg.a_unscaled[i][-1])**2\n mae += np.abs(pred[i]-cfg.a_unscaled[i][-1]) \n e1 += (cfg.a_unscaled[i][-1] - e0)**2\n ce = 1.0 - rmse/e1 \n rmse = np.sqrt(rmse/cfg.ntrain)\n mae = mae/cfg.ntrain \n\n if cfg.ntrain > 1:\n sx=0.0\n sy=0.0\n rcor=0.0\n for i in range(cfg.ntrain):\n sx += (pred[i]-y)**2\n sy += (cfg.a_unscaled[i][-1]-e0)**2 \n rcor += (pred[i]-y) * (cfg.a_unscaled[i][-1]-e0) \n\n r = rcor/np.sqrt(sx*sy)\n \n return rmse,mae,ce,r", "def MLE_procedure(func, bounds):\n return differential_evolution(func, bounds)", "def exercise_b2_82():\r\n pass", "def fl_search(fun, params: dict, n_iter: int=10)->dict:\n\n\n keys=list(params.keys())\n\n num_points={key: len(value) for key, value in params.items()}\n\n if not all(value == sorted(value) for key, value in params.items()):\n raise Exception(\" Some parameters are not in ascending order\")\n\n lower_point, upper_point=_init_upper_lower_points(keys=keys,num_points=num_points)\n move_up={}\n tracking=[]\n\n\n for _ in range(n_iter):\n # find the move direction for next round\n score,move_up= _find_move_direction(fun=fun,keys=keys,params=params,upper_point=upper_point,\n lower_point=lower_point,move_up=move_up)\n\n # Track the score for the optimization\n if len(tracking) >= 1 and score == tracking[-1]:\n break\n else:\n tracking.append(score)\n param = {}\n for key in keys:\n if move_up[key]:\n param[key] = params[key][upper_point[key]]\n else:\n param[key] = params[key][lower_point[key]]\n\n # Reset the lower_point and upper_point based move direction\n lower_point, upper_point = _reset_upper_lower_points(keys=keys, move_up=move_up,\n num_points=num_points,\n upper_point=upper_point,\n lower_point=lower_point)\n\n\n\n return (param, tracking)", "def mainFunction(f):\n\n #############################################################################\n \n \n # biomass hexagon\n predF = '/vol/v3/lt_stem_v3.1/models/biomassfiaald_20180708_0859/2000/biomassfiaald_20180708_0859_2000_mean.tif'\n trainF = '/vol/v2/datasets/biomass/nbcd/fia_ald/nbcd_fia_ald_biomass_clipped_to_conus.tif'\n shpF = '/vol/v1/general_files/datasets/spatial_data/hexagons/hexagons_conus_albers_30km_with_id.shp'\n trainND = -32768\n predND = -9999\n trgField = 'id'\n descrField = 'id'\n outDir = '/vol/v3/lt_stem_v3.1/evaluation/biomassfiaald_20180708_0859/hexagon_correlation'\n xyLim = (500, 500)\n xLab = 'Reference (tons/ha)'\n yLab = 'Prediction (tons/ha)'\n annoXY = (15,420)\n \n \n \"\"\"\n # cc\n predF = '/vol/v3/lt_stem_v3.1/models/canopy_20180915_1631/2001/canopy_20180915_1631_2001_mean.tif'\n trainF = 
'/vol/v2/stem/conus/reference_rasters/nlcd_2001_canopy_clipped_to_conus_train.tif'\n #shpF = '/vol/v2/datasets/Eco_Level_III_US/us_eco_l3_no_states_multipart.shp'\n shpF = '/vol/v1/general_files/datasets/spatial_data/hexagons/hexagons_conus_albers_30km_with_id.shp'\n trainND = 255\n predND = 255\n trgField = 'id'\n descrField = 'id'\n #trgField = 'US_L3CODE'\n #descrField = 'US_L3NAME'\n #outDir = '/vol/v3/lt_stem_v3.1/evaluation/canopy_20180915_1631/ecoregion_correlation'\n outDir = '/vol/v3/lt_stem_v3.1/evaluation/canopy_20180915_1631/hexagon_correlation'\n xyLim = (100, 100)\n xLab = 'Reference (%)'\n yLab = 'Prediction (%)'\n annoXY = (5,82)\n \"\"\"\n #############################################################################\n\n\n # get color setup\n norm = colors.Normalize(vmin=0, vmax=1)\n f2rgb = cm.ScalarMappable(norm=norm, cmap=cm.get_cmap('YlGnBu_r'))\n \n # open the shapefile\t\n vDriver = ogr.GetDriverByName(\"ESRI Shapefile\")\n vSrc = vDriver.Open(shpF, 0)\n vLayer = vSrc.GetLayer()\n \n commonBox = get_intersec([predF, trainF])\n\n#for f in range(vLayer.GetFeatureCount()):\n feature = vLayer[f]\n name = feature.GetField(trgField)\n print('f: '+str(f))\n outFig = os.path.join(outDir, (trgField.replace(' ','_').lower()+'_'+str(name)+'.png'))\n if os.path.exists(outFig):\n #break\n return\n \n descr = feature.GetField(descrField)\n \n predP, coords = get_zone_pixels(feature, shpF, predF, 1, [commonBox[0], commonBox[2], commonBox[3], commonBox[1]])#.compressed() [commonBox[0], commonBox[2], commonBox[3], commonBox[1]]\n trainP, coords = get_zone_pixels(feature, shpF, trainF, 1, [coords[0], coords[1], coords[2], coords[3]])#.compressed()\n \n predP = ma.masked_equal(predP, predND)\n trainP = ma.masked_equal(trainP, trainND)\n trainP = ma.masked_equal(trainP, 0)\n\n combMask = np.logical_not(np.logical_not(predP.mask) * np.logical_not(trainP.mask))\n predP[combMask] = ma.masked\n trainP[combMask] = ma.masked\n predP = predP.compressed()\n trainP = trainP.compressed()\n if (predP.shape[0] == 0) | (trainP.shape[0] == 0) | (predP==0).all() | (trainP==0).all():\n predP = np.array([0,0,1,1], dtype='float64')\n trainP = np.array([0,0,1,1], dtype='float64')\n mae = round(np.mean(np.absolute(np.subtract(predP, trainP))),1)\n rmse = round(np.sqrt(np.mean((predP-trainP)**2)),1)\n \n\n totPixs = trainP.shape[0]\n sampSize = round(totPixs*1)\n pickFrom = range(sampSize)\n #sampIndex = np.random.choice(pickFrom, size=sampSize)\n sampIndex = pickFrom\n\n r = round(np.corrcoef(trainP[sampIndex], predP[sampIndex])[0][1], 2)\n if (mae == 0) & (r == 1):\n r = 0.0\n rColor = f2hex(f2rgb, r)\n p = sns.jointplot(trainP[sampIndex], predP[sampIndex], kind=\"hex\", color='blue', xlim=(0,xyLim[0]), ylim=(0,xyLim[1]), size=5)\n p.ax_joint.set_xlabel(xLab)\n p.ax_joint.set_ylabel(yLab)\n p.ax_joint.annotate('r: '+str(r)+'\\nrmse: '+str(rmse)+'\\nmae: '+str(mae), annoXY)\n plt.tight_layout()\n outFig = os.path.join(outDir, (trgField.replace(' ','_').lower()+'_'+str(name)+'.png'))\n p.savefig(outFig)\n \n df = pd.DataFrame({'id':name, 'descr':descr, 'r':r, 'rmse':rmse, 'mae':mae, 'color':rColor, 'img':os.path.basename(outFig)}, index=[0])\n outCSV = outFig.replace('.png','.csv')\n df.to_csv(outCSV, ',', index=False)", "def alphabeta_search(self,pos,board,nn):\n\n\t\tdef depth_eval_fn(board,nn):\n\t\t\treturn predict_nn(nn,board)\n\n\t\tdef max_value( board,alpha, beta, depth,a):\n\t\t\t# manual cutoff tests should be faster than reevaluation\n\t\t\t# it's mostly going to be depth!\n\t\t\tif 
depth>self.d:\n\t\t\t\treturn depth_eval_fn(board,nn)\n\t\t\tif pos.terminal_test(board,a):\n\t\t\t\treturn pos.terminal_util(board)\n\n\t\t\tv = -infinity\n\t\t\tGS = pos.successors(board)\n\t\t\tfor (a, s) in GS:\n\t\t\t\tv = max(v, min_value(s, alpha, beta, depth+1,a))\n\t\t\t\tif v >= beta:\n\t\t\t\t\treturn v\n\t\t\t\talpha = max(alpha, v)\n\t\t\treturn v\n\n\t\tdef min_value(board, alpha, beta, depth,a):\n\t\t\t# manual cutoff tests should be faster than reevaluation\n\t\t\tif depth>self.d:\n\t\t\t\treturn depth_eval_fn(board,nn)\n\t\t\tif pos.terminal_test(board,a):\n\t\t\t\treturn pos.terminal_util(board)\n\t\t\t\t\n\t\t\tv = infinity\n\t\t\tGS = pos.successors(board)\n\t\t\tfor (a, s) in GS:\n\t\t\t\tv = min(v, max_value(s, alpha, beta, depth+1,a))\n\t\t\t\tif v <= alpha:\n\t\t\t\t\treturn v\n\t\t\t\tbeta = min(beta, v)\n\t\t\treturn v\n\n\t\t# The default test cuts off at depth d or at a terminal board\n\t\t\n\t\taction_boards = pos.successors(board)\n\t\tactions = [i[0] for i in action_boards]\n\n\t\t# if there's only 1 available action, just take it\n\t\tif len(actions) == 1:\n\t\t\taction=actions[0]\n\t\telse:\n\t\t\tZ = argmax(action_boards,lambda (a,s): min_value(s, -infinity, infinity, 0,a))\n\t\t\taction = actions[Z]\n\t\treturn action", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"kappa_W[1,0.0,2.0]\") \n self.modelBuilder.doVar(\"kappa_Z[1,0.0,2.0]\") \n self.modelBuilder.doVar(\"kappa_tau[1,0.0,3.0]\")\n self.modelBuilder.doVar(\"kappa_mu[1,0.0,5.0]\") \n self.modelBuilder.factory_(\"expr::kappa_mu_expr(\\\"@0*@1+(1-@0)*@2\\\", CMS_use_kmu[0], kappa_mu, kappa_tau)\")\n self.modelBuilder.doVar(\"kappa_t[1,0.0,4.0]\")\n # additional kappa for the anomalous coupling\n self.modelBuilder.doVar(\"kappa_tilde_t[0.0,0.0,4.0]\")\n self.modelBuilder.doVar(\"kappa_b[1,0.0,3.0]\")\n if not self.resolved:\n self.modelBuilder.doVar(\"kappa_g[1,0.0,2.0]\")\n self.modelBuilder.doVar(\"kappa_gam[1,0.0,2.5]\")\n\tself.modelBuilder.doVar(\"BRinv[0,0,1]\")\n self.modelBuilder.out.var(\"BRinv\").setConstant(True)\n # adding additional kappa to list of parameters of interest\n pois = 'kappa_W,kappa_Z,kappa_tau,kappa_t,kappa_tilde_t,kappa_b'\n if not self.resolved:\n pois += ',kappa_g,kappa_gam'\n self.doMH()\n self.modelBuilder.doSet(\"POI\",pois)\n # use modified Higgs Builder\n self.SMH = AnomalousTopHiggsBuilder(self.modelBuilder)\n self.setup()", "def calibrate(self, master):\n if master.polyorder == 'linear':\n self.fitfunction = \"A0 + A1 * D\"\n self.fit_fkt = self.calc_lin\n elif master.polyorder == 'quadratic':\n self.fit_fkt = self.calc_quad\n self.fitfunction = \"A0 + A1 * D + A2 * D**2\"\n elif master.polyorder == \"cubic\":\n self.fitfunction = \"A0 + A1 * D + A2 * D**2 + A3 * D**3\"\n self.fit_fkt = self.calc_cubic\n else:\n print(\"Polynomgrad nicht definiert\")\n \n self.mw = np.asarray(self.mw)\n if master.sensortype == \"Druck\":\n self.best, self.covar = curve_fit(self.fit_fkt, self.mw, master.Referencedata.caldat)\n else:\n print(\"Sensortyp noch nicht Hinterlegt\")", "def exercise_b2_70():\r\n pass", "def refugia_adj_5(params, ns):\n #17 parameters \n nu1_1, nu1_2, nuA, nu2, nu3, m1_12, m1_21, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T1a, T1b, T2, T3 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T1a\n nu_T1 = [nu1_1, nuA]\n fs.integrate(nu_T1, T1a)\n ## Population function and 
migration matrix for T1b\n mig1 = numpy.array([[0, m1_12],[m1_21, 0]])\n fs.integrate(nu_T1, T1b, m=mig1) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T2\n nu_T2 = [nu1_2, nu2, nu3]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3\n nu_T3 = [nu1_2, nu2, nu3]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3) \n return fs", "def FindCA(e,PV,F,w,m):\n global MAX\n le=len(e)\n if le > MAX:\n MAX = le\n #print the new best columns and how many they are\n system('clear')\n print \"So far, the best columns are %s and MAX is %d\" % (e,MAX)\n PV=ComputeCand(e,PV,F,w,m)\n lp=len(PV)\n if le+lp > MAX:\n for i in range(le+lp-MAX):\n newe=e+[PV[i]]\n test=collections.deque(columns2bin(newe, w))\n if is_necklace(test):\n FindCA(newe,PV[i+1:],F,w,m)\n else:\n break", "def main_function(self):\n self.ana_cont_probl = cont.AnalyticContinuationProblem(im_axis=self.input_data.mats,\n im_data=self.input_data.value.real,\n re_axis=self.realgrid.grid,\n kernel_mode='freq_bosonic')\n model = np.ones_like(self.realgrid.grid)\n model /= np.trapz(model, self.realgrid.grid)\n\n preblur, bw = self.get_preblur()\n\n sol = self.ana_cont_probl.solve(method='maxent_svd',\n optimizer='newton',\n alpha_determination='chi2kink',\n model=model,\n stdev=self.input_data.error,\n interactive=False, alpha_start=1e10, alpha_end=1e-3,\n preblur=preblur, blur_width=bw)\n\n inp_str = 'atom {}, orb {}, spin {}, blur {}: '.format(self.input_data.atom,\n self.input_data.orbital,\n self.input_data.spin,\n bw)\n all_chis = np.isfinite(np.array([s.chi2 for s in sol[1]]))\n res_str = 'alpha_opt={:3.2f}, chi2(alpha_opt)={:3.2f}, min(chi2)={:3.2f}'.format(\n sol[0].alpha, sol[0].chi2, np.amin(all_chis)\n )\n self.text_output.append(inp_str + res_str)\n alphas = [s.alpha for s in sol[1]]\n chis = [s.chi2 for s in sol[1]]\n\n self.output_data.update(self.realgrid.grid, sol[0].A_opt, self.input_data)\n\n fig, ax = plt.subplots(ncols=2, nrows=2, figsize=(11.75, 8.25)) # A4 paper size\n ax[0, 0].loglog(alphas, chis, marker='s', color='black')\n ax[0, 0].loglog(sol[0].alpha, sol[0].chi2, marker='*', color='red', markersize=15)\n ax[0, 0].set_xlabel(r'$\\alpha$')\n ax[0, 0].set_ylabel(r'$\\chi^2(\\alpha)$')\n\n ax[1, 0].plot(self.realgrid.grid, sol[0].A_opt)\n ax[1, 0].set_xlabel(r'$\\omega$')\n ax[1, 0].set_ylabel('spectrum')\n\n ax[0, 1].plot(self.input_data.mats, self.input_data.value.real,\n color='blue', ls=':', marker='x', markersize=5,\n label='Re[data]')\n ax[0, 1].plot(self.input_data.mats, self.input_data.value.imag,\n color='green', ls=':', marker='+', markersize=5,\n label='Im[data]')\n ax[0, 1].plot(self.input_data.mats, sol[0].backtransform.real,\n ls='--', color='gray', label='Re[fit]')\n ax[0, 1].plot(self.input_data.mats, sol[0].backtransform.imag,\n color='gray', label='Im[fit]')\n ax[0, 1].set_xlabel(r'$\\nu_n$')\n ax[0, 1].set_ylabel(self.input_data.data_type)\n ax[0, 1].legend()\n\n ax[1, 1].plot(self.input_data.mats, (self.input_data.value - sol[0].backtransform).real,\n ls='--', label='real part')\n ax[1, 1].plot(self.input_data.mats, (self.input_data.value - sol[0].backtransform).imag,\n label='imaginary part')\n ax[1, 1].set_xlabel(r'$\\nu_n$')\n ax[1, 1].set_ylabel('data $-$ fit')\n ax[1, 1].legend()\n plt.tight_layout()\n plt.show()", "def analize(slugs, parameters_for_align, alpha_variability, alpha2_variability, beta_variability):\n i = alpha_variability[0]\n bestI 
= 0\n bestResult = 0\n while i < alpha_variability[1]:\n print(\"ALPHA=\"+str(i))\n align.ALPHA = i\n align.align_particular(parameters_for_align)\n current=main(slugs, True, False)\n if current>bestResult:\n bestResult = current\n bestI = i\n i += alpha_variability[2]\n align.ALPHA = bestI\n i = alpha2_variability[0]\n bestI2 = 0\n bestResult2 = 0\n while i < alpha2_variability[1]:\n print(\"ALPHA2=\"+str(i))\n align.ALPHA2 = i\n align.align_particular(parameters_for_align)\n current=main(slugs, False, False)\n if current>bestResult2:\n bestResult2 = current\n bestI2 = i\n i += alpha2_variability[2]\n align.ALPHA2 = bestI2\n i = beta_variability[0]\n bestI3 = 0\n bestResult3 = bestResult2\n while i < beta_variability[1]:\n print(\"BETHA=\" + str(i))\n align.BETHA = i\n align.align_particular(parameters_for_align)\n current = main(slugs, False, False)\n if current > bestResult3:\n bestResult3 = current\n bestI3 = i\n i += beta_variability[2]\n print(\"Best ALPHA=\"+str(bestI))\n print(\"Best ALPHA2=\" + str(bestI2))\n print(\"Best BETHA=\" + str(bestI3))\n print(\"Best result=\" + str(bestResult3))", "def exercise_b2_69():\r\n pass", "def mtvsolver(Hurst_init, aest, yij, varyj, nj, j1, j2,\n mask, max_iter=100, init=None,\n prox_max_iter=5000, tol=1e-4, call_back=None, verbose=1,\n l=1, l1_ratio=0,lipschitz_constant=0, wtype=1):\n\n # shape of image box\n \n alpha = 1\n\n flat_mask = mask.ravel()\n volume_shape = mask.shape\n H_size = len(Hurst_init)\n\n if lipschitz_constant == 0:\n lipschitz_constant = lipschitz_constant_gradf(j1,j2,varyj, nj, wtype)\n\n #init[\"z\"] = Hurst_init.copy()\n #init[\"t\"] = 1\n #ini[\"stepsize\"] = 1 / lipschitz_constant\n\n def total_energy(x):\n return f(x, aest, yij, varyj, nj, j1, j2, wtype) + l * tv(_unmask(x,mask))\n\n def unmaskvec(w):\n return _unmask(w, mask)\n\n def maskvec(w):\n return w[flat_mask]\n\n def f1_grad(x):\n return gradf(x, aest, yij, varyj, nj, j1, j2, wtype)\n\n def f2_prox(w, stepsize, dgap_tol, init):\n out, info = _prox_tvl1(unmaskvec(w),\n weight= (l + 1.e-6) * stepsize, l1_ratio=l1_ratio,\n dgap_tol=dgap_tol, init=unmaskvec(init),\n max_iter=prox_max_iter, fista = False,\n verbose=verbose)\n #pdb.set_trace()\n return maskvec(out.ravel()), info\n\n w, obj, init = mfista(\n f1_grad, f2_prox, total_energy, lipschitz_constant, H_size,\n dgap_factor=(.1 + l1_ratio) ** 2, tol=tol, init=init, verbose=verbose,\n max_iter=max_iter, callback=None)\n \n return w, obj, init" ]
[ "0.5649461", "0.56487626", "0.5537902", "0.55107564", "0.5501089", "0.54266936", "0.5403812", "0.53776634", "0.5360801", "0.5356619", "0.5301135", "0.52775687", "0.5269604", "0.5262089", "0.52313644", "0.5223302", "0.5188832", "0.5179477", "0.516795", "0.51599634", "0.5152701", "0.51464236", "0.5137843", "0.5114498", "0.51131386", "0.5110136", "0.510727", "0.5101229", "0.5094462", "0.5088486", "0.50849485", "0.5084006", "0.5074736", "0.50705636", "0.5067694", "0.5063891", "0.506315", "0.50436014", "0.50406396", "0.50256103", "0.50217754", "0.5017324", "0.50142103", "0.5006749", "0.4999745", "0.49959216", "0.4987754", "0.4982246", "0.49822414", "0.49784103", "0.49775562", "0.4969105", "0.49649504", "0.49645236", "0.49645236", "0.49642566", "0.49607572", "0.4956832", "0.4956211", "0.49551076", "0.49457332", "0.49387717", "0.4933985", "0.4931413", "0.4931027", "0.49283883", "0.49276856", "0.4924316", "0.49216425", "0.49210605", "0.4918944", "0.49154362", "0.49100122", "0.49090624", "0.49067357", "0.49002907", "0.4900228", "0.49000865", "0.48960024", "0.48824134", "0.48787177", "0.48768905", "0.48763394", "0.48692164", "0.48649535", "0.48643926", "0.48602667", "0.48585516", "0.48514557", "0.48506594", "0.484707", "0.4843954", "0.48388103", "0.48306018", "0.4824848", "0.48216063", "0.4820881", "0.48188308", "0.48137003", "0.48134482" ]
0.599668
0
Given a function of one variable and a possible bracketing interval, return the minimum of the function isolated to a fractional precision of tol. A bracketing interval is a triple (a,b,c) where (a<b<c) and func(b) < func(a), func(c). If bracket is two numbers then they are assumed to be a starting interval for a downhill bracket search (see bracket). Uses an analog of the bisection method to decrease the bracketed interval.
def golden(func, args=(), brack=None, tol=1.e-4, full_output=0): if brack is None: xa,xb,xc,fa,fb,fc,funcalls = bracket(func, args=args) elif len(brack) == 2: xa,xb,xc,fa,fb,fc,funcalls = bracket(func, xa=brack[0], xb=brack[1], args=args) elif len(brack) == 3: xa,xb,xc = brack if (xa > xc): # swap so xa < xc can be assumed dum = xa; xa=xc; xc=dum assert ((xa < xb) and (xb < xc)), "Not a bracketing interval." fa = apply(func, (xa,)+args) fb = apply(func, (xb,)+args) fc = apply(func, (xc,)+args) assert ((fb<fa) and (fb < fc)), "Not a bracketing interval." funcalls = 3 else: raise ValueError, "Bracketing interval must be length 2 or 3 sequence." _gR = 0.61803399 _gC = 1.0-_gR x3 = xc x0 = xa if (abs(xc-xb) > abs(xb-xa)): x1 = xb x2 = xb + _gC*(xc-xb) else: x2 = xb x1 = xb - _gC*(xb-xa) f1 = apply(func, (x1,)+args) f2 = apply(func, (x2,)+args) funcalls += 2 while (abs(x3-x0) > tol*(abs(x1)+abs(x2))): if (f2 < f1): x0 = x1; x1 = x2; x2 = _gR*x1 + _gC*x3 f1 = f2; f2 = apply(func, (x2,)+args) else: x3 = x2; x2 = x1; x1 = _gR*x2 + _gC*x0 f2 = f1; f1 = apply(func, (x1,)+args) funcalls += 1 if (f1 < f2): xmin = x1 fval = f1 else: xmin = x2 fval = f2 if full_output: return xmin, fval, funcalls else: return xmin
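A brief illustration added after the document above (not part of the dataset row): the routine is the classic golden-section bracket-shrinking minimizer the query describes, but as written it relies on Python 2's apply. The sketch below is a self-contained Python 3 restatement of the same technique; the name golden_section_min, the quadratic test function, and the bracket (0, 1, 5) are made-up example values.

# Self-contained golden-section minimization sketch (illustrative only; the
# helper name, test function, and bracket values below are made up for this
# example and are not taken from the dataset row above).
def golden_section_min(func, a, b, c, tol=1.e-6):
    # (a, b, c) must bracket a minimum: a < b < c and func(b) < func(a), func(c).
    gR = 0.61803399          # golden-ratio fraction used to place interior points
    gC = 1.0 - gR
    x0, x3 = a, c
    if abs(c - b) > abs(b - a):
        x1, x2 = b, b + gC * (c - b)
    else:
        x1, x2 = b - gC * (b - a), b
    f1, f2 = func(x1), func(x2)
    # Shrink the bracket, always discarding the outer point next to the larger value.
    while abs(x3 - x0) > tol * (abs(x1) + abs(x2)):
        if f2 < f1:
            x0, x1, x2 = x1, x2, gR * x2 + gC * x3
            f1, f2 = f2, func(x2)   # evaluate at the freshly placed interior point
        else:
            x3, x2, x1 = x2, x1, gR * x1 + gC * x0
            f2, f1 = f1, func(x1)
    return (x1, f1) if f1 < f2 else (x2, f2)

# f(x) = (x - 2)**2 has its minimum at x = 2; (0, 1, 5) is a valid bracket
# because f(1) < f(0) and f(1) < f(5).
xmin, fval = golden_section_min(lambda x: (x - 2.0) ** 2, 0.0, 1.0, 5.0)
print(xmin, fval)   # prints a value close to 2.0 and one close to 0.0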
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bisect(f, x1, x2, tol=1.0e-9):\n assert callable(f), \"User-supplied function must be callable.\"\n assert x1 != x2, \"Bad initial range given to bracket.\"\n f1 = f(x1)\n f2 = f(x2)\n assert f1 * f2 < 0.0, \"Range does not clearly bracket a root.\"\n while abs(x2 - x1) > tol:\n x_mid = 0.5*(x1+x2)\n f_mid = f(x_mid)\n if f_mid == 0.0:\n return x_mid # found it\n if f_mid * f1 < 0.0:\n x2 = x_mid\n f2 = f_mid\n else:\n x1 = x_mid\n f1 = f_mid\n return x_mid", "def bisection(f,interval,tolerance,verbose=False):\n\n # set up initial bracketing interval\n # Note: Sign of function *must* change in this interval for method to work.\n (xa,xb) = interval\n fxa = f(xa)\n fxb = f(xb)\n if (fxa*fxb >=0):\n # no sign change in interval\n return None\n\n # set up for first iteration\n xm = (xb + xa)/2\n error = (xb - xa)/2\n iteration_count = 0\n\n # bisect until tolerance reached\n while (abs(error) > tolerance):\n\n # increment iteration count\n iteration_count += 1\n \n # evaluate function\n fxa = f(xa)\n fxb = f(xb)\n fxm = f(xm)\n\n # find which subinterval contains root\n if (fxm == 0):\n # accidentally landed on root (often occurs for \"toy\" test intervals)\n xa = xm\n xb = xm\n elif ((fxa * fxm) < 0):\n # sign change is in left half of interval\n xb = xm\n else:\n # sign change is in right half of interval\n xa = xm\n\n # find new midpoint (and change in midpoint)\n xm_old = xm\n xm = (xb + xa)/2\n error = xm - xm_old\n\n # verbose iteration log\n if (verbose):\n print(\"iteration\", iteration_count, \"(bisection):\",\n \"interval\", (xa, xb), \"root\", xm)\n \n return xm", "def bisect2d(f, x1, x2, tol=1.0e-9):\n assert callable(f), \"User-supplied function must be callable.\"\n assert x1 != x2, \"Bad initial range given to bracket.\"\n f1 = f(x1)\n f2 = f(x2)\n assert f1 * f2 < 0.0, \"Range does not clearly bracket a root.\"\n while abs(x2 - x1) > tol:\n x_mid = 0.5*(x1+x2)\n f_mid = f(x_mid)\n if f_mid == 0.0:\n return x_mid # found it\n if f_mid * f1 < 0.0:\n x2 = x_mid\n f2 = f_mid\n else:\n x1 = x_mid\n f1 = f_mid\n return x_mid", "def test_bisection(testFunctions, ranges,tol, printFlag):\n \n for i in range(len(testFunctions)):\n scipyValue = sp.optimize.bisect(testFunctions[i],ranges[i,0],ranges[i,1])\n nonlinValue =\n pass", "def biseccion(func, a, b, tol=1e-4):\n p = (a + b) / 2 \n while np.fabs(func(p)) > tol:\n p = (a + b) / 2 \n if func(a) * func(p) < 0:\n b = p\n elif func(a) * func(p) > 0:\n a = p\n else:\n return p\n return p", "def minimize_cost_golden_float(f, vmin, start, stop, tol=1e-8, maxiter=1000):\n # type: (Callable[[float], float], float, float, float, float, int) -> MinCostResult\n\n fa = f(start)\n if fa >= vmin:\n # solution found at start\n return MinCostResult(x=start, xmax=None, vmax=None, nfev=1)\n\n fb = f(stop) # type: Optional[float]\n if fb is None:\n raise TypeError(\"f(stop) returned None instead of float\")\n if fb >= vmin:\n # found upper bound, use binary search to find answer\n return minimize_cost_binary_float(f, vmin, start, stop, tol=tol, save=stop, nfev=2)\n\n # solution is somewhere in middle\n gr = (5 ** 0.5 + 1) / 2\n delta = (stop - start) / gr\n c = stop - delta\n d = start + delta\n\n fc = f(c) # type: Optional[float]\n if fc is None:\n raise TypeError(\"f(c) returned None instead of float\")\n if fc >= vmin:\n # found upper bound, use binary search to find answer\n return minimize_cost_binary_float(f, vmin, start, c, tol=tol, save=stop, nfev=3)\n\n fd = f(d) # type: Optional[float]\n if fd is None:\n raise TypeError(\"f(d) returned 
None instead of float\")\n if fd >= vmin:\n # found upper bound, use binary search to find answer\n return minimize_cost_binary_float(f, vmin, start, c, tol=tol, save=stop, nfev=4)\n\n if fc > fd:\n a, b, d = start, d, c\n c = b - (b - a) / gr\n fb, fc, fd = fd, None, fc\n else:\n a, b, c = c, stop, d\n d = a + (b - a) / gr\n fa, fc, fd = fc, fd, None\n\n nfev = 4\n while abs(b - a) > tol and nfev < maxiter:\n if fc is None:\n fc = f(c)\n else:\n fd = f(d)\n assert fc is not None, 'Either fc or fd was None and the above should have set it'\n assert fd is not None, 'Either fc or fd was None and the above should have set it'\n nfev += 1\n if fc > fd:\n if fc >= vmin:\n return minimize_cost_binary_float(f, vmin, a, c, tol=tol, save=stop, nfev=nfev)\n b, d = d, c\n c = b - (b - a) / gr\n fb, fc, fd = fd, None, fc\n else:\n if fd >= vmin:\n return minimize_cost_binary_float(f, vmin, a, d, tol=tol, save=stop, nfev=nfev)\n a, c = c, d\n d = a + (b - a) / gr\n fa, fc, fd = fc, fd, None\n\n test = (a + b) / 2\n vmax = f(test)\n nfev += 1\n if vmax >= vmin:\n return MinCostResult(x=test, xmax=test, vmax=vmax, nfev=nfev)\n else:\n return MinCostResult(x=None, xmax=test, vmax=vmax, nfev=nfev)", "def bisect(f, lo=0, hi=None, eps=1e-9):\n lo_bool = f(lo)\n if hi is None:\n offset = 1\n while f(lo+offset) == lo_bool:\n offset *= 2\n hi = lo + offset\n else:\n assert f(hi) != lo_bool\n while hi - lo > eps:\n mid = (hi + lo) / 2\n if f(mid) == lo_bool:\n lo = mid\n else:\n hi = mid\n if lo_bool:\n return lo\n else:\n return hi", "def rootfind_newton(func, x0, a, b, maxiter=50, tol=1.0e-11):\n\n for iter in xrange(maxiter):\n\n fval, fpval, args = func(x0)\n # print \"x0=%.4f fval=%.2e fpval=%.2e [%.4f, %.4f]\" % (x0, fval, fpval, a, b)\n\n if fval < 0:\n a = x0\n else:\n b = x0\n\n x = x0 - fval/fpval\n if not (a < x < b):\n # Once we have bracketed the root, we don't allow the\n # iterations to leave the bracket.\n x = 0.5*(a+b)\n\n if np.abs(x-x0) < tol or np.abs(fval) < tol:\n break\n\n x0 = x\n\n return x, fval, iter, args", "def bisect(rlo, rhi, acc, tol, fun, params=None):\n while rhi-rlo>acc:\n r=0.5*(rhi+rlo)\n if params: isvalid=fun(r,tol,params)\n else: isvalid=fun(r,tol)\n if isvalid:\n rlo=r\n else:\n rhi=r\n return rlo", "def bisect(\n f: Callable,\n a: Array,\n b: Array,\n args: Tuple = (),\n xtol: float = 1e-7,\n ftol: float = 1e-7,\n maxiter: int = 100,\n full_output: bool = False,\n range_check: bool = True,\n) -> Union[Array, dict]:\n\n fa = f(*((a,) + args))\n fb = f(*((b,) + args))\n if range_check and snp.any(snp.sign(fa) == snp.sign(fb)):\n raise ValueError(\"Initial bisection range does not bracket zero.\")\n\n for numiter in range(maxiter):\n c = (a + b) / 2.0\n fc = f(*((c,) + args))\n fcs = snp.sign(fc)\n a = snp.where(snp.logical_or(snp.sign(fa) * fcs == 1, fc == 0.0), c, a)\n b = snp.where(snp.logical_or(fcs * snp.sign(fb) == 1, fc == 0.0), c, b)\n fa = f(*((a,) + args))\n fb = f(*((b,) + args))\n xerr = snp.max(snp.abs(b - a))\n ferr = snp.max(snp.abs(fc))\n if xerr <= xtol and ferr <= ftol:\n break\n\n idx = snp.argmin(snp.stack((snp.abs(fa), snp.abs(fb))), axis=0)\n x = snp.choose(idx, (a, b))\n if full_output:\n r = x, {\"iter\": numiter, \"xerr\": xerr, \"ferr\": ferr, \"a\": a, \"b\": b}\n else:\n r = x\n return r", "def solve_bisection(func, target, xmin, xmax):\n tol = 1e-10 # when |a - b| <= tol, quit searching\n max_iters = 1e2 # maximum number of iterations\n a = xmin\n b = xmax\n cnt = 1\n # before entering while(), calculate Fa\n Fa = target - func(a)\n c = a\n\n # 
bisection search loop\n while np.abs(a - b) > tol and cnt < max_iters:\n cnt += 1\n # make 'c' be the midpoint between 'a' and 'b'\n c = (a + b) / 2.0\n # calculate at the new 'c'\n Fc = target - func(c)\n\n if Fc == 0:\n # 'c' was the sought-after solution, so quit\n break\n elif np.sign(Fa) == np.sign(Fc):\n # the signs were the same, so modify 'a'\n a = c\n Fa = Fc\n else:\n # the signs were different, so modify 'b'\n b = c\n\n if cnt == max_iters:\n print('WARNING: max iterations reached')\n\n return c", "def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000):\n\t_gold = 1.618034\n\t_verysmall_num = 1e-21\n\tfa = apply(func, (xa,)+args)\n\tfb = apply(func, (xb,)+args)\n\tif (fa < fb):\t\t\t # Switch so fa > fb\n\t dum = xa; xa = xb; xb = dum\n\t dum = fa; fa = fb; fb = dum\n\txc = xb + _gold*(xb-xa)\n\tfc = apply(func, (xc,)+args)\n\tfuncalls = 3\n\titer = 0\n\twhile (fc < fb):\n\t tmp1 = (xb - xa)*(fb-fc)\n\t tmp2 = (xb - xc)*(fb-fa)\n\t val = tmp2-tmp1\n\t if abs(val) < _verysmall_num:\n\t\tdenom = 2.0*_verysmall_num\n\t else:\n\t\tdenom = 2.0*val\n\t w = xb - ((xb-xc)*tmp2-(xb-xa)*tmp1)/denom\n\t wlim = xb + grow_limit*(xc-xb)\n\t if iter > maxiter:\n\t\traise RuntimeError, \"Too many iterations.\"\n\t iter += 1\n\t if (w-xc)*(xb-w) > 0.0:\n\t\tfw = apply(func, (w,)+args)\n\t\tfuncalls += 1\n\t\tif (fw < fc):\n\t\t xa = xb; xb=w; fa=fb; fb=fw\n\t\t return xa, xb, xc, fa, fb, fc, funcalls\n\t\telif (fw > fb):\n\t\t xc = w; fc=fw\n\t\t return xa, xb, xc, fa, fb, fc, funcalls\n\t\tw = xc + _gold*(xc-xb)\n\t\tfw = apply(func, (w,)+args)\n\t\tfuncalls += 1\n\t elif (w-wlim)*(wlim-xc) >= 0.0:\n\t\tw = wlim\n\t\tfw = apply(func, (w,)+args)\n\t\tfuncalls += 1\n\t elif (w-wlim)*(xc-w) > 0.0:\n\t\tfw = apply(func, (w,)+args)\n\t\tfuncalls += 1\n\t\tif (fw < fc):\n\t\t xb=xc; xc=w; w=xc+_gold*(xc-xb)\n\t\t fb=fc; fc=fw; fw=apply(func, (w,)+args)\n\t\t funcalls += 1\n\t else:\n\t\tw = xc + _gold*(xc-xb)\n\t\tfw = apply(func, (w,)+args)\n\t\tfuncalls += 1\n\t xa=xb; xb=xc; xc=w\n\t fa=fb; fb=fc; fc=fw\n\treturn xa, xb, xc, fa, fb, fc, funcalls", "def get_new_bracket(x1, x2, x3, x4):\n points = [x1, x2, x3]\n dist = float(inf)\n for point in points:\n if abs(x4 - point) < dist and f(point) * f(x4) < 0:\n valid_point = point\n dist = abs(x4 - point)\n return valid_point", "def bounded_aitken(f, x0, x1, y0, y1, x, yval, xtol, ytol):\n _abs = abs\n if y1 < 0.: x0, y0, x1, y1 = x1, y1, x0, y0\n dx1 = x1-x0\n dy = yval-y0\n if not (x0 < x < x1 or x1 < x < x0):\n x = x0 + dy*dx1/(y1-y0)\n yval_ub = yval + ytol\n yval_lb = yval - ytol\n while _abs(dx1) > xtol:\n y = f(x)\n if y > yval_ub:\n x1 = x\n y1 = y\n elif y < yval_lb:\n x0 = x\n y0 = y\n dy = yval-y\n else: \n return x\n dx0 = x1-x0\n g = x0 + dy*dx0/(y1-y0)\n if _abs(dx0) < xtol:\n return g\n \n y = f(g)\n if y > yval_ub:\n x1 = g\n y1 = y\n elif y < yval_lb:\n x0 = g\n y0 = y\n dy = yval-y\n else:\n return g\n dx1 = x1-x0\n gg = x0 + dy*dx1/(y1-y0)\n dxg = x - g\n try: x = x - dxg**2./(gg + dxg - g)\n except:\n # Add overshoot to prevent getting stuck\n x = gg + 0.1*(x1+x0-2*gg)*(dx1/dx0)**3. \n else:\n if not (x0 < x < x1 or x1 < x < x0):\n x = gg + 0.1*(x1+x0-2*gg)*(dx1/dx0)**3. 
\n return x", "def minimize_scalar(\n func: Callable,\n bracket: Optional[Union[Sequence[float]]] = None,\n bounds: Optional[Sequence[float]] = None,\n args: Union[Tuple, Tuple[Any]] = (),\n method: str = \"brent\",\n tol: Optional[float] = None,\n options: Optional[dict] = None,\n) -> spopt.OptimizeResult:\n\n def f(x, *args):\n # Wrap jax-based function `func` to return a numpy float rather\n # than a jax array of size (1,)\n return func(x, *args).item()\n\n res = spopt.minimize_scalar(\n fun=f,\n bracket=bracket,\n bounds=bounds,\n args=args,\n method=method,\n tol=tol,\n options=options,\n )\n return res", "def fmin_powell(func, x0, args=(), kw=dict(), xtol=1e-4, ftol=1e-4, maxiter=None,\n maxfun=None, full_output=0, disp=1, retall=0, callback=None,\n direc=None, holdfixed=None):\n # 2010-07-01 11:17 IJC: Added keyword option\n\n from scipy import optimize\n from numpy import asarray, eye, pi, squeeze\n\n def wrap_function(function, args, **kw):\n ncalls = [0]\n def function_wrapper(x):\n ncalls[0] += 1\n return function(x, *args, **kw)\n return ncalls, function_wrapper\n\n def _linesearch_powell(func, p, xi, tol=1e-3):\n \"\"\"Line-search algorithm using fminbound.\n\n Find the minimium of the function ``func(x0+ alpha*direc)``.\n\n \"\"\"\n def myfunc(alpha):\n return func(p + alpha * xi)\n alpha_min, fret, iter, num = optimize.brent(myfunc, full_output=1, tol=tol)\n xi = alpha_min*xi\n return squeeze(fret), p+xi, xi\n\n\n # Set up holdfixed arrays\n if holdfixed is not None:\n holdfixed = np.array(holdfixed)\n #x0[holdfixed] = x0[holdfixed]\n holdsome = True\n else:\n holdsome = False\n #holdfixed = np.zeros(params.size, dtype=bool)\n\n # we need to use a mutable object here that we can update in the\n # wrapper function\n fcalls, func = wrap_function(func, args, **kw)\n x = asarray(x0).flatten()\n xoriginal = x.copy()\n if retall:\n allvecs = [x]\n N = len(x)\n rank = len(x.shape)\n if not -1 < rank < 2:\n raise ValueError, \"Initial guess must be a scalar or rank-1 sequence.\"\n if maxiter is None:\n maxiter = N * 1000\n if maxfun is None:\n maxfun = N * 1000\n\n\n if direc is None:\n direc = eye(N, dtype=float)\n else:\n direc = asarray(direc, dtype=float)\n\n fval = squeeze(func(x))\n x1 = x.copy()\n iter = 0;\n ilist = range(N)\n while True:\n fx = fval\n bigind = 0\n delta = 0.0\n for i in ilist:\n direc1 = direc[i]\n fx2 = fval\n if (not holdsome) or (i not in holdfixed):\n fval, x, direc1 = _linesearch_powell(func, x, direc1, tol=xtol*100)\n if (fx2 - fval) > delta:\n delta = fx2 - fval\n bigind = i\n iter += 1\n if callback is not None:\n callback(x)\n if retall:\n allvecs.append(x)\n if (2.0*(fx - fval) <= ftol*(abs(fx)+abs(fval))+1e-20): break\n if fcalls[0] >= maxfun: break\n if iter >= maxiter: break\n\n # Construct the extrapolated point\n direc1 = x - x1\n x2 = 2*x - x1\n if holdsome:\n x2[holdfixed] = xoriginal[holdfixed]\n x1 = x.copy()\n fx2 = squeeze(func(x2))\n\n if (fx > fx2):\n t = 2.0*(fx+fx2-2.0*fval)\n temp = (fx-fval-delta)\n t *= temp*temp\n temp = fx-fx2\n t -= delta*temp*temp\n if t < 0.0:\n fval, x, direc1 = _linesearch_powell(func, x, direc1,\n tol=xtol*100)\n if holdsome:\n x[holdfixed] = xoriginal[holdfixed]\n direc[bigind] = direc[-1]\n direc[-1] = direc1\n\n warnflag = 0\n if fcalls[0] >= maxfun:\n warnflag = 1\n if disp:\n print \"Warning: Maximum number of function evaluations has \"\\\n \"been exceeded.\"\n elif iter >= maxiter:\n warnflag = 2\n if disp:\n print \"Warning: Maximum number of iterations has been exceeded\"\n else:\n if disp:\n 
print \"Optimization terminated successfully.\"\n print \" Current function value: %f\" % fval\n print \" Iterations: %d\" % iter\n print \" Function evaluations: %d\" % fcalls[0]\n\n x = squeeze(x)\n\n if full_output:\n retlist = x, fval, direc, iter, fcalls[0], warnflag\n if retall:\n retlist += (allvecs,)\n else:\n retlist = x\n if retall:\n retlist = (x, allvecs)\n\n return retlist", "def fmin(evaluator, xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,\n full_output=0, disp=1, callback=None):\n fcalls, func = wrap_function(evaluator.target)\n x0 = evaluator.x\n #x0 = asfarray(x0).flatten()\n N = len(x0)\n if maxiter is None:\n maxiter = N * 200\n if maxfun is None:\n maxfun = N * 200\n\n rho = 1; chi = 2; psi = 0.5; sigma = 0.5;\n one2np1 = range(1,N+1)\n\n sim = []\n fsim = [.0]*(N+1)\n for i in range(0,N+1):\n sim.append([.0]*(N+1))\n\n sim[0] = x0\n \n fsim[0] = func(x0)\n nonzdelt = 0.05\n zdelt = 0.00025\n for k in range(0,N):\n y = list(x0)\n if y[k] != 0:\n y[k] = (1+nonzdelt)*y[k]\n else:\n y[k] = zdelt\n\n sim[k+1] = y\n f = func(y)\n fsim[k+1] = f\n\n ind = sort_permutation(fsim)\n fsim = apply_permutation(fsim,ind)\n # sort so sim[0,:] has the lowest function value\n sim = apply_permutation(sim,ind)\n evaluator.x = sim[0]\n\n iterations = 1\n\n \n while (fcalls[0] < maxfun and iterations < maxiter):\n sim_size = max(map(lambda x : max(map(abs,map(operator.sub, x, sim[0]))),sim[1:]))\n #print \"The simplex size is %.6g(tol=%.6g)\"%(sim_size,xtol)\n fsim_size = max( map(lambda x: abs(x-fsim[0]), fsim[1:]))\n #print \"The simplex image size is %.6g(tol=%.6g)\"%(fsim_size, ftol)\n if ( sim_size <= xtol ) \\\n and fsim_size <=ftol:\n break\n# if (max(numpy.ravel(abs(sim[1:]-sim[0]))) <= xtol \\\n# and max(abs(fsim[0]-fsim[1:])) <= ftol):\n# break\n\n xbar = averageArrays(sim[:-1])\n xr = linearCombine((1+rho),xbar, - rho,sim[-1])\n fxr = func(xr)\n doshrink = 0\n\n if fxr < fsim[0]:\n xe = linearCombine((1+rho*chi),xbar, - rho*chi,sim[-1])\n fxe = func(xe)\n\n if fxe < fxr:\n sim[-1] = xe\n fsim[-1] = fxe\n else:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fsim[0] <= fxr\n if fxr < fsim[-2]:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fxr >= fsim[-2]\n # Perform contraction\n if fxr < fsim[-1]:\n xc = linearCombine((1+psi*rho),xbar, - psi*rho,sim[-1])\n fxc = func(xc)\n\n if fxc <= fxr:\n sim[-1] = xc\n fsim[-1] = fxc\n else:\n doshrink=1\n else:\n # Perform an inside contraction\n xcc = linearCombine((1-psi),xbar, psi,sim[-1])\n fxcc = func(xcc)\n\n if fxcc < fsim[-1]:\n sim[-1] = xcc\n fsim[-1] = fxcc\n else:\n doshrink = 1\n\n if doshrink:\n for j in one2np1:\n sim[j] = linearCombine((1-sigma),sim[0] , sigma,sim[j])\n fsim[j] = func(sim[j])\n\n ind = sort_permutation(fsim)\n sim = apply_permutation(sim,ind)\n fsim = apply_permutation(fsim,ind)\n evaluator.x = sim[0]\n if callback is not None:\n callback(sim[0])\n iterations += 1\n\n x = sim[0]\n fval = min(fsim)\n warnflag = 0\n\n if fcalls[0] >= maxfun:\n warnflag = 1\n if disp:\n printOut(\"Warning: Maximum number of function evaluations has \"\\\n \"been exceeded.\")\n elif iterations >= maxiter:\n warnflag = 2\n if disp:\n printOut(\"Warning: Maximum number of iterations has been exceeded\")\n else:\n if disp:\n printOut(\"Optimization terminated successfully.\")\n printOut(\" Current function value: %f\" % fval)\n printOut(\" Iterations: %d\" % iterations)\n printOut(\" Function evaluations: %d\" % fcalls[0])\n\n\n if full_output:\n retlist = x, fval, iterations, fcalls[0], warnflag\n else:\n retlist = x\n\n return retlist", 
"def leastsqbound(func,x0,bounds,args=(),**kw):\n # check for full output\n if \"full_output\" in kw and kw[\"full_output\"]:\n full=True\n else:\n full=False\n\n # convert x0 to internal variables\n i0 = external2internal(x0,bounds)\n\n # perfrom unconstrained optimization using internal variables\n r = leastsq(err,i0,args=(bounds,func,args),**kw)\n\n # unpack return convert to external variables and return\n if full:\n xi,cov_xi,infodic,mesg,ier = r\n xe = internal2external(xi,bounds)\n cov_xe = i2e_cov_x(xi,bounds,cov_xi)\n # XXX correct infodic 'fjac','ipvt', and 'qtf' \n return xe,cov_xe,infodic,mesg,ier \n\n else:\n xi,ier = r\n xe = internal2external(xi,bounds)\n return xe,ier", "def min(x):\n pass", "def newtons_method(function, start, epsilon_rounding=6):\n point = start\n\n f = get_gradient(function)\n jacobian_matrix = get_jacobian(f)\n inverse_jacobian = jacobian_matrix.inv()\n\n f_subs = gradient_subs(f, point)\n\n temp = [0, 0]\n\n points = [point]\n while temp != point:\n jacobian_subs_matrix = matrix_subs(jacobian_matrix, point)\n inverse_subs_jacobian = matrix_subs(inverse_jacobian, point)\n negative_gradient = Matrix([-x for x in f_subs])\n solution = Ax_b(jacobian_subs_matrix, negative_gradient)\n temp = [round(float(x), epsilon_rounding) for x in point]\n point = [a + b for a, b in zip(solution, point)]\n point = [round(float(x), epsilon_rounding) for x in point]\n points.append(point)\n f_subs = gradient_subs(f, point)\n new_minimum = [float(x) for x in point]\n\n return new_minimum, points, f\"The minimum is {new_minimum}, with a starting point of {start}\"", "def my_func(a, b, c):\r\n return (a + b + c) - min(a, b, c)", "def test_bisection_1de(self):\n logging.info(\"\\nANSWERS TO EXERCISE 1.1D\")\n func = lambda x: x**7 - 7 * x**6 + 21 * x**5 - 35 * x**4 + 35 * x**3 - 21 * x**2 + 7 * x - 1\n starting_left = 0.95\n starting_right = 1.01\n\n # Margaret chose this case as a funky example of bisection. The root should NOT be in\n # the interval for this function.\n root, (left, right) = undertest.bisection(func, starting_left, starting_right, self.maxit)\n desired_root = 1.0\n self.assertFalse(_root_in_interval(desired_root, left, right))\n\n # Now let's try the factored form of func. 
Here the interval SHOULD contain the true root.\n logging.info(\"\\nRUNNING EXERCISE 1.1E\")\n factored_func = lambda x: (x - 1)**7\n root, (left, right) = undertest.bisection(\n factored_func, starting_left, starting_right, self.maxit)\n self.assertTrue(_root_in_interval(desired_root, left, right))", "def bounded_wegstein(f, x0, x1, y0, y1, x, yval, xtol, ytol):\n _abs = abs\n if y1 < 0.: x0, y0, x1, y1 = x1, y1, x0, y0\n dy = yval-y0\n x_old = x = x if x0 < x < x1 or x1 < x < x0 else x0+dy*(x1-x0)/(y1-y0)\n y = f(x)\n yval_ub = yval + ytol\n yval_lb = yval - ytol\n if y > yval_ub:\n x1 = x\n y1 = y\n elif y < yval_lb:\n x0 = x\n y0 = y\n dy = yval - y\n else:\n return x\n dx1x0 = x1-x0\n x = g0 = x0 + dy*dx1x0/(y1-y0)\n while _abs(dx1x0) > xtol:\n y = f(x)\n if y > yval_ub:\n x1 = x\n y1 = y\n elif y < yval_lb:\n x0 = x\n y0 = y\n dy = yval - y\n else: break\n dx1x0 = x1-x0\n g1 = x0 + dy*dx1x0/(y1-y0)\n dx = x - x_old\n try:\n w = dx/(dx-g1+g0)\n x_old = x\n x = w*g1 + (1.-w)*x\n except:\n x = g0 = g1\n else:\n if x0 < x < x1 or x1 < x < x0: g0 = g1 \n else: x = g0 = g1\n return x", "def BisectionMethod(f, a=0, b=1, tol=1e-10):\n\tstart = time()\n\tf_a = f(a)\n\tf_b = f(b)\n\t\n\t# Initialization of errors and iters\n\terrs = []\n\ti = 0\n\n\tif f_a == 0:\n\t\treturn a\n\telif f_b == 0:\n\t\treturn b\n\telif f_a*f_b > 0:\n\t\tprint(\"The function values have the same sign!\")\n\telse:\n\t\terror = b-a\n\t\twhile error > tol:\n\t\t\tc = (b+a)/2\n\t\t\tf_c = f(c)\n\t\t\t\n\t\t\terrs.append(error)\n\t\t\t\n\t\t\tif f_a*f_c > 0:\n\t\t\t\ta = c\n\t\t\t\tf_a = f_c\n\t\t\telif f_a*f_c < 0:\n\t\t\t\tb = c\n\t\t\t\tf_b = f_c\n\t\t\telse:\n\t\t\t\tbreak\n\t\t\terror = b-a\n\t\t\ti = i+1\n\tend = time()\n\treturn c, (end-start), i", "def bisect_left(func, val, low, high):\n\n a = low\n b = high\n while b > a:\n guess = (a+b)//2\n\n if val > func(guess):\n a = guess+1\n else:\n b = guess\n\n return a", "def bisecter(func, step=0.1):\n points = list(func.points(step))\n area = sum(map(lambda p: p[1], points))\n\n current = 0.\n for x, y in points:\n current += y\n if current >= area / 2:\n return x", "def findmin(f, ranges, args=(), Ns=None, full_output=False, method='brute',\n finish=False):\n if method == 'brute':\n Ns = Ns or 3\n x0, J0, xs, Jout = brute(f, ranges, args=args, Ns=Ns, full_output=True)\n elif method == 'monte carlos':\n Ns = Ns or 1000\n x0, J0, xs, Jout = monte_carlos(f, ranges, args=args, Ns=Ns, full_output=True)\n else:\n valid_methods = ('brute', 'monte carlos')\n raise ValueError('optimization method must be one of {0!r}'.format(\n ', '.join(valid_methods)))\n\n # Mask any values that are not finite\n mask = np.isfinite(Jout)\n xs = xs[mask]\n Jout = Jout[mask]\n if not len(xs):\n raise RuntimeError('Failed to find optimized parameters')\n\n if finish:\n import scipy.optimize\n res = scipy.optimize.fmin(f, x0, args=args, full_output=True)\n x0, J0 = res[0:2]\n\n if not full_output:\n return x0\n return x0, J0, xs, Jout", "def fmin(func, x0, args=(), kw=dict(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,\n full_output=0, disp=1, retall=0, callback=None, zdelt = 0.00025, nonzdelt = 0.05, \n holdfixed=None):\n # 2011-04-13 14:26 IJMC: Adding Keyword option\n # 2011-05-11 10:48 IJMC: Added the zdelt and nonzdelt options\n # 2011-05-30 15:36 IJMC: Added the holdfixed option\n\n def wrap_function(function, args, **kw):\n ncalls = [0]\n def function_wrapper(x):\n ncalls[0] += 1\n return function(x, *args, **kw)\n return ncalls, function_wrapper\n\n # Set up holdfixed arrays\n if 
holdfixed is not None:\n holdfixed = np.array(holdfixed)\n #x0[holdfixed] = x0[holdfixed]\n holdsome = True\n else:\n holdsome = False\n #holdfixed = np.zeros(params.size, dtype=bool)\n \n #if holdsome:\n # print \"holdfixed>>\", holdfixed\n\n fcalls, func = wrap_function(func, args, **kw)\n x0 = np.asfarray(x0).flatten()\n xoriginal = x0.copy()\n N = len(x0)\n rank = len(x0.shape)\n if not -1 < rank < 2:\n raise ValueError, \"Initial guess must be a scalar or rank-1 sequence.\"\n if maxiter is None:\n maxiter = N * 200\n if maxfun is None:\n maxfun = N * 200\n\n rho = 1; chi = 2; psi = 0.5; sigma = 0.5;\n one2np1 = range(1,N+1)\n\n if rank == 0:\n sim = np.zeros((N+1,), dtype=x0.dtype)\n else:\n sim = np.zeros((N+1,N), dtype=x0.dtype)\n fsim = np.zeros((N+1,), float)\n sim[0] = x0\n if retall:\n allvecs = [sim[0]]\n #print func.__name__\n #print x0\n fsim[0] = func(x0)\n for k in range(0,N):\n y = np.array(x0,copy=True)\n if y[k] != 0:\n y[k] = (1+nonzdelt)*y[k]\n else:\n y[k] = zdelt\n if holdsome and k in holdfixed:\n y[k] = xoriginal[k]\n sim[k+1] = y\n f = func(y)\n fsim[k+1] = f\n\n ind = np.argsort(fsim)\n fsim = np.take(fsim,ind,0)\n # sort so sim[0,:] has the lowest function value\n sim = np.take(sim,ind,0)\n\n iterations = 1\n\n while (fcalls[0] < maxfun and iterations < maxiter):\n ### IJC Edit to understand fmin!\n ##print 'xtol>> ' + str(max(np.ravel(abs(sim[1:]-sim[0])))) + ' > ' + str(xtol)\n ##print 'ftol>> ' + str(max(abs(fsim[0]-fsim[1:]))) + ' > ' + str(ftol)\n if (max(np.ravel(abs(sim[1:]-sim[0]))) <= xtol \\\n and max(abs(fsim[0]-fsim[1:])) <= ftol):\n break\n\n xbar = np.add.reduce(sim[:-1],0) / N\n xr = (1+rho)*xbar - rho*sim[-1]\n if holdsome:\n xr[holdfixed] = xoriginal[holdfixed]\n fxr = func(xr)\n doshrink = 0\n\n if fxr < fsim[0]:\n xe = (1+rho*chi)*xbar - rho*chi*sim[-1]\n if holdsome:\n xe[holdfixed] = xoriginal[holdfixed]\n fxe = func(xe)\n\n if fxe < fxr:\n sim[-1] = xe\n fsim[-1] = fxe\n else:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fsim[0] <= fxr\n if fxr < fsim[-2]:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fxr >= fsim[-2]\n # Perform contraction\n if fxr < fsim[-1]:\n xc = (1+psi*rho)*xbar - psi*rho*sim[-1]\n if holdsome:\n xc[holdfixed] = xoriginal[holdfixed]\n fxc = func(xc)\n\n if fxc <= fxr:\n sim[-1] = xc\n fsim[-1] = fxc\n else:\n doshrink=1\n else:\n # Perform an inside contraction\n xcc = (1-psi)*xbar + psi*sim[-1]\n if holdsome:\n xcc[holdfixed] = xoriginal[holdfixed]\n fxcc = func(xcc)\n\n if fxcc < fsim[-1]:\n sim[-1] = xcc\n fsim[-1] = fxcc\n else:\n doshrink = 1\n\n if doshrink:\n for j in one2np1:\n sim[j] = sim[0] + sigma*(sim[j] - sim[0])\n if holdsome:\n sim[j, holdfixed] = xoriginal[holdfixed]\n fsim[j] = func(sim[j])\n\n ind = np.argsort(fsim)\n sim = np.take(sim,ind,0)\n fsim = np.take(fsim,ind,0)\n if callback is not None:\n callback(sim[0])\n iterations += 1\n if retall:\n allvecs.append(sim[0])\n\n x = sim[0]\n fval = min(fsim)\n warnflag = 0\n\n if fcalls[0] >= maxfun:\n warnflag = 1\n if disp:\n print \"Warning: Maximum number of function evaluations has \"\\\n \"been exceeded.\"\n elif iterations >= maxiter:\n warnflag = 2\n if disp:\n print \"Warning: Maximum number of iterations has been exceeded\"\n else:\n if disp:\n print \"Optimization terminated successfully.\"\n print \" Current function value: %f\" % fval\n print \" Iterations: %d\" % iterations\n print \" Function evaluations: %d\" % fcalls[0]\n\n\n if full_output:\n retlist = x, fval, iterations, fcalls[0], warnflag\n if retall:\n retlist += (allvecs,)\n else:\n 
retlist = x\n if retall:\n retlist = (x, allvecs)\n\n return retlist", "def minimize_cost_golden(f, vmin, offset=0, step=1, maxiter=1000):\n # type: (Callable[[int], float], float, int, int, Optional[int]) -> MinCostResult\n fib2 = fib1 = fib0 = 0\n cur_idx = 0\n nfev = 0\n xmax = vmax = v_prev = None\n while maxiter is None or nfev < maxiter:\n v_cur = f(step * fib0 + offset)\n nfev += 1\n\n if v_cur >= vmin:\n # found upper bound, use binary search to find answer\n stop = step * fib0 + offset\n return minimize_cost_binary(f, vmin, start=step * (fib1 + 1) + offset,\n stop=stop, save=stop, step=step, nfev=nfev)\n else:\n if vmax is not None and v_cur <= vmax:\n if cur_idx <= 3:\n # special case: 0 <= xmax < 3, and we already checked all possibilities, so\n # we know vmax < vmin. There is no solution and just return.\n return MinCostResult(x=None, xmax=step * xmax + offset, vmax=vmax, nfev=nfev)\n else:\n # we found the bracket that encloses maximum, perform golden section search\n a, x, b = fib2, fib1, fib0\n fx = v_prev\n while x > a + 1 or b > x + 1:\n u = a + b - x\n fu = f(step * u + offset)\n nfev += 1\n\n if fu >= fx:\n if u > x:\n a, x = x, u\n fx = fu\n else:\n x, b = u, x\n fx = fu\n\n if fx >= vmin:\n # found upper bound, use binary search to find answer\n stop = step * x + offset\n return minimize_cost_binary(f, vmin, start=step * (a + 1) + offset,\n stop=stop, save=stop, step=step,\n nfev=nfev)\n else:\n if u > x:\n b = u\n else:\n a = u\n\n # golden section search terminated, the maximum is less than vmin\n return MinCostResult(x=None, xmax=step * x + offset, vmax=fx, nfev=nfev)\n else:\n # still not close to maximum, continue searching\n vmax = v_prev = v_cur\n xmax = fib0\n cur_idx += 1\n if cur_idx <= 3:\n fib2, fib1, fib0 = fib1, fib0, cur_idx\n else:\n fib2, fib1, fib0 = fib1, fib0, fib1 + fib0\n\n raise ValueError('Maximum number of iteration achieved')", "def hyperopt_fmin(\n fn,\n space,\n algo,\n max_evals=sys.maxsize,\n timeout=None,\n loss_threshold=None,\n trials=None,\n rstate=None,\n allow_trials_fmin=True,\n pass_expr_memo_ctrl=None,\n catch_eval_exceptions=False,\n verbose=True,\n return_argmin=True,\n points_to_evaluate=None,\n max_queue_len=1,\n show_progressbar=True,\n # early_stop_fn=None,\n):\n if rstate is None:\n env_rseed = os.environ.get(\"HYPEROPT_FMIN_SEED\", \"\")\n if env_rseed:\n rstate = np.random.RandomState(int(env_rseed))\n else:\n rstate = np.random.RandomState()\n\n validate_timeout(timeout)\n validate_loss_threshold(loss_threshold)\n\n if allow_trials_fmin and hasattr(trials, \"fmin\"):\n assert False\n # return trials.fmin(\n # fn,\n # space,\n # algo=algo,\n # max_evals=max_evals,\n # timeout=timeout,\n # loss_threshold=loss_threshold,\n # max_queue_len=max_queue_len,\n # rstate=rstate,\n # pass_expr_memo_ctrl=pass_expr_memo_ctrl,\n # verbose=verbose,\n # catch_eval_exceptions=catch_eval_exceptions,\n # return_argmin=return_argmin,\n # show_progressbar=show_progressbar,\n # early_stop_fn=early_stop_fn,\n # )\n\n if trials is None:\n if points_to_evaluate is None:\n trials = base.Trials()\n else:\n assert type(points_to_evaluate) == list\n trials = generate_trials_to_calculate(points_to_evaluate)\n\n domain = base.Domain(fn, space, pass_expr_memo_ctrl=pass_expr_memo_ctrl)\n\n rval = FMinIter(\n algo,\n domain,\n trials,\n max_evals=max_evals,\n timeout=timeout,\n loss_threshold=loss_threshold,\n rstate=rstate,\n verbose=verbose,\n max_queue_len=max_queue_len,\n show_progressbar=show_progressbar,\n # early_stop_fn=early_stop_fn,\n )\n 
rval.catch_eval_exceptions = catch_eval_exceptions\n\n # next line is where the fmin is actually executed\n rval.exhaust()\n\n if len(trials.trials) == 0:\n raise Exception(\n \"There are no evaluation tasks, cannot return argmin of task losses.\"\n )\n return trials", "def minimum(x, y):\r\n # see decorator for function body\r", "def search_interval_3d(function, point_a, point_b, step, tol, max_iterations,\n show_process = False):\n # The first interval is created with the values [0, step]\n t = step\n # We use a parametrization for the line based on the two points given.\n # It is possible to get any point in the line changing the parameters t.\n # For t=0 we get the point_b and for t=1 we get the point_a.\n last_val = function( line_param(point_a, point_b, 0) )\n current_val = function( line_param(point_a, point_b, t) )\n \n # While the signs of the function evaluated in the ends of the interval is\n # the same.\n iterations = 0\n while last_val*current_val > 0:\n if iterations > max_iterations:\n raise Exception('Maximum iterations reached. But no solution was found.')\n # Update the step\n last_val = current_val\n t += step\n # Calculate the new value\n current_val = function( line_param(point_a, point_b, t) )\n iterations += 1\n \n # These point represent the interval for which a change is the signs exist.\n # This means that there is a point in this interval for which the function\n # is zero. We use bisection in this interval to find that point.\n left_point = line_param(point_a, point_b, t - step)\n right_point = line_param(point_a, point_b, t)\n \n if show_process: \n print('number of iterations to find interval = {0}'.format(iterations))\n print('interval found : [{0} , {1} ])'.format(left_point, right_point))\n \n return left_point, right_point", "def golden(\n f: Callable,\n a: Array,\n b: Array,\n c: Optional[Array] = None,\n args: Tuple = (),\n xtol: float = 1e-7,\n maxiter: int = 100,\n full_output: bool = False,\n) -> Union[Array, dict]:\n gr = 2 / (snp.sqrt(5) + 1)\n if c is None:\n c = b - gr * (b - a)\n d = a + gr * (b - a)\n for numiter in range(maxiter):\n fc = f(*((c,) + args))\n fd = f(*((d,) + args))\n b = snp.where(fc < fd, d, b)\n a = snp.where(fc >= fd, c, a)\n xerr = snp.amax(snp.abs(b - a))\n if xerr <= xtol:\n break\n c = b - gr * (b - a)\n d = a + gr * (b - a)\n\n fa = f(*((a,) + args))\n fb = f(*((b,) + args))\n idx = snp.argmin(snp.stack((fa, fb)), axis=0)\n x = snp.choose(idx, (a, b))\n if full_output:\n r = (x, {\"iter\": numiter, \"xerr\": xerr})\n else:\n r = x\n return r", "def bisect_search(f, a, b, precision_digits=2, maxiter=1e2):\n log = logging.getLogger('bisect_search')\n\n # Which of the bounds gives True? 
Can't be both!\n if f(a) == f(b):\n raise ValueError(\"f must not be true or false on both bounds\")\n true_on_a = f(a)\n\n log.debug(\"Starting search between %s (%s) and %s (%s)\"\n \" with %d precision digits\" % (a, f(a), b, f(b), precision_digits))\n\n # Do a bisection search, sticking to precision_digits\n for iter_i in range(int(maxiter)):\n\n # Find the new bisection point\n x = (a + b) / 2\n x = round_to_digits(x, precision_digits)\n\n # If we are down to a single point, return that\n if x == a or x == b:\n return x\n true_on_x = f(x)\n\n # Update the appropriate bound\n if true_on_a:\n if true_on_x:\n a = x\n else:\n b = x\n else:\n if true_on_x:\n b = x\n else:\n a = x\n\n log.debug(\"Iteration %d, searching between [%s and %s], last x was %s (%s)\" % (iter_i, a, b, x, true_on_x))\n\n else:\n raise RuntimeError(\"Infinite loop encountered in bisection search!\")", "def find_root(function, point_a, point_b, step, tol, max_iterations, \n show_process = False):\n left_point , right_point = search_interval_3d(function, point_a, point_b, \n step, tol, max_iterations,\n show_process)\n \n point_where_zero = bisection_3d(function, left_point, right_point, tol, \n max_iterations, show_process)\n \n return point_where_zero", "def minimize_bounded_start(candidates_func=candidate_start_points_random,\n *candidates_func_args, **candidates_func_kwargs):\n\n def minimize_bounded_start_dec(minimize_func):\n\n @wraps(minimize_func)\n def _minimize_bounded_start(fun, x0_bounds, *args, **kwargs):\n candidate_start_points = candidates_func(x0_bounds,\n *candidates_func_args,\n **candidates_func_kwargs)\n candidate_start_values = fun(candidate_start_points)\n min_start_point_ind = np.argmin(candidate_start_values)\n min_start_point = candidate_start_points[:, min_start_point_ind]\n res = minimize_func(fun, min_start_point, *args, **kwargs)\n res.start = min_start_point\n return res\n\n return _minimize_bounded_start\n\n return minimize_bounded_start_dec", "def integrate(func, a, b, tol=1e-8):\n left_pts = []\n result = integ(func, a, b, tol, 0, left_pts)\n\n return result, left_pts", "def test_bisection_1b(self):\n logging.info(\"\\nANSWERS TO EXERCISE 1.1B\")\n left = 0.5\n right = 3.1\n\n # The final interval should contain the desired root.\n root, (left, right) = undertest.bisection(self.func, left, right, self.maxit)\n self.assertTrue(_root_in_interval(self.desired_root, left, right))", "def evaluate(bounds , func):\n if len(bounds) != 2:\n raise ValueError(\"Bounds should contain 2 elements, found %d.\" % len(bounds))\n\n a = bounds[0]\n b = bounds[1]\n ya = func(a)\n yb = func((a+b)/2.)\n yc = func(b)\n I = (b-a) * (ya + 4. * yb + yc) / 6.\n return I", "def fixed_point(func, init, **kwargs):\n\n def fixed_func(mix):\n \"\"\"Labeling function for a fixed point\"\"\"\n return np.argmin((mix == 0) - mix + func(mix))\n\n return labeled_subsimplex(fixed_func, init, **kwargs)", "def relaxation(start_guess=1, func_to_relax=None, tolerance=1e-6):\r\n x1=start_guess\r\n x2=f(x1,c)\r\n while(np.abs(x2-x1) > tolerance):\r\n x1 = x2\r\n x2 = f(x2,c)\r\n return x2", "def minimize_cost_binary_float(f, # type: Callable[[float], float]\n vmin, # type: float\n start, # type: float\n stop, # type: float\n tol=1e-8, # type: float\n save=None, # type: Optional[float]\n nfev=0, # type: int\n ):\n # type: (...) 
-> MinCostResult\n bin_iter = FloatBinaryIterator(start, stop, tol=tol)\n while bin_iter.has_next():\n x_cur = bin_iter.get_next()\n v_cur = f(x_cur)\n nfev += 1\n\n if v_cur >= vmin:\n save = x_cur\n bin_iter.down()\n else:\n bin_iter.up()\n return MinCostResult(x=save, xmax=None, vmax=None, nfev=nfev)", "def the_function(interval):\n if math.ceil(interval.upper) % 2:\n return interval * type(interval).closed(\n fractions.Fraction(3, 2),\n fractions.Fraction(3, 2)\n )\n else:\n return interval * type(interval).closed(\n fractions.Fraction(1, 2),\n fractions.Fraction(1, 2)\n )", "def bracketing(f, x0, J0, g0, a, amax, mu1, mu2, p):\r\n a0 = 0\r\n a1 = a\r\n Ja0 = J0\r\n ga0 = g0\r\n Jprime = g0.T@p\r\n i = 0\r\n Ja1, ga1 = f(x0+a1*p)\r\n while True:\r\n if (Ja1 > (J0 + mu1*a1*Jprime)) or (Ja1 > Ja0 and i > 1):\r\n Ja, ga, astar = pinpoint(f, J0, g0, x0, mu1, mu2, \\\r\n a0, Ja0, ga0, a1, Ja1, ga1, p)\r\n return Ja, ga, astar\r\n Jprimea1 = ga1.T@p\r\n if (np.abs(Jprimea1) <= np.abs(mu2*Jprime)):\r\n astar = a1\r\n return Ja1, ga1, astar\r\n elif (Jprimea1 >= 0):\r\n Ja, ga, astar = pinpoint(f, J0, g0, x0, mu1, mu2, \\\r\n a1, Ja1, ga1, a0, Ja0, ga0, p)\r\n return Ja, ga, astar\r\n else:\r\n a0 = a1\r\n Ja0 = Ja1\r\n a1 = 1.2*a1\r\n Ja1, ga1 = f(x0+a1*p)\r\n i += 1", "def solve_eq(preswet,func):\n\n from numpy import sign,diff\n\n # Sorry to be annoying but I'm going to force you to use\n # a monotonically increasing variable\n #assert (sign(diff(preswet))==1).all(), \"Use a monotonically increasing abscissa\"\n\n # Identify changes in sign of function\n dsign=sign(func)\n isdiff=np.zeros(dsign.shape,dtype=bool)\n isdiff[1:]=abs(diff(dsign)).astype(bool)\n\n # shift to get the value on the other side\n # of the x-axis\n shift=np.zeros(dsign.shape,dtype=bool)\n shift[:-1]=isdiff[1:]; shift[-1]=isdiff[0]\n\n # solve by linear interpolation between \n # values points\n sols=np.zeros((isdiff.sum()))\n stab=np.zeros((isdiff.sum()))\n for ii in range(isdiff.sum()):\n f0=func[isdiff][ii]\n f1=func[shift][ii]\n p0=preswet[isdiff][ii]\n p1=preswet[shift][ii]\n slope=(f1-f0)/(p1-p0)\n sols[ii]=p0-f0/slope\n stab[ii]=sign(slope)\n\n ### Debug with plots\n #fig=plt.figure()\n #ax=fig.add_subplot(111)\n #ax.plot(preswet,func)\n #ax.plot(sols,zeros(sols.shape),ls='',marker='o')\n #ax.plot(preswet[isdiff],func[isdiff],ls='',marker='+',mew=2)\n #ax.plot(preswet[shift],func[shift],ls='',marker='x',mew=2)\n #ax.grid(True)\n #show()\n\n return sols,stab", "def integrate_stack(fun, a, b, tol=1e-8):\n stack = [(a, b)]\n tol = tol/(b-a) # fix tolerance (so err < tol*(b-a))\n total = 0\n min_size = (b-a)/2**(MAX_DEPTH)\n intervals = []\n while stack:\n a, b = stack.pop()\n mid = 0.5*(a + b)\n a1 = simp(fun, a, b)\n a2 = simp(fun, a, mid) + simp(fun, mid, b)\n err_approx = (16/15)*abs(a1 - a2)\n\n if b-a <= min_size or err_approx < tol*(b-a):\n total += a1\n intervals.append((a, b))\n else:\n stack.extend([(a, mid), (mid, b)])\n\n return total, intervals", "def argmin(function, X, tiesolve=None):\n X = [(x, function(x)) for x in sorted(X, key=function)]\n X = [x for x, y in itertools.takewhile(lambda pair: pair[1] == X[0][1], X)]\n return tiesolve(X) if tiesolve is not None else X", "def bisection_method(x, y):\n if y[0] * y[1] < 0:\n return [x[0], (x[0] + x[1])/2, x[1]]\n elif y[1] * y[2] < 0:\n return [x[1], (x[1] + x[2])/2, x[2]]\n else:\n raise ValueError(\"No valid root detected by binary search in provided bounds\")", "def find_root(f, df, ddf, initial_guess = 0.0, limit = 0.00001, max_iterations = 1000):\n 
xn_1 = initial_guess\n i = 0\n while i < max_iterations:\n fx = f(xn_1)\n dfx = df(xn_1)\n ddfx = ddf(xn_1)\n xn = xn_1 - 2 * fx * dfx / (2 * dfx ** 2 - fx * ddfx)\n if abs(xn - xn_1) < limit:\n return xn\n xn_1 = xn\n i += 1\n return None", "def test_curve_apply():\n c = Curve(data=data_num)\n c2 = c.apply(window_length=3)\n c3 = c.apply(window_length=3, func1d=np.min)\n\n assert c2.df.iloc[0][0] - 4.491228070175438 < 0.0001\n assert c3.df.iloc[0][0] - 1 < 0.0001", "def solve(self, f: sym.Function, start: float):\n if self.__doVisual:\n self.__visualisiere(f, style='r', description='f(' + str(self.__symbol) + ')=' + str(f))\n\n self.__ableitung = f.diff(self.__symbol)\n self.__funktion = f\n self.__tries = 0\n current_point = start\n current_div = abs(f.evalf(subs={self.__symbol: current_point}))\n # wiederholen bis genauigkeit erreicht oder das Maximum der Versuche erreicht ist\n while current_div > self.__precision and self.__tries < self.__max_tries:\n current_point = self.__do_newton(f, current_point)\n result = self.__evaluate(current_point)\n current_div = abs(result)\n self.__tries = self.__tries + 1\n\n if self.__doVisual:\n self.__visual_result()\n return current_point, current_div", "def objective(trial, \n bounds: Optional[Iterable]=None, \n func: Optional[Callable]=None, \n param_names: Optional[List[str]]=None):\n if param_names is None:\n param_names = PARAM_NAMES\n if (bounds is None):\n bounds = ((-10, 10) for _ in param_names)\n if not isinstance(bounds, dict):\n bounds = dict((p, (min(b), max(b))) \n for p, b in zip(param_names, bounds))\n if func is None:\n func = DEFAULT_METRIC_FUNC\n\n params = dict(\n (p, trial.suggest_float(p, bounds.get(p)[0], bounds.get(p)[1])) \n for p in param_names \n )\n # x = trial.suggest_float('x', -10, 10)\n return func((params[p] for p in param_names))", "def find_f_equals_1():\n f1 = scipy.optimize.brentq(f2, 0, -10)\n return f1", "def sp_solve ( fun , xmin , xmax , C = 0 , args = () ) :\n ##\n if iszero ( C ) :\n return findroot ( fun , xmin , xmax , args = args )\n ##\n func = lambda x , *a : fun(x,*a)-C\n return findroot ( func , xmin , xmax , args = args )", "def test_fixed_point(testFunctions, tol, printFlag): \n pass", "def bisect_right(func, val, low, high):\n a = low\n b = high\n while b > a:\n guess = (a+b)//2\n\n if val >= func(guess):\n a = guess+1\n else:\n b = guess\n\n return a", "def minimize_cost_binary(f, # type: Callable[[int], float]\n vmin, # type: float\n start=0, # type: int\n stop=None, # type: Optional[int]\n step=1, # type: int\n save=None, # type: Optional[int]\n nfev=0, # type: int\n ):\n # type: (...) 
-> MinCostResult\n bin_iter = BinaryIterator(start, stop, step=step)\n while bin_iter.has_next():\n x_cur = bin_iter.get_next()\n v_cur = f(x_cur)\n nfev += 1\n\n if v_cur >= vmin:\n save = x_cur\n bin_iter.down()\n else:\n bin_iter.up()\n return MinCostResult(x=save, xmax=None, vmax=None, nfev=nfev)", "def fixed_func(mix):\n return np.argmin((mix == 0) - mix + func(mix))", "def bisect(left, right, f, epsilon=None, eta=0, verbose=False, niter_max=200):\n\n if epsilon is None:\n epsilon = (right - left) * 1.e-7\n\n logger.info(\"Entering bisection search algorithm\")\n for i in range(niter_max):\n\n if (right - left) < epsilon:\n return (right + left) / 2\n\n mid = (left + right) / 2\n z = f(mid)\n\n logger.info(f\"{left:f} {mid:f} {right:f} {z:f}\")\n\n if (abs(z) < eta):\n return mid\n elif(z < 0):\n left = mid\n else:\n right = mid\n\n raise ValueError(\"Bisection algorithm did not converge\")", "def brents(f, x0, x1, max_iter=50, tolerance=1e-5):\n \n fx0 = f(x0)\n fx1 = f(x1)\n \n assert (fx0 * fx1) <= 0, \"Root not bracketed\" \n \n if abs(fx0) < abs(fx1):\n x0, x1 = x1, x0\n fx0, fx1 = fx1, fx0\n \n x2, fx2 = x0, fx0\n \n d = np.nan\n mflag = True\n steps_taken = 0\n \n while steps_taken < max_iter and abs(x1-x0) > tolerance:\n fx0 = f(x0)\n fx1 = f(x1)\n fx2 = f(x2)\n \n if fx0 != fx2 and fx1 != fx2:\n L0 = (x0 * fx1 * fx2) / ((fx0 - fx1) * (fx0 - fx2))\n L1 = (x1 * fx0 * fx2) / ((fx1 - fx0) * (fx1 - fx2))\n L2 = (x2 * fx1 * fx0) / ((fx2 - fx0) * (fx2 - fx1))\n new = L0 + L1 + L2\n \n else:\n new = x1 - ( (fx1 * (x1 - x0)) / (fx1 - fx0) )\n \n tt1 = (new < ((3 * x0 + x1) / 4) or new > x1)\n tt2 = (mflag == True and (abs(new - x1)) >= (abs(x1 - x2) / 2))\n tt3 = (mflag == False and (abs(new - x1)) >= (abs(x2 - d) / 2))\n tt4 = (mflag == True and (abs(x1 - x2)) < tolerance)\n tt5 = (mflag == False and (abs(x2 - d)) < tolerance)\n if (tt1 or\n tt2 or\n tt3 or\n tt4 or\n tt5):\n new = (x0 + x1) / 2\n mflag = True\n \n else:\n mflag = False\n \n fnew = f(new)\n d, x2 = x2, x1\n \n if (fx0 * fnew) < 0:\n x1 = new\n else:\n x0 = new\n \n if abs(fx0) < abs(fx1):\n x0, x1 = x1, x0\n \n steps_taken += 1\n \n return x1, steps_taken", "def find_fmin_on_grid(f, xs, args, full_output):\n Nx = len(xs)\n Jout = np.zeros(Nx)\n for k in range(Nx):\n Jout[k] = f(xs[k], *args)\n idx = np.nanargmin(Jout)\n if not full_output:\n return xs[idx], Jout[idx]\n return xs[idx], Jout[idx], xs, Jout", "def find_opt_func(W, x0, N, M, h_initial=None):\n if h_initial is None:\n h_initial = np.ones(N, dtype=float)\n h = leastsq(func_to_min, h_initial, args=(x0, M, W), full_output=True)[0]\n return OptFunc(W, x0, h, M)", "def intsolve(func, value, start=None, stop=None, maxit=1000):\n from scipy import inf\n from scipy.integrate import quad\n from warnings import warn\n if start is None:\n start = -inf\n if stop is None:\n stop = inf\n lower_bound = start\n upper_bound = stop\n it = 0\n while it < maxit:\n it += 1\n if upper_bound == inf:\n if lower_bound == -inf:\n test_bound = 0\n elif lower_bound < 1.0:\n test_bound = 1.0\n else:\n test_bound = lower_bound * 2\n else:\n if lower_bound == -inf:\n if upper_bound > -1.0:\n test_bound = -1.0\n else:\n test_bound = upper_bound * 2\n else:\n test_bound = (lower_bound + upper_bound) / 2.0\n (test_value, err) = quad(func, start, test_bound)\n if abs(value - test_value) <= err:\n break\n elif value < test_value:\n upper_bound = test_bound\n else:\n lower_bound = test_bound\n\n if abs(value - test_value) > err:\n warn('Difference between desired value and actual is ' +\n 
str(abs(value - test_value)) +\n ', greater than integral error ' +\n str(err), UserWarning, stacklevel=2)\n return test_bound", "def fibonacci_search(loss_function: rosenbrock, start: point, direction: list, epsilon=1) -> float:\n a, b = advance_retreat_method(loss_function, start, direction)\n\n # build the Fibonacci series\n F, d = [1.0, 2.0], (b - a) / epsilon\n while F[-1] < d: F.append(F[-1] + F[-2])\n\n # find the minimum\n N = len(F) - 1\n p, q = a + (1 - F[N - 1] / F[N]) * (b - a), a + F[N - 1] / F[N] * (b - a)\n while abs(a - b) > epsilon and N > 0:\n N = N - 1\n f_p = loss_function.f(start + point(direction[0] * p, direction[1] * p))\n f_q = loss_function.f(start + point(direction[0] * q, direction[1] * q))\n if f_p < f_q:\n b, q = q, p\n p = a + (1 - F[N - 1] / F[N]) * (b - a)\n else:\n a, p = p, q\n q = a + F[N - 1] / F[N] * (b - a)\n\n return (a + b) / 2", "def bisection(f, fu, point_a, point_b, point_c, point_d, lower_bound, upper_bound, length):\n n = 1\n theta = 0\n a = lower_bound\n b = upper_bound\n while n <= 100:\n theta = (a + b) / 2.0\n if -1e-6 < f(fu(point_a, point_b, point_c, theta), point_d) - length < 1e-6:\n # print 'Residual', f(fu(point_a, point_b, point_c, theta), point_d) - length\n # print 'iteration', n\n return theta\n else:\n n = n + 1\n if f(fu(point_a, point_b, point_c, theta), point_d) - length > 0:\n b = theta\n else:\n a = theta\n\n print 'failedtheta', theta, 'Residual', f(fu(point_a, point_b, point_c, theta), point_d) - length\n print 'iteration', n\n return False", "def getMin(listOfVCFs, compareFun, numMins = 1):\n # \"\"\"Returns the numMin keys with smallest values in the list\"\"\"\n return min(listOfVCFs, key = compareFun)", "def wegstein_secant(f, x0, x1, xtol, ytol=5e-8, args=(), maxiter=50):\n _abs = abs\n y0 = f(x0, *args)\n if _abs(y0) < ytol: return x0\n y1 = f(x1, *args)\n if _abs(y1) < ytol: return x0\n g0 = x1 - y1*(x1-x0)/(y1-y0)\n y0 = y1\n dx = g0-x1\n x1 = g0\n for iter in range(maxiter):\n y1 = f(x1, *args)\n g1 = x1 - y1*dx/(y1-y0)\n x0 = x1\n try:\n w = dx/(dx-g1+g0)\n x1 = w*g1 + (1.-w)*x1\n except:\n x1 = g1\n dx = x1-x0\n if _abs(dx) < xtol or _abs(y1) < ytol: return x1\n y0 = y1\n g0 = g1\n raise SolverError(f'failed to converge after {maxiter} iterations')", "def newtons_method_1d(f, df_dx, x0, tol):\n # begin solution\n x = x0\n while abs(f(x)) > tol:\n x -= f(x) / df_dx(x)\n return x\n # end solution", "def js_fgan_lower_bound(f):\n f_diag = tf.linalg.tensor_diag_part(f)\n first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))\n n = tf.cast(f.shape[0], tf.float32)\n second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.softplus(f_diag))) / (n * (n - 1.))\n return first_term - second_term", "def structured_minimum(x, y):\r\n # see decorator for function body\r", "def tf_tolerance(\n x,\n bounds=(0.0, 0.0),\n margin=0.0,\n value_at_margin=rewards._DEFAULT_VALUE_AT_MARGIN, # pylint: disable=protected-access\n sigmoid=\"gaussian\",\n):\n if sigmoid == \"gaussian\":\n sigmoid_fn = gaussian_sigmoid\n elif sigmoid == \"long_tail\":\n sigmoid_fn = long_tail_sigmoid\n elif sigmoid == \"linear\":\n sigmoid_fn = linear_sigmoid\n elif sigmoid == \"quadratic\":\n sigmoid_fn = quadratic_sigmoid\n else:\n raise NotImplementedError\n lower, upper = bounds\n in_bounds = tf.logical_and(lower <= x, x <= upper)\n d = tf.where(x < lower, lower - x, x - upper) / margin\n value = tf.where(in_bounds, tf.ones_like(d), sigmoid_fn(d, value_at_margin))\n return value", "def myleastsq(errfunc0,x0,args=None,bounds=None,**exkw):\n 
from scipy import optimize\n if hasattr(optimize,'minimize'):\n def errfunc(x,*iargs):\n return sum(errfunc0(x,*iargs)**2)\n if args is not None: exkw['args'] = args\n res = optimize.minimize(errfunc,x0[:],bounds=bounds,**exkw)\n return res.x,res.success\n else:\n lres = sys.float_info.max\n def errfunc(x,*iargs):\n if bounds!=None:\n for idx in range(len(x)):\n if bounds[idx][0]!=None and x[idx]<bounds[idx][0]: return lres\n if bounds[idx][1]!=None and x[idx]>bounds[idx][1]: return lres\n return errfunc0(x,*iargs)\n if args is not None: exkw['args'] = args\n return optimize.leastsq(errfunc,x0,**exkw)", "def golden_search(loss_function: rosenbrock, start: point, direction: list, epsilon=0.1) -> float:\n a, b = advance_retreat_method(loss_function, start, direction)\n\n # find the minimum\n golden_num = (math.sqrt(5) - 1) / 2\n p, q = a + (1 - golden_num) * (b - a), a + golden_num * (b - a)\n while abs(a - b) > epsilon:\n f_p = loss_function.f(start + point(direction[0] * p, direction[1] * p))\n f_q = loss_function.f(start + point(direction[0] * q, direction[1] * q))\n if f_p < f_q:\n b, q = q, p\n p = a + (1 - golden_num) * (b - a)\n else:\n a, p = p, q\n q = a + golden_num * (b - a)\n\n return (a + b) / 2", "def between(min, max):\n def func(x):\n return min <= x <= max\n return func", "def lower_bound(self) -> float:\n ...", "def minimize(fun, \n bounds = None, \n value_limit = math.inf,\n num_retries = 1000,\n logger = None,\n workers = mp.cpu_count(),\n popsize = 31, \n max_evaluations = 50000, \n capacity = 500,\n stop_fittness = None,\n optimizer = None,\n ):\n\n if optimizer is None:\n optimizer = de_cma(max_evaluations, popsize, stop_fittness) \n store = Store(bounds, capacity = capacity, logger = logger)\n return retry(fun, store, optimizer.minimize, num_retries, value_limit, workers)", "def func_exact_sol(x, case):\n TB, TA, L, q, k = get_valdict(case, 'TB,TA,L,q,k')\n return ((TB-TA)/L + (q/(2*k)*(L-x)))*x + TA", "def find_poly_root(poly, initial_guess = 0.0, limit = 0.00001, max_iterations = 1000):\n # Calculate the polynomial derivatives\n dpoly = polynomial.derivative(poly)\n ddpoly = polynomial.derivative(dpoly)\n # Closures !!!\n f = lambda x: polynomial.eval(poly, x)\n df = lambda x: polynomial.eval(dpoly, x)\n ddf = lambda x: polynomial.eval(ddpoly, x)\n # Call the generic root finder\n return find_root(f, df, ddf, initial_guess, limit, max_iterations)", "def uniform_search(fun, a, b, E, n=3, counter=0):\n if b - a < E:\n return (b + a) / 2, counter\n step = (b - a) / n\n xn = a + step\n min_x = a\n min_f = fun(a)\n while xn <= b:\n counter += 1\n f = fun(xn)\n if f < min_f:\n min_x = xn\n min_f = f\n xn += step\n counter += 2\n if fun(min_x - step) < fun(min_x + step):\n return uniform_search(fun, min_x - step, min_x, E, n, counter)\n return uniform_search(fun, min_x, min_x + step, E, n, counter)", "def _linesearch_powell(func, p, xi, tol=1e-3):\n def myfunc(alpha):\n return func(p + alpha * xi)\n alpha_min, fret, iter, num = optimize.brent(myfunc, full_output=1, tol=tol)\n xi = alpha_min*xi\n return squeeze(fret), p+xi, xi", "def rkf( f, a, b, x0, tol, hmax, hmin ):\n\n # Coefficients used to compute the independent variable argument of f\n\n a2 = 2.500000000000000e-01 # 1/4\n a3 = 3.750000000000000e-01 # 3/8\n a4 = 9.230769230769231e-01 # 12/13\n a5 = 1.000000000000000e+00 # 1\n a6 = 5.000000000000000e-01 # 1/2\n\n # Coefficients used to compute the dependent variable argument of f\n\n b21 = 2.500000000000000e-01 # 1/4\n b31 = 9.375000000000000e-02 # 3/32\n b32 = 
2.812500000000000e-01 # 9/32\n b41 = 8.793809740555303e-01 # 1932/2197\n b42 = -3.277196176604461e+00 # -7200/2197\n b43 = 3.320892125625853e+00 # 7296/2197\n b51 = 2.032407407407407e+00 # 439/216\n b52 = -8.000000000000000e+00 # -8\n b53 = 7.173489278752436e+00 # 3680/513\n b54 = -2.058966861598441e-01 # -845/4104\n b61 = -2.962962962962963e-01 # -8/27\n b62 = 2.000000000000000e+00 # 2\n b63 = -1.381676413255361e+00 # -3544/2565\n b64 = 4.529727095516569e-01 # 1859/4104\n b65 = -2.750000000000000e-01 # -11/40\n\n # Coefficients used to compute local truncation error estimate. These\n # come from subtracting a 4th order RK estimate from a 5th order RK\n # estimate.\n\n r1 = 2.777777777777778e-03 # 1/360\n r3 = -2.994152046783626e-02 # -128/4275\n r4 = -2.919989367357789e-02 # -2197/75240\n r5 = 2.000000000000000e-02 # 1/50\n r6 = 3.636363636363636e-02 # 2/55\n\n # Coefficients used to compute 4th order RK estimate\n\n c1 = 1.157407407407407e-01 # 25/216\n c3 = 5.489278752436647e-01 # 1408/2565\n c4 = 5.353313840155945e-01 # 2197/4104\n c5 = -2.000000000000000e-01 # -1/5\n\n # Set t and x according to initial condition and assume that h starts\n # with a value that is as large as possible.\n \n t = a\n x = numpy.array(x0)\n h = hmax\n\n # Initialize arrays that will be returned\n\n T = numpy.array( [t] )\n X = numpy.array( [x] )\n \n while t < b:\n\n # Adjust step size when we get to last interval\n\n if t + h > b:\n h = b - t;\n\n # Compute values needed to compute truncation error estimate and\n # the 4th order RK estimate.\n\n k1 = h * f( x, t )\n k2 = h * f( x + b21 * k1, t + a2 * h )\n k3 = h * f( x + b31 * k1 + b32 * k2, t + a3 * h )\n k4 = h * f( x + b41 * k1 + b42 * k2 + b43 * k3, t + a4 * h )\n k5 = h * f( x + b51 * k1 + b52 * k2 + b53 * k3 + b54 * k4, t + a5 * h )\n k6 = h * f( x + b61 * k1 + b62 * k2 + b63 * k3 + b64 * k4 + b65 * k5, \\\n t + a6 * h )\n\n # Compute the estimate of the local truncation error. 
If it's small\n # enough then we accept this step and save the 4th order estimate.\n \n r = abs( r1 * k1 + r3 * k3 + r4 * k4 + r5 * k5 + r6 * k6 ) / h\n if len( numpy.shape( r ) ) > 0:\n r = max( r )\n if r <= tol:\n t = t + h\n x = x + c1 * k1 + c3 * k3 + c4 * k4 + c5 * k5\n T = numpy.append( T, t )\n X = numpy.append( X, [x], 0 )\n\n # Now compute next step size, and make sure that it is not too big or\n # too small.\n\n h = h * min( max( 0.84 * ( tol / r )**0.25, 0.1 ), 4.0 )\n\n if h > hmax:\n h = hmax\n elif h < hmin:\n raise RuntimeError(\"Error: Could not converge to the required tolerance %e with minimum stepsize %e.\" % (tol,hmin))\n break\n # endwhile\n\n return ( T, X )", "def iqi(f, a, b, ztol, maxiter):\n\txnm2 = a\n\tfnm2 = f(a)\n\txnm1 = b\n\tfnm1 = f(b)\n\txn = a + (b-a)*0.5\n\tfn = f(xn)\n\tfasign = (fnm2 < 0.0)\n\tif (fnm1< 0.0) == fasign:\n\t return None\n \n\tswitch = True\n\tfor i in range(maxiter):\n\t\tprint \"i, xn, fn, a, b, fnm2, fnm1=\", i, xn, fn,a, b, fnm2, fnm1\n\t\t#Check for near equal function values.\n\t\tif abs(fnm2-fnm1)< ztol or \\\n\t\t abs(fnm1-fn) < ztol or\\\n\t\t abs(fn-fnm2) < ztol:\n \n\t\t #ensure bisection is used if this is the case\n\t\t if switch:\n\t\t\t print \"switching to bisection\",\n\t\t\t switch = not switch\n\t\tif switch:\n\t\t # perform quadratic interpolation\n\t\t print \"iqi:\",\n\t\t xest = invqinterp(xnm2, xnm1, xn,\n\t\t\t\t\t\t\t fnm2, fnm1, fn)\n\t\telse:\n\t\t print \"biseciton:\",\n\t\t xest = a + (b-a) * 0.5\n \n\t\tswitch = not switch\n \n\t\tfxest= f(xest)\n\t\tprint \"xest, fxest =\", xest, fxest\n\t\tif abs(fxest) < ztol:\n\t\t print \"tolerance met.\"\n\t\t return xest, fxest, i+1\n \n\t\tif (fxest < 0) == fasign:\n\t\t xnm2 = xest\n\t\t fnm2 = fxest\n\t\t a = xest\n\t\telse:\n\t\t xnm1 = xest\n\t\t fnm1 = fxest\n\t\t b = xest\n \n\t\txn = a + (b-a)*0.5\n\t\tfn = f(xn)\n\t\tif abs(b-a) < ztol:\n\t\t return (xn, fn,i+1)\n\treturn xn, fn, maxiter", "def atmin(a,lowerlimit=None,dimension=None,inclusive=1):\r\n if inclusive: lowerfcn = N.greater\r\n else: lowerfcn = N.greater_equal\r\n if dimension == None:\r\n a = N.ravel(a)\r\n dimension = 0\r\n if lowerlimit == None:\r\n lowerlimit = N.minimum.reduce(N.ravel(a))-11\r\n biggest = N.maximum.reduce(N.ravel(a))\r\n ta = N.where(lowerfcn(a,lowerlimit),a,biggest)\r\n return N.minimum.reduce(ta,dimension)", "def test_bisection_system(testFunctions,tol, printFlag):\n pass", "def minimum(x,y,z):\r\n\treturn min(min(x,y),z)", "def find_peak(func, low, high):\n def derivative(x):\n return func(x)-func(x+1)\n\n a = bisect_left(derivative, 0, low, high)\n\n result = func(a)\n index = a\n\n # Unfortunately the above only finds a value where there is no change, so we have to continue searching for the\n # maximum value. 
The binary search brings us close enough that this isn't an issue for most functions.\n\n # Search forward\n k = a\n while k <= high:\n d2 = func(k)\n if d2 < result:\n break\n else:\n result = d2\n index = k\n k += 1\n\n # Search backward\n k = a\n while k >= low:\n d2 = func(k)\n if d2 < result:\n break\n else:\n result = d2\n index = k\n k -= 1\n\n return index", "def mini(a, b):\n return min(a, b)", "def _find_closest_in_range(ranges: Iterable[CT], what_to_find: CT) -> Optional[CT]:\n\n ranges = sorted(ranges)\n\n while ranges:\n\n middle_item_index = len(ranges) // 2\n middle_item = ranges[middle_item_index]\n\n if what_to_find == middle_item:\n return what_to_find\n\n elif what_to_find > middle_item:\n\n if len(ranges) == 1:\n return middle_item\n\n ranges = ranges[middle_item_index:]\n\n elif what_to_find < middle_item:\n\n if ranges[middle_item_index - 1] < what_to_find:\n return ranges[middle_item_index - 1]\n\n ranges = ranges[:middle_item_index]", "def _min_in_bounds(self, min):\n if min <= self.valmin:\n if not self.closedmin:\n return self.val[0]\n min = self.valmin\n\n if min > self.val[1]:\n min = self.val[1]\n return self._stepped_value(min)", "def test_lt_1():\n a = FixedPoint(1, 'Q2.8')\n assert a < 1.1", "def intersec(f1, f2, f3=None, f4=None, f5=None):\n from numpy import minimum\n\n y = minimum(f1,f2)\n if f3 != None: y = minimum(y,f3)\n if f4 != None: y = minimum(y,f4)\n if f5 != None: y = minimum(y,f5)\n y = y.astype(f1.dtype)\n return y", "def secant_method(func, first, second, err):\n\n if dim_complete(func) and dim_complete(first) and dim_complete(second) and dim_complete(err):\n if dim_type(first) and dim_type(second) and dim_type(err)and callable(func):\n if dim_sign(err):\n x0, x1 = first, second\n while fabs(func(x1)) > err and func(x0) != func(x1):\n x2 = (x0*func(x1) - x1*func(x0))/(func(x1) - func(x0))\n x0 = x1\n x1 = x2\n root = x1\n return root\n else:\n raise ValueError(\"Please enter positive numbers: \"+str(err))\n raise TypeError(\"One or more arguments is(are) not number(s)! Enter a Number\")\n raise AttributeError(\"OH!!! 
one of the arguments was forgotten, Check it!\")", "def find_start_index():\n def recursive_find_index(lower_bound, upper_bound):\n if upper_bound - lower_bound <= 1:\n if intervals[upper_bound][0] <= start_dt:\n return upper_bound\n return lower_bound\n index = (upper_bound + lower_bound) // 2\n if intervals[index][0] <= start_dt:\n return recursive_find_index(index, upper_bound)\n else:\n return recursive_find_index(lower_bound, index)\n\n if start_dt <= intervals[0][0] - tolerance:\n return -1\n if end_dt >= intervals[-1][1] + tolerance:\n return -1\n return recursive_find_index(0, len(intervals) - 1)", "def test_le_1():\n a = FixedPoint(1, 'Q2.8')\n assert a < 1.1", "def prove_interval(interval, proven=FractionInterval.open_closed(0, 1), verbose=0):\n unproven = break_at_integer(interval)\n if verbose:\n i = 0\n print(i, unproven)\n while unproven:\n applied_intervals = [the_function(interval) for interval in unproven]\n if verbose > 1:\n print(\"applied\", applied_intervals)\n broken_intervals = []\n for interval in applied_intervals:\n broken_intervals.extend(break_at_integer(interval))\n if verbose > 1:\n print(\"broken\", broken_intervals)\n merged_intervals = []\n used = False\n for interval0, interval1 in zip(broken_intervals, broken_intervals[1:]):\n if used:\n used = False\n continue\n if interval0.upper == interval1.lower and interval1.lower.denominator != 1:\n merged_intervals.append(interval0 | interval1)\n used = True\n else:\n merged_intervals.append(interval0)\n if not used:\n merged_intervals.append(broken_intervals[-1])\n if verbose > 1:\n print(\"merged\", merged_intervals)\n unproven = [interval for interval in merged_intervals if not interval.is_contained_in(proven)]\n if verbose:\n i += 1\n print(i, unproven)\n\n return True", "def success(self, x, tol=1.e-5):\n val = self.fun(asarray(x))\n if abs(val - self.fglob) < tol:\n return True\n\n # the solution should still be in bounds, otherwise immediate fail.\n if np.any(x > np.asfarray(self.bounds)[:, 1]):\n return False\n if np.any(x < np.asfarray(self.bounds)[:, 0]):\n return False\n\n # you found a lower global minimum. 
This shouldn't happen.\n if val < self.fglob:\n raise ValueError(\"Found a lower global minimum\",\n x,\n val,\n self.fglob)\n\n return False", "def binary_search(f: Callable, eps: float, a: float, b: float = None,\n display: bool = False, max_iterations: int = 100) -> float:\n x = np.nan\n find_b = False\n if b is None:\n find_b = True\n b = a + 1\n for _ in range(max_iterations):\n x = (a + b) / 2\n f_x = f(x)\n\n if display:\n import matplotlib.pyplot as plt\n xx0 = a\n xx1 = b\n xx = np.linspace(xx0, xx1, 100)\n yy = np.array(list(map(f, xx)))\n plt.plot(xx, yy)\n plt.axvline(x=x)\n plt.show()\n\n if f_x > 0:\n a = x\n if find_b:\n b = 2*max(b, 1)\n else:\n b = x\n find_b = False\n\n if abs(f_x) <= eps:\n break\n else:\n # print(\"Error: Reached maximum iteration\", b)\n pass\n return x", "def search_true_instance(f, a, b, precision_digits=3, maxiter=10, log=None):\n log = logging.getLogger('search_true_instance')\n\n values_searched = [a, b]\n log.debug(\"Starting exploratory search in [%s, %s]\" % (a, b))\n\n for iter_i in range(maxiter):\n # First test halfway, point then 1/4 and 3/4, then 1/8, 3/8, 5/8, 7/8, etc.\n fractions = 2**(iter_i + 1)\n search_points = [round_to_digits(a + (b - a)*fr, precision_digits)\n for fr in np.arange(1, fractions, 2)/fractions]\n log.debug(\"Searching %s - %s (%d points)\" % (search_points[0], search_points[-1], len(search_points)))\n\n for x_i, x in enumerate(search_points):\n if f(x):\n values_searched = np.array(values_searched)\n return x, np.max(values_searched[values_searched < x]), np.min(values_searched[values_searched > x])\n else:\n values_searched.append(x)\n\n if len(search_points) > 1 and np.any(np.diff(search_points) == 0):\n raise SearchFailedException(\"No true value found in search region [%s, %s], \"\n \"but search depth now lower than precision digits (%s). \"\n \"Iteration %d.\" % (a, b, precision_digits, iter_i))\n\n raise ValueError(\"Exploratory search failed to converge or terminate - bug? excessive precision?\")", "def _find_root(function, N, squared_integers, grid_data_dct2):\n\n # From the implementation by Botev, the original paper author\n # Rule of thumb of obtaining a feasible solution\n N2 = tf.math.maximum(\n tf.math.minimum(tf.constant(1050, ztypes.float), N),\n tf.constant(50, ztypes.float),\n )\n tol = 10e-12 + 0.01 * (N2 - 50) / 1000\n left_bracket = tf.constant(0.0, dtype=ztypes.float)\n right_bracket = tf.constant(10e-12, ztypes.float) + tf.constant(\n 0.01, ztypes.float\n ) * (N2 - tf.constant(50, ztypes.float)) / tf.constant(1000, ztypes.float)\n\n converged = tf.constant(False)\n t_star = tf.constant(0.0, dtype=ztypes.float)\n\n def fixed_point_function(t):\n return _fixed_point(t, N, squared_integers, grid_data_dct2)\n\n def condition(right_bracket, converged, t_star):\n return tf.math.logical_not(converged)\n\n def body(right_bracket, converged, t_star):\n t_star, value_at_t_star, num_iterations, converged = root_search.brentq(\n fixed_point_function, left_bracket, right_bracket, None, None, 2e-12\n )\n\n t_star = t_star - value_at_t_star\n\n right_bracket = right_bracket * tf.constant(2.0, ztypes.float)\n\n return right_bracket, converged, t_star\n\n # While a solution is not found, increase the tolerance and try again\n right_bracket, converged, t_star = tf.while_loop(\n condition, body, [right_bracket, converged, t_star]\n )\n\n return t_star", "def bounds(x, xMin, xMax):\n if (x < xMin):\n x = xMin\n elif (x > xMax):\n x = xMax\n return(x)" ]
[ "0.6708032", "0.65304935", "0.62501", "0.5912542", "0.58930385", "0.58231115", "0.5812576", "0.5808945", "0.57922816", "0.5788117", "0.5785699", "0.57082534", "0.56653464", "0.5637875", "0.5637021", "0.5635716", "0.5600486", "0.55882996", "0.55636096", "0.5525852", "0.55034405", "0.5473664", "0.54350126", "0.5433812", "0.54315805", "0.54117507", "0.5403311", "0.53836155", "0.5382539", "0.53695226", "0.5359882", "0.5345911", "0.53246534", "0.53246194", "0.53145385", "0.5274898", "0.52684206", "0.5232355", "0.5209441", "0.5201645", "0.518831", "0.518314", "0.51811254", "0.51411176", "0.5132622", "0.5124699", "0.5113137", "0.5112008", "0.51073694", "0.51030827", "0.5097597", "0.508861", "0.50820965", "0.5078351", "0.5071501", "0.50682276", "0.50616467", "0.5047306", "0.504222", "0.5038512", "0.50221366", "0.501189", "0.5009371", "0.5007554", "0.5000119", "0.4992282", "0.49909437", "0.4975301", "0.49681774", "0.49580002", "0.49330398", "0.4922529", "0.49212903", "0.49211365", "0.49148864", "0.4913695", "0.49127144", "0.49059945", "0.48976985", "0.48969546", "0.48965523", "0.48947674", "0.48937833", "0.48931506", "0.48874953", "0.4874604", "0.48706004", "0.48694608", "0.48689425", "0.48639333", "0.48617032", "0.4859284", "0.48525012", "0.4851004", "0.48483112", "0.48457518", "0.4845274", "0.48430175", "0.48356768", "0.4832148" ]
0.5996638
3
Given a function and distinct initial points, search in the downhill direction (as defined by the initial points) and return new points
def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000):
    _gold = 1.618034          # golden ratio: (1 + sqrt(5)) / 2
    _verysmall_num = 1e-21
    fa = func(*((xa,) + args))
    fb = func(*((xb,) + args))
    if (fa < fb):                      # Switch so fa > fb
        xa, xb = xb, xa
        fa, fb = fb, fa
    xc = xb + _gold*(xb - xa)
    fc = func(*((xc,) + args))
    funcalls = 3
    iter = 0
    while (fc < fb):
        tmp1 = (xb - xa)*(fb - fc)
        tmp2 = (xb - xc)*(fb - fa)
        val = tmp2 - tmp1
        if abs(val) < _verysmall_num:
            denom = 2.0*_verysmall_num
        else:
            denom = 2.0*val
        # Parabolic extrapolation through (xa, fa), (xb, fb), (xc, fc)
        w = xb - ((xb - xc)*tmp2 - (xb - xa)*tmp1)/denom
        wlim = xb + grow_limit*(xc - xb)
        if iter > maxiter:
            raise RuntimeError("Too many iterations.")
        iter += 1
        if (w - xc)*(xb - w) > 0.0:
            # w lies between xb and xc
            fw = func(*((w,) + args))
            funcalls += 1
            if (fw < fc):
                xa, xb = xb, w
                fa, fb = fb, fw
                return xa, xb, xc, fa, fb, fc, funcalls
            elif (fw > fb):
                xc, fc = w, fw
                return xa, xb, xc, fa, fb, fc, funcalls
            w = xc + _gold*(xc - xb)
            fw = func(*((w,) + args))
            funcalls += 1
        elif (w - wlim)*(wlim - xc) >= 0.0:
            # Parabolic step went past the allowed limit; clamp it to wlim
            w = wlim
            fw = func(*((w,) + args))
            funcalls += 1
        elif (w - wlim)*(xc - w) > 0.0:
            # w lies between xc and the step limit
            fw = func(*((w,) + args))
            funcalls += 1
            if (fw < fc):
                xb = xc
                xc = w
                w = xc + _gold*(xc - xb)
                fb = fc
                fc = fw
                fw = func(*((w,) + args))
                funcalls += 1
        else:
            w = xc + _gold*(xc - xb)
            fw = func(*((w,) + args))
            funcalls += 1
        xa, xb, xc = xb, xc, w
        fa, fb, fc = fb, fc, fw
    return xa, xb, xc, fa, fb, fc, funcalls
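A minimal usage sketch, assuming the bracket definition above is in scope: bracketing a quadratic whose minimum sits at x = 2 returns a triple (xa, xb, xc) with f(xb) below both f(xa) and f(xc), which a routine such as Brent's method can then refine. The example function and starting points are chosen here purely for illustration.
# Hypothetical usage example of the bracket() routine above.
def f(x):
    return (x - 2.0)**2

xa, xb, xc, fa, fb, fc, funcalls = bracket(f, xa=0.0, xb=1.0)
# f(xb) < f(xa) and f(xb) < f(xc), so the minimum of f lies inside the interval spanned by xa and xc.
print(xa, xb, xc, funcalls)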
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convergent_point(x0, x1, f):\n while x0 != x1:\n x0, x1 = f(x0), f(x1)\n return x0", "def downhill(F, xStart, args=None, side=0.1, ftol=1.0e-6, xtol=1.0e-6, maxiter=1000, maxfunc=1000, maxiternochange=10):\n # TODO: check the types of the input ???\n\n # print \"Entering downhill\"\n n = len(xStart)\n x = np.zeros((n+1, n), dtype=float) #point null matrix, n+1 rows, n columns\n f = np.zeros(n+1, dtype=float) # null vector, n+1 columns\n p_count = 0 # counter for detecting a plateau\n f_count = 0 # counter for the number of function call\n f_best_count = 0 # counter for the number of iterations in which the best solution does not change\n f_best_prev = 0.0 # holds the best value from the previous iteration\n epsilon = 0.001 # tolerance for considering two values as equal\n # max_iter_no_change = 10 # maximum number of accepted iterations with no change in the optimal solution\n precision = 2\n round_map = partial(round, ndigits=precision) # partial function for rounding purposes\n\n # initial simplex\n x[0] = xStart\n for i in xrange(1, n+1):\n x[i] = xStart\n x[i,i-1] = xStart[i-1] + side\n\n # print \"Evaluate the starting points\"\n # compute the value of F at the vertices of the simplex\n for i in xrange(n+1):\n f[i] = F(x[i], args)\n # p_count += 1\n\n # main loop\n # print \"Start iterating\"\n for k in xrange(maxiter):\n\n # check the number of function calls\n if f_count > maxfunc:\n print \"Stopping criteria: maximum number of function calls\"\n print \"Best solution so far: \", x[iLo], \" value: \", f[iLo], \" at iteration:\", k\n # return {'point' : x[iLo], 'value': f[iLo], 'iteration': k, 'funcalls': f_count, 'allocation': (args['Q1'], args['Q2'], args['Q3']), 'stopping': 'MAXFUNCALL'}\n return {'point' : x[iLo], 'value': f[iLo], 'iteration': k, 'funcalls': f_count, 'allocation': [args['Q{}'.format(h)] for h in xrange(1, args['retailers']+1)], 'stopping': 'MAXFUNCALL'}\n\n # find the best and worst vertex (consider a minimization problem)\n iLo = np.argmin(f) # best vertex\n iHi = np.argmax(f) # worst vertex\n\n # print k,\" \", f[iLo]\n #\n # if f[iLo] < -0.310000:\n # print f[iLo]\n # print x[iLo]\n # print x\n # sys.exit(1)\n # print \"k: \", k, \" f_best_prev: \", f_best_prev, \" f[iLo]: \", f[iLo], \" f_best_count: \", f_best_count\n # print \"Beginning of iteration: %4d | Best x: %4f %4f %4f | Best value: %f\" % (k, x[iLo][0], x[iLo][1], x[iLo][2], f[iLo])\n # print \"x: \", x, \" f: \", f\n # print \"=========================================================================================\"\n # check if the solution has changed from the previous iterations\n if f[iLo] < f_best_prev:\n f_best_prev = f[iLo]\n f_best_count = 0\n else:\n f_best_count += 1\n\n if f_best_count > maxiternochange:\n print \"Stopping criteria: maximum number of iterations with no improvement in the best solution\"\n print \"Best solution so far: \", x[iLo], \" value: \", f[iLo], \" at iteration:\", k\n # return {'point' : x[iLo], 'value': f[iLo], 'iteration': k, 'funcalls': f_count, 'allocation': (args['Q1'], args['Q2'], args['Q3']), 'stopping': 'NOIMPROVEMENT'}\n return {'point' : x[iLo], 'value': f[iLo], 'iteration': k, 'funcalls': f_count, 'allocation': [args['Q{}'.format(h)] for h in xrange(1, args['retailers']+1)], 'stopping': 'NOIMPROVEMENT'}\n\n if abs(f[iLo] - f[iHi]) < ftol: # If difference between highest and lowest is smaller than ftol, return\n print \"Stopping criteria: difference between highest and lowest points is smaller than tolerance\"\n print \"Best solution so far: \", 
x[iLo], \" value: \", f[iLo], \" at iteration:\", k\n # return {'point' : x[iLo], 'value': f[iLo], 'iteration': k, 'funcalls': f_count, 'allocation': (args['Q1'], args['Q2'], args['Q3']), 'stopping': 'MAXTOLERANCE'}\n return {'point' : x[iLo], 'value': f[iLo], 'iteration': k, 'funcalls': f_count, 'allocation': [args['Q{}'.format(h)] for h in xrange(1, args['retailers']+1)], 'stopping': 'MAXTOLERANCE'}\n # compute the move vector d\n d = (-(n+1) * x[iHi] + np.sum(x, axis=0)) / n\n # print \"d: \", d\n\n # check for convergence\n if sqrt(np.dot(d, d)/n) < xtol: # length of the vector d\n print \"Stopping criteria: length of step d smaller than tolerance\"\n print \"Best solution so far: \", x[iLo], \" value: \", f[iLo], \" at iteration:\", k\n # return {'point' : x[iLo], 'value': f[iLo], 'iteration': k, 'funcalls': f_count, 'allocation': (args['Q1'], args['Q2'], args['Q3']), 'stopping': 'SMALLSTEP'}\n return {'point' : x[iLo], 'value': f[iLo], 'iteration': k, 'funcalls': f_count, 'allocation': [args['Q{}'.format(h)] for h in xrange(1, args['retailers']+1)], 'stopping': 'SMALLSTEP'}\n # try reflection\n xNew = np.array(map(round_map, x[iHi] + 2 * d))\n fNew = F(xNew, args)\n f_count += 1\n # print \"Reflected point: \", xNew, \" value: \", fNew\n\n # check for no improvement over the worst point\n # and for plateau condition\n if f[iHi] - epsilon <= fNew <= f[iHi] + epsilon:\n p_count += 1\n # print \"No improvement here\"\n\n if p_count == n+2: # we reflected all vertices with no improvement\n print \"Stopping criteria: Probably we landed on a plateau... exiting\" # TODO: restart instead of exiting\n print \"Best solution so far: \", x[iLo], \" value: \", f[iLo], \" at iteration:\", k\n # return {'point' : x[iLo], 'value': f[iLo], 'iteration': k, 'funcalls': f_count, 'allocation': (args['Q1'], args['Q2'], args['Q3']), 'stopping': 'PLATEAU'}\n return {'point' : x[iLo], 'value': f[iLo], 'iteration': k, 'funcalls': f_count, 'allocation': [args['Q{}'.format(h)] for h in xrange(1, args['retailers']+1)], 'stopping': 'PLATEAU'}\n\n\n else:\n p_count = 0\n\n\n if fNew <= f[iLo]: # if the new value is better than the best so far,\n x[iHi] = xNew # substitute the worst vertex with the new one\n f[iHi] = fNew\n\n # try to expand the reflection\n xNew = np.array(map(round_map, x[iHi] + d))\n fNew = F(xNew, args)\n f_count += 1\n # print \"Expanded point: \", xNew, \" value: \", fNew\n\n if fNew <= f[iHi]: # in the original source version it is f[iLo] (?)\n x[iHi] = xNew\n f[iHi] = fNew\n else:\n # try reflection again\n if fNew <= f[iHi]:\n x[iHi] = xNew\n f[iHi] = fNew\n else:\n # try contraction\n xNew = np.array(map(round_map, x[iHi] + 0.5 * d))\n fNew = F(xNew, args)\n f_count += 1\n # print \"Contracted point: \", xNew, \" value: \", fNew\n\n if fNew <= f[iHi]: # accept contraction\n x[iHi] = xNew\n f[iHi] = fNew\n else:\n # shrink\n for i in xrange(len(x)):\n if i != iLo:\n x[i] = np.array(map(round_map, x[i] - x[iLo] * 0.5))\n f[i] = F(x[i], args)\n f_count += 1\n\n # print \"End of iteration: %4d | Best x: %4f %4f %4f | Best value: %f\" % (k, x[iLo][0], x[iLo][1], x[iLo][2], f[iLo])\n # print \"x: \", x, \" f: \", f\n # print \"*\"*50\n # print \"\"\n\n\n\n print \"Stopping criteria: maximum number of iterations\"\n print \"Best solution so far: \", x[iLo], \" value: \", f[iLo], \" at iteration:\", k\n # return {'point' : x[iLo], 'value': f[iLo], 'iteration': k, 'funcalls': f_count, 'allocation': (args['Q1'], args['Q2'], args['Q3']), 'stopping': 'MAXITERATION'}\n return {'point' : x[iLo], 
'value': f[iLo], 'iteration': k, 'funcalls': f_count, 'allocation': [args['Q{}'.format(h)] for h in xrange(1, args['retailers']+1)], 'stopping': 'MAXITERATION'}", "def search_interval_3d(function, point_a, point_b, step, tol, max_iterations,\n show_process = False):\n # The first interval is created with the values [0, step]\n t = step\n # We use a parametrization for the line based on the two points given.\n # It is possible to get any point in the line changing the parameters t.\n # For t=0 we get the point_b and for t=1 we get the point_a.\n last_val = function( line_param(point_a, point_b, 0) )\n current_val = function( line_param(point_a, point_b, t) )\n \n # While the signs of the function evaluated in the ends of the interval is\n # the same.\n iterations = 0\n while last_val*current_val > 0:\n if iterations > max_iterations:\n raise Exception('Maximum iterations reached. But no solution was found.')\n # Update the step\n last_val = current_val\n t += step\n # Calculate the new value\n current_val = function( line_param(point_a, point_b, t) )\n iterations += 1\n \n # These point represent the interval for which a change is the signs exist.\n # This means that there is a point in this interval for which the function\n # is zero. We use bisection in this interval to find that point.\n left_point = line_param(point_a, point_b, t - step)\n right_point = line_param(point_a, point_b, t)\n \n if show_process: \n print('number of iterations to find interval = {0}'.format(iterations))\n print('interval found : [{0} , {1} ])'.format(left_point, right_point))\n \n return left_point, right_point", "def convergent_point_guarded(x0, x1, y, f):\n d0 = distance(x0, y, f)\n d1 = distance(x1, y, f)\n if d0 < d1:\n x1 = power_unary(x1, d1 - d0, f)\n elif d1 < d0:\n x0 = power_unary(x0, d0 - d1, f)\n return convergent_point(x0, x1, f)", "def pinpoint(f, J0, g0, x0, mu1, mu2, \\\r\n alow, Jalow, galow, ahigh, Jahigh, gahigh, p):\r\n j = 0\r\n Jprime0 = g0.T@p\r\n Jprimealow = galow.T@p\r\n amin = (2*alow*(Jahigh-Jalow)+Jprimealow*(alow**2-ahigh**2))/\\\r\n (2*(Jahigh-Jalow+Jprimealow*(alow-ahigh)))\r\n Jamin, gamin = f(x0+amin*p)\r\n while True:\r\n Jprimeamin = gamin.T@p\r\n if (Jamin > J0+mu1*amin*Jprime0) or (Jamin > Jalow):\r\n ahigh = amin\r\n Jahigh = Jamin\r\n amin = (2*alow*(Jahigh-Jalow)+Jprimealow*(alow**2-ahigh**2))/\\\r\n (2*(Jahigh-Jalow+Jprimealow*(alow-ahigh)))\r\n Jamin, gamin = f(x0+amin*p)\r\n else:\r\n if (np.abs(Jprimeamin) <= np.abs(mu2*Jprime0)): \r\n astar = amin\r\n return Jamin, gamin, astar\r\n elif (Jprimeamin*(ahigh-alow) >= 0):\r\n ahigh = alow\r\n alow = amin\r\n Jalow = Jamin\r\n galow = gamin\r\n Jprimealow = galow.T@p\r\n amin = (2*alow*(Jahigh-Jalow)+Jprimealow*(alow**2-ahigh**2))/\\\r\n (2*(Jahigh-Jalow+Jprimealow*(alow-ahigh)))\r\n Jamin, gamin = f(x0+amin*p)\r\n j += 1", "def bisecter(func, step=0.1):\n points = list(func.points(step))\n area = sum(map(lambda p: p[1], points))\n\n current = 0.\n for x, y in points:\n current += y\n if current >= area / 2:\n return x", "def dichotomous_search(loss_function: rosenbrock, start: point, direction: list, epsilon=0.1) -> float:\n a, b = advance_retreat_method(loss_function, start, direction)\n\n # find the minimum\n e = epsilon / 3\n p, q = (a + b) / 2 - e, (a + b) / 2 + e\n while abs(a - b) > epsilon:\n f_p = loss_function.f(start + point(direction[0] * p, direction[1] * p))\n f_q = loss_function.f(start + point(direction[0] * q, direction[1] * q))\n if f_p < f_q:\n b = q\n else:\n a = p\n p, q = (a + b) / 2 - e, (a + b) / 
2 + e\n\n return (a + b) / 2", "def prob6():\n domain = np.linspace(-5, 5, 200)\n x = sy.symbols('x')\n poly = 2*x**6 - 51*x**4 + 48*x**3 + 312*x**2 - 576*x - 100\n f = sy.lambdify(x, poly)\n _1deriv = sy.diff(poly, x)\n critical_pts = sy.solve(_1deriv, x)\n _2deriv = sy.diff(_1deriv, x)\n f_2deriv = sy.lambdify(x, _2deriv)\n loc_min = []\n loc_max = []\n for x0 in critical_pts:\n if f_2deriv(x0) > 0:\n loc_min.append(x0)\n if f_2deriv(x0) < 0:\n loc_max.append(x0)\n\n plt.ion()\n plt.plot(domain, f(domain))\n plt.plot(loc_min, f(np.array(loc_min)), 'ro', label=\"local minimum\")\n plt.plot(loc_max, f(np.array(loc_max)), 'bo', label=\"local maximum\")\n plt.legend()\n plt.show()\n\n return set(loc_min), set(loc_max)", "def lagrangePoints(mu):\n \n # define l = 1-mu\n l = 1 - mu\n \n # collinear points\n def eqL1(x):\n fval = x**5 + 2*(mu-l)*x**4 + (l**2-4*l*mu+mu**2)*x**3 + (2*mu*l*(l-mu)+mu-l)*x**2 + (mu**2*l**2+2*(l**2+mu**2))*x + mu**3-l**3\n #fval = gamma**5 - (3-mu)*gamma**4 + (3-2*mu)*gamma**3 - mu*gamma**2 + 2*mu*gamma - mu\n return fval\n sol_l1 = optimize.root(eqL1, 0.5, method='hybr')\n l1 = np.array([sol_l1.x[0] , 0, 0])\n \n def eqL2(x):\n fval = x**5 + 2*(mu-l)*x**4 + (l**2-4*l*mu+mu**2)*x**3 + (2*mu*l*(l-mu)-(mu+l))*x**2 + (mu**2*l**2+2*(l**2-mu**2))*x - (mu**3+l**3)\n #fval = gamma**5 + (3-mu)*gamma**4 + (3-2*mu)*gamma**3 - mu*gamma**2 - 2*mu*gamma - mu\n return fval\n sol_l2 = optimize.root(eqL2, 1.5, method='hybr')\n l2 = np.array([sol_l2.x[0] , 0, 0])\n \n def eqL3(x):\n fval = x**5 + 2*(mu-l)*x**4 + (l**2-4*mu*l+mu**2)*x**3 + (2*mu*l*(l-mu)+(l+mu))*x**2 + (mu**2*l**2+2*(mu**2-l**2))*x + l**3+mu**3\n return fval\n sol_l3 = optimize.root(eqL3, -1, method='hybr')\n l3 = np.array([sol_l3.x[0] , 0, 0])\n \n # equilateral points\n # L4\n l4 = np.array([np.cos(np.pi/3) - mu , np.sin(np.pi/3), 0])\n # L5\n l5 = np.array([np.cos(np.pi/3) - mu , -np.sin(np.pi/3), 0])\n \n return _lagrangePointsReturn(l1,l2,l3,l4,l5)", "def _step(self, start):\n #angle = np.random.uniform(0,2*np.pi) # only 2-dim\n #direction = angle2vec(angle)\n\n angle = np.random.randn(self.dim)\n direction = angle / la.norm(angle)\n \n if not self.query(start):\n print(f\"Given an invalid point! 
{start}\")\n \n testCounter = 0\n max_iter = 1000\n \n ## Case for adding to direction ##\n high = 1\n testCounter = 0\n while(self.query(start + high*direction)):\n high = high*2\n testCounter += 1\n if testCounter > max_iter:\n print(f\"Warning: Stuck in t_plus high loop with: \\n\\\n high = {high}\\n\")\n \n low = high/2\n testCounter = 0\n while(not self.query(start + low*direction)):\n low = low/2\n testCounter += 1\n if testCounter > max_iter:\n print(f\"Warning: Stuck in t_plus low loop with: \\n\\\n low = {low}\\n\")\n \n # now we know that (start + low * direction) is inside\n #assert(zonoid_membership_def(A, start+low*direction))\n # and that (start + high * direction) is outside\n #assert(not zonoid_membership_def(A, start+high*direction))\n \n tol = 1e-5\n t_plus = (high-low)/2\n old_t = 1\n current = start\n testCounter = 0\n while(abs(t_plus-old_t) > tol):\n old_t = t_plus\n t_plus = (high+low)/2\n testpoint = current + t_plus*direction\n if( self.query(testpoint) ):\n low = t_plus\n else:\n high = t_plus\n \n testCounter += 1\n if testCounter > max_iter:\n print(f\"Warning: Stuck in t_plus loop with: \\n\\\n t_plus = {t_plus}\\n\\\n t_old = {t_old}\\n\\\n high = {high}\\n\\\n low = {low}\\n\")\n t_plus = old_t\n \n ## Case for subtracting from direction\n high = -1\n testCounter = 0\n while(self.query(start + high*direction)):\n high = high*2\n testCounter += 1\n if testCounter > max_iter:\n print(f\"Warning: Stuck in t_minus high loop with: \\n\\\n high = {high}\\n\")\n \n low = high/2\n testCounter = 0\n while(not self.query(start + low*direction)):\n low = low/2\n testCounter += 1\n if testCounter > max_iter:\n print(f\"Warning: Stuck in t_minus low loop with: \\n\\\n low = {low}\\n\")\n \n # now we know that (start + low * direction) is inside\n #assert(zonoid_membership_def(A, start+low*direction))\n # and that (start + high * direction) is outside\n #assert(not zonoid_membership_def(A, start+high*direction))\n \n tol = 1e-10\n t_minus = (high-low)/2\n old_t = 1\n current = start\n testCounter = 0\n while(abs(t_minus-old_t) > tol):\n old_t = t_minus\n t_minus = (high+low)/2\n testpoint = current + t_minus*direction\n if( self.query(testpoint) ):\n low = t_minus\n else:\n high = t_minus\n \n testCounter += 1\n if testCounter > max_iter:\n print(f\"Warning: Stuck in t_minus loop with: \\n\\\n t_minus = {t_minus}\\n\\\n t_old = {t_old}\\n\\\n high = {high}\\n\\\n low = {low}\\n\")\n t_minus = old_t\n \n # Make the step\n final_t = np.random.uniform(t_minus, t_plus)\n #print(f\"Final t = {final_t}\")\n \n # remove extra returns for now for other compatibility\n return start + final_t*direction #, start+t_plus*direction, start+t_minus*direction", "def optimize(self, startPoint=0, epsilon=1e-5, maxIterations=100):\n n = len(startPoint)\n alpha = 1\n Hk = numpy.eye(n)\n I = numpy.eye(n)\n k = 0\n xk = startPoint\n gk = self.g(xk)\n \n while 1:\n # Compute the norm of the gradient.\n gradNorm = numpy.sqrt(numpy.dot(gk, gk))\n\n # Display the function value for the current iteration.\n fk = f(xk)\n print \"%d: fval = %d, norm = %f\" % (k, fk, gradNorm) \n \n # Termination based on tolerenace criterion.\n if (gradNorm <= epsilon):\n print \"Terminating: Tolerence %f (fval = %f, norm = %f)\"\\\n % (epsilon, fk, gradNorm)\n return {'optimalPoint':xk, 'functVal':fk}\n\n # Termination due to maximum iterations.\n if (k > maxIterations):\n print \"Terminating: Max iterations %d (fval = %f, norm = %f)\" \\\n % (i, fk, gradNorm) \n return {'optimalPoint':xk, 'functVal':fk}\n\n # 
Computing the search direction.\n pk = -numpy.dot(Hk, gk)\n sk = alpha * pk\n xk1 = xk + sk\n gk1 = self.g(xk1)\n yk = gk1 - gk\n\n # Computing Hk1.\n rhok = 1.0 / numpy.dot(yk, sk)\n A = I - (rhok * numpy.outer(sk, yk))\n B = rhok * numpy.outer(sk, sk)\n Hk = numpy.dot(numpy.dot(A, Hk), A) + B\n\n # Update the variables for the next iteration.\n xk = xk1\n gk = gk1\n k += 1\n pass \n pass", "def fn(x, step):\n if x == stones[-1]: return True \n ans = False \n for ss in (step-1, step, step+1): \n if 0 < ss and x + ss in loc: ans = ans or fn(x + ss, ss)\n return ans", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n path_to_point = {}\n cost_to_point = {}\n\n # Get the start node\n start_node = problem.getStartState()\n fringe_node = [start_node]\n path_to_point[start_node] = []\n cost_to_point[start_node] = problem.getCostOfActions(path_to_point[start_node])\n\n goal_found = False\n\n while(not goal_found):\n #for i in range(100): \n nodes_to_expand = set()\n # get max value node in the fringe node\n min_val = float(\"inf\")\n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] < min_val:\n min_val = cost_to_point[one_node]\n \n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] == min_val:\n nodes_to_expand.add(one_node)\n fringe_node.remove(one_node)\n\n # Expand the fringe node \n for one_node in nodes_to_expand:\n path_to_parent = path_to_point[one_node]\n for nxt_node in problem.getSuccessors(one_node):\n pos = nxt_node[0]\n mv = nxt_node[1]\n # check if point already present in path to point\n prev_cost = float(\"inf\")\n if pos in cost_to_point:\n prev_cost = cost_to_point[pos]\n new_path = path_to_parent + [mv]\n if prev_cost > problem.getCostOfActions(new_path):\n path_to_point[pos] = new_path\n cost_to_point[pos] = problem.getCostOfActions(new_path)\n fringe_node.append(pos)\n\n # Check if destination is reached in the fringe node\n for one_node in fringe_node:\n if problem.isGoalState(one_node):\n final_node = one_node\n goal_found = True\n break\n \n #print(len(fringe_node))\n print(final_node)\n print(path_to_point[final_node])\n return path_to_point[final_node] \n\n util.raiseNotDefined()", "def firstscheme(nextpoint, point, prevpoint, parameters):\n dx, dt, u = parameters\n return point - u*dt*(nextpoint - 2*point + point)/(dx**2)", "def find_root(function, point_a, point_b, step, tol, max_iterations, \n show_process = False):\n left_point , right_point = search_interval_3d(function, point_a, point_b, \n step, tol, max_iterations,\n show_process)\n \n point_where_zero = bisection_3d(function, left_point, right_point, tol, \n max_iterations, show_process)\n \n return point_where_zero", "def find_direct_gap(self,rpts=5):\n # Start with a random point in the BZ.\n x0up=[self.kS.kx0+random()*(self.kS.kxmax-self.kS.kx0),self.kS.ky0+random()*(self.kS.kymax-self.kS.ky0)]\n # Define functions to minimize\n fun1= lambda x: self.Ham_eigvals(x[0],x[1])[self.NL]-self.Ham_eigvals(x[0],x[1])[self.NL-1]\n # Optimize initial guess.\n x1up=optimize.minimize(fun1,x0up).x\n valup=fun1(x1up)\n # Reiterate to check for local minima.\n for ix in range(rpts):\n for iy in range(rpts):\n x0up=[self.kS.kx0+random()*(self.kS.kxmax-self.kS.kx0),self.kS.ky0+random()*(self.kS.kymax-self.kS.ky0)]\n xnew1up=optimize.minimize(fun1,x0up).x\n if fun1(xnew1up)<valup:\n x1up=xnew1up\n valup=fun1(x1up)\n # Also always check special points in the BZ\n x0up=[0.,(4.*pi/3.)/np.sqrt(3.)]\n 
xnew1up=optimize.minimize(fun1,x0up).x\n if fun1(xnew1up)<valup:\n x1up=xnew1up\n valup=fun1(x1up)\n x0up=[2.*pi/3.,(2.*pi/3.)/np.sqrt(3.)]\n xnew1up=optimize.minimize(fun1,x0up).x\n if fun1(xnew1up)<valup:\n x1up=xnew1up\n valup=fun1(x1up)\n \n return valup,x1up", "def problem2(self, s):\n \n points = self.neighbor(100, 10, s.exhaustive_search)\n points += self.neighbor(10, 100, s.exhaustive_search)\n points += 1\n\n _testDriver.get_code(s.exhaustive_search)\n print \"\\n(Check that scipy.spatial.KDTree is not used)\"\n points *= self.grade(1)\n\n return points", "def solve(self):\n start = datetime.now()\n f = self.function\n while not self.converged():\n self.history.append(self.vertices)\n\n #step 1: sort\n self.order_vertices()\n\n #step 3: reflect\n reflected = self.get_reflected_point()\n if f(*self.vertices[0]) < f(*reflected) < f(*self.vertices[-1]):\n self.reflect()\n continue\n\n #step 4: expand\n if self.reflected_is_best():\n expanded = self.get_expanded_point()\n if f(*expanded) < f(*reflected):\n self.expand()\n else:\n self.reflect()\n continue\n\n #step 5: contract\n contracted = self.get_contracted_point()\n if f(*contracted) < f(*self.vertices[-1]):\n self.contract()\n continue\n\n #step 6: shrink\n self.shrink()\n print(\"optimization took {0}\".format(datetime.now()-start))\n return self.history, self.cache.history", "def _find_move_direction(fun,keys:list,params:dict,upper_point:dict,lower_point:dict,move_up:dict)->tuple:\n best_score = np.Inf\n move_space = {key: [False, True] for key in params.keys()}\n\n for move in grid(move_space):\n param = {}\n for key in keys:\n if move[key]:\n param[key] = params[key][upper_point[key]]\n else:\n param[key] = params[key][lower_point[key]]\n score = fun(param)\n if score < best_score:\n move_up = move\n best_score = score\n return (best_score,move_up)", "def step_function(xs, ys):\n\n def func(x):\n index = np.searchsorted(xs, x)\n index = len(ys) - 1 if index >= len(ys) else index\n return ys[index]\n\n return func", "def _update_lambdas_when_candidate_outside_trustregion(\n lambdas, newton_step, p_norm, stopping_criteria, converged\n):\n relative_error = abs(p_norm - 1)\n\n if relative_error <= stopping_criteria[\"k_easy\"]:\n converged = True\n\n lambdas_new = lambdas._replace(candidate=newton_step, lower_bound=lambdas.candidate)\n\n return lambdas_new, converged", "def crank_nicolson_fd(main_args,\n boundary_left_args, boundary_right_args,\n initial_func,\n min_x, max_x,\n max_t,\n step_x, step_t,\n boundary_approximation_func='first_order_two_points',\n theta=0.5):\n\n d = {\n 'first_order_two_points': _o1p2, # o - order, p - points\n 'second_order_two_points': _o2p2,\n 'second_order_three_points': _o2p3\n }\n (complete_matrix,\n complete_vector) = d[boundary_approximation_func](main_args,\n boundary_left_args, boundary_right_args,\n step_x, step_t,\n min_x, max_x)\n\n m = int(max_t / step_t) + 1\n n = int((max_x - min_x) / step_x) + 1\n u = [None for _ in range(m)]\n u[0] = [initial_func(min_x + x * step_x) for x in range(n)]\n\n a, b, c, f = main_args\n\n A = a * (1 - theta) / step_x ** 2 - b * (1 - theta) / (2 * step_x)\n B = c * (1 - theta) - 2 * a * (1 - theta) / step_x ** 2 - 1 / step_t\n C = a * (1 - theta) / step_x ** 2 + b * (1 - theta) / (2 * step_x)\n\n X = b * theta / (2 * step_x) - a * theta / step_x ** 2\n Y = 2 * a * theta / step_x ** 2 - c * theta - 1 / step_t\n Z = - a * theta / step_x ** 2 - b * theta / (2 * step_x)\n\n matrix_u_t = Matrix(size=(n, 3))\n for i in range(1, n - 1):\n matrix_u_t[i] = [A, B, 
C]\n complete_matrix(matrix_u_t)\n\n for t in range(1, m):\n v = Vector(size=(n, 1))\n for x in range(1, n - 1):\n v[x] = (u[t - 1][x - 1] * X +\n u[t - 1][x] * Y +\n u[t - 1][x + 1] * Z +\n (theta - 1) * f(min_x + x * step_x, t * step_t) -\n theta * f(min_x + x * step_x, (t - 1) * step_t))\n complete_vector(v, t * step_t, matrix_u_t, u[t-1][0], u[t-1][-1])\n u[t] = list(TDMA(mtrx=matrix_u_t, vec=v).solve())\n\n return u", "def find_min_tour(point_set, point_coordinates, x_coor_dict, first_point_indices):\n # using a list here since the order matters, if not then should use a set\n visited_cities = [1, ] # start at city 1\n visited_cities_set = set()\n visited_cities_set.add(1)\n num_cities = len(point_set)\n min_tour_length = 0.00000\n\n current_city = 1\n closest_point = 1\n closest_dist = inf\n\n while len(visited_cities_set) != num_cities:\n cities_left = num_cities - len(visited_cities)\n print(\"{} cities left\".format(str(cities_left)))\n\n # iterating through all the points this way since there might be a way to\n # optimize the algorithm by taking advantage of the fact that the points\n # are sorted (by x first and then by y)\n for first_point_index in first_point_indices:\n for point in x_coor_dict[first_point_index]:\n # checking whether the point is visited already must be done using a set\n # using a list would involve linear search and slows the program down\n # as the visited cities grow in number\n if point == current_city or point in visited_cities_set:\n continue\n current_city_point = point_coordinates[current_city]\n other_city_point = point_coordinates[point]\n\n distance = current_city_point.distance_to(other_city_point)\n if distance < closest_dist:\n closest_dist = distance\n closest_point = point\n visited_cities.append(closest_point)\n visited_cities_set.add(closest_point)\n current_city = closest_point\n min_tour_length += closest_dist\n closest_dist = inf\n\n # calculate the distance from the last city to the first to complete the tour\n last_city = visited_cities[len(visited_cities) - 1]\n last_city_point = point_coordinates[last_city]\n first_city_point = point_coordinates[1]\n min_tour_length += first_city_point.distance_to(last_city_point)\n\n print(\"Minimum tour length: \" + str(min_tour_length))\n return min_tour_length", "def _check_for_interior_convergence_and_update(\n x_candidate,\n hessian_info,\n lambdas,\n stopping_criteria,\n converged,\n):\n if lambdas.candidate == 0:\n x_candidate = np.zeros_like(x_candidate)\n converged = True\n\n s_min, z_min = estimate_smallest_singular_value(hessian_info.upper_triangular)\n step_len = 2\n\n if step_len**2 * s_min**2 <= stopping_criteria[\"k_hard\"] * lambdas.candidate:\n x_candidate = step_len * z_min\n converged = True\n\n lambda_lower_bound = max(lambdas.lower_bound, lambdas.upper_bound - s_min**2)\n lambda_new_candidate = _get_new_lambda_candidate(\n lower_bound=lambda_lower_bound, upper_bound=lambdas.candidate\n )\n\n lambdas_new = lambdas._replace(\n candidate=lambda_new_candidate,\n lower_bound=lambda_lower_bound,\n upper_bound=lambdas.candidate,\n )\n\n return x_candidate, lambdas_new, converged", "def compute_spline(self, initial_state, final_state):\r\n a, b, c, s = self._initialize_spline(initial_state, final_state)\r\n final_state_pred = self._motion_update_one_shot(initial_state, a, b, c, s)\r\n\r\n converge = self._check_converge(final_state, final_state_pred)\r\n total_iter = 0\r\n # pdb.set_trace()\r\n while (total_iter < self.max_iter) & (converge is not True): # (total_iter < self.max_iter) 
\r\n \r\n \r\n correction = self._compute_correction(initial_state, final_state, a, b, c, s)\r\n a = a - correction[0]\r\n b = b - correction[1]\r\n # c = c - correction[2]\r\n s = s - correction[2]\r\n \r\n final_state_pred = self._motion_update_one_shot(initial_state, a, b, c, s)\r\n\r\n converge = self._check_converge(final_state, final_state_pred)\r\n total_iter = total_iter +1\r\n\r\n # print(total_iter)\r\n # print(final_state_pred)\r\n # print(s)\r\n\r\n # sometimes it converge to negative s (travel distance) which \r\n # is invalid..., need to figure it out...\r\n if (converge == True) & (s > 0):\r\n final_state_pred, point_list = self._path_sampling_one_shot(initial_state, a, b, c, s)\r\n else:\r\n point_list = [[-1,-1]]\r\n\r\n return point_list", "def find_zero(f, df):\n def near_zero(x):\n return approx_eq(f(x), 0)\n return improve(newton_update(f, df), near_zero)", "def frank_wolfe(initial_point, iterations, kernel, kernel_mean_map, test_points, steps='line-search'):\n line_search = False\n if steps == 'line-search':\n line_search = True\n rho_arr = np.empty(iterations)\n rho_arr[0] = frank_wolfe_step_line_search(initial_point, np.zeros((0)), np.zeros((0,2)), kernel, kernel_mean_map)\n elif type(steps) is str:\n rho_arr = frank_wolfe_steps(steps, iterations)\n elif type(steps) in [list, np.ndarray]:\n rho_arr = np.asarray(steps)\n else:\n raise Exception(\"Don't understand rho_method={}\".format(steps))\n\n assert len(rho_arr) == iterations\n ret = np.empty((iterations, initial_point.shape[1]))\n ret[0, :] = initial_point\n for i in xrange(1, iterations):\n # todo: optimal weights\n weights = frank_wolfe_weights(rho_arr, i)\n scores = frank_wolfe_scores(weights, ret[:i, :], test_points, kernel, kernel_mean_map)\n best_score_ix = np.argmin(scores)\n new_pt = test_points[best_score_ix, :]\n ret[i, :] = new_pt\n \n if line_search:\n rho_arr[i] = frank_wolfe_step_line_search(new_pt, weights, ret[:i, :], kernel, kernel_mean_map)\n final_weights = frank_wolfe_weights(rho_arr, iterations)\n return ret, final_weights", "def newtonsMethod(f, df, ddf, x, niter=10):\n\n points = []\n\n for i in xrange(niter):\n point = np.dot(-la.inv(ddf(x)), (df(x)))\n\n slope = np.dot(df(x), point)\n\n a = backtracking(f, slope, x, point)\n \n #update point\n x_k = x + a*point\n points.append(x_k)\n x = x_k\n\n return points", "def check_directions_find_waypoint(current_point, current_segment,\n delta_before_after, segmented_points):\n\n delta_lat_before_current = delta_before_after[0]\n delta_lng_before_current = delta_before_after[1]\n\n delta_lat_after_current = delta_before_after[2]\n delta_lng_after_current = delta_before_after[3]\n\n # check to see if the delta x's in both directions are longer\n # than the delta y's in both directions\n if (delta_lat_before_current > delta_lng_before_current) and \\\n (delta_lat_after_current > delta_lng_after_current):\n print \"inside first if\"\n # the latitudes are longer than the longitudes, get waypoints\n # in the longitude direction\n\n # don't forget to generate waypoints\n waypoint_e_w = inspect_waypoints(current_point, \"lngwise\")\n try_waypoints(waypoint_e_w, current_segment, segmented_points)\n elif (delta_lng_before_current > delta_lat_before_current) and \\\n (delta_lng_after_current > delta_lat_after_current):\n print \"inside elif, checks the north and south creation\"\n # the longitudes are longer than the latitudes, get waypoints\n # in the latitude direction\n\n # don't forget to generate waypoints\n waypoint_n_s = 
inspect_waypoints(current_point, \"latwise\")\n try_waypoints(waypoint_n_s, current_segment, segmented_points)\n else:\n print \"inside else, checks all directions NS-EW\"\n\n # don't forget to generate waypoints\n waypoint_all = inspect_waypoints(current_point, \"all\")\n try_waypoints(waypoint_all, current_segment, segmented_points)\n\n # return only the waypoints and start/end lat,lngs\n return segmented_points", "def pointfind(plat, plon, lat, lon, pdif = 1):\n\t\n\tfff = 10\n\twhile (fff > 1):\n\t\t\n\t\t#conditions for latitude (lat - 2d array of latitudes)\n\t\tc_lat=(lat>(plat-pdif))&(lat<(plat+pdif))\n\t\t#conditions for longiyude (lon - 2d array of longitudes)\n\t\tc_lon=(lon>(plon-pdif))&(lon<(plon+pdif))\n\t\t\n\t\t#combine both conditions together\n\t\tc_all=c_lat&c_lon\n\t\t\n\t\t#values of the points that fulfil conditions\n\t\tplatf = lat[numpy.nonzero(c_all)]\n\t\tplonf = lon[numpy.nonzero(c_all)]\n\t\t\n\t\t\t\t\n\t\t#indeces of the poin that fulfil conditions \n\t\tg = numpy.nonzero(c_all)\n\t\t\n\t\t\n\t\t#check if we have found uniq solution\n\t\tfff = platf.shape[0]\n\t\t# decrease window to reduce amount of solutions if we have more than one\n\t\t#print(pdif)\n\t\tpdif = pdif-0.001\n\tprint(\"coordinates of the point that fulfil conditions: \"+str(platf)+\" \"+str(plonf))\n\tprint(\"indeces of the point that fulfil conditions: \"+str(g[0])+\" \"+str(g[1]))\n\t\n\treturn(g, platf, plonf)", "def problem5(self, s):\n points = 0\n\n points = self.neighbor( 10, 10, s.nearest_neighbor)*3\n points += self.neighbor(100, 10, s.nearest_neighbor)*3\n points += self.neighbor( 10, 100, s.nearest_neighbor)*3\n points += self.neighbor(100, 100, s.nearest_neighbor)*3\n points += self.neighbor(100, 100, s.nearest_neighbor)*3\n\n _testDriver.get_code(s.nearest_neighbor)\n print \"\\n(Check that scipy.spatial.KDTree is not used)\"\n points *= self.grade(1)\n \n return points", "def uniform_search(fun, a, b, E, n=3, counter=0):\n if b - a < E:\n return (b + a) / 2, counter\n step = (b - a) / n\n xn = a + step\n min_x = a\n min_f = fun(a)\n while xn <= b:\n counter += 1\n f = fun(xn)\n if f < min_f:\n min_x = xn\n min_f = f\n xn += step\n counter += 2\n if fun(min_x - step) < fun(min_x + step):\n return uniform_search(fun, min_x - step, min_x, E, n, counter)\n return uniform_search(fun, min_x, min_x + step, E, n, counter)", "def optimized_travelling_salesman(points, start=None):\n if start is None:\n start = points[0]\n must_visit = points\n path = [start]\n must_visit.remove(start)\n while must_visit:\n nearest = min(must_visit, key=lambda x: distance(path[-1], x))\n path.append(nearest)\n must_visit.remove(nearest)\n return path", "def FindClosestInsertedPoint(self, ):\n ...", "def graham_scan(points):\n\n # Find point with smallest y coordinate\n # If two points have equal y coordinates, select the one with the lower x-coordinate\n smallest = points[0]\n for p in points:\n if p[1] < smallest[1]:\n smallest = p\n elif p[1] == smallest[1]:\n if p[0] < smallest[0]:\n smallest = p\n\n # Sort points by angle over smallest to x-axis\n points.sort(key=lambda x: angle(x, smallest))\n\n # Our stack\n hull = [smallest, points[1]]\n i = 2\n while i < len(points):\n # If the last points and the new point form a counter-clockwise triangle,\n # we need the last point. 
Therefore, push the new point\n if ccw(hull[-2], hull[-1], points[i]) > 0 or len(hull) == 2:\n hull.append(points[i])\n i += 1\n # If the two last points and the new point don't form a counter-clockwise triangle,\n # the we don't need the last point\n else:\n hull.pop()\n return hull", "def new_and_near(self):\n if self.prob and random.random() < self.prob:\n x_rand = self.goal_config\n else:\n x_rand = self.planning_env.sample_free()\n x_nearest_id, x_nearest = self.tree.GetNearestVertex(x_rand)\n x_new = self.steer(x_nearest, x_rand)\n # check if new point is in X_free and not already in V\n # if x_new in self.tree.vertices or not self.planning_env.state_validity_checker(x_new):\n if x_new in self.tree.vertices or not self.planning_env.collision_free(x_new, x_nearest):\n return None, None\n\n self.tree.samples_taken += 1\n return x_new, x_nearest", "def bisection(f, fu, point_a, point_b, point_c, point_d, lower_bound, upper_bound, length):\n n = 1\n theta = 0\n a = lower_bound\n b = upper_bound\n while n <= 100:\n theta = (a + b) / 2.0\n if -1e-6 < f(fu(point_a, point_b, point_c, theta), point_d) - length < 1e-6:\n # print 'Residual', f(fu(point_a, point_b, point_c, theta), point_d) - length\n # print 'iteration', n\n return theta\n else:\n n = n + 1\n if f(fu(point_a, point_b, point_c, theta), point_d) - length > 0:\n b = theta\n else:\n a = theta\n\n print 'failedtheta', theta, 'Residual', f(fu(point_a, point_b, point_c, theta), point_d) - length\n print 'iteration', n\n return False", "def _closest_point(self, x, z, start_param, Ns):\n pi = np.pi\n def f(t):\n px, pz = self(t)\n return np.sqrt((x-px)**2 + (z-pz)**2)\n if start_param is None:\n x0 = brute(lambda x: f(x[0]), [[0, pi]], Ns=Ns, finish=None)\n step = np.pi/(Ns-1)\n res = minimize_scalar(\n f, bounds=[max(0, x0-step), min(np.pi, x0+step)], method='bounded',\n options=dict(xatol=1e-12),\n )\n else:\n res = minimize_scalar(f, bracket=(start_param, pi/Ns),\n options=dict(xtol=1e-12))\n la = res.x\n return la", "def get_new_bracket(x1, x2, x3, x4):\n points = [x1, x2, x3]\n dist = float(inf)\n for point in points:\n if abs(x4 - point) < dist and f(point) * f(x4) < 0:\n valid_point = point\n dist = abs(x4 - point)\n return valid_point", "def newton_decent_directions(function, func_derivative, func_hessian, xk, A, P, b, q, t):\r\n # calculate steepest decent direction\r\n newton_dir = -np.dot(np.linalg.inv(func_hessian(x=xk, A=A, P=P, b=b, q=q, t=t)), func_derivative(x=xk, A=A, P=P, b=b, q=q, t=t))\r\n\r\n return newton_dir", "def fix_point(h, lower, upper):\n return brentq(lambda x: x - h(x), lower, upper)", "def brents(f, x0, x1, max_iter=50, tolerance=1e-5):\n \n fx0 = f(x0)\n fx1 = f(x1)\n \n assert (fx0 * fx1) <= 0, \"Root not bracketed\" \n \n if abs(fx0) < abs(fx1):\n x0, x1 = x1, x0\n fx0, fx1 = fx1, fx0\n \n x2, fx2 = x0, fx0\n \n d = np.nan\n mflag = True\n steps_taken = 0\n \n while steps_taken < max_iter and abs(x1-x0) > tolerance:\n fx0 = f(x0)\n fx1 = f(x1)\n fx2 = f(x2)\n \n if fx0 != fx2 and fx1 != fx2:\n L0 = (x0 * fx1 * fx2) / ((fx0 - fx1) * (fx0 - fx2))\n L1 = (x1 * fx0 * fx2) / ((fx1 - fx0) * (fx1 - fx2))\n L2 = (x2 * fx1 * fx0) / ((fx2 - fx0) * (fx2 - fx1))\n new = L0 + L1 + L2\n \n else:\n new = x1 - ( (fx1 * (x1 - x0)) / (fx1 - fx0) )\n \n tt1 = (new < ((3 * x0 + x1) / 4) or new > x1)\n tt2 = (mflag == True and (abs(new - x1)) >= (abs(x1 - x2) / 2))\n tt3 = (mflag == False and (abs(new - x1)) >= (abs(x2 - d) / 2))\n tt4 = (mflag == True and (abs(x1 - x2)) < tolerance)\n tt5 = (mflag == False and (abs(x2 - 
d)) < tolerance)\n if (tt1 or\n tt2 or\n tt3 or\n tt4 or\n tt5):\n new = (x0 + x1) / 2\n mflag = True\n \n else:\n mflag = False\n \n fnew = f(new)\n d, x2 = x2, x1\n \n if (fx0 * fnew) < 0:\n x1 = new\n else:\n x0 = new\n \n if abs(fx0) < abs(fx1):\n x0, x1 = x1, x0\n \n steps_taken += 1\n \n return x1, steps_taken", "def cgd_linesearch(x, error0, direction, error_fcn, h):\n\n # FIXME: Add tests\n\n x = np.asarray(x)\n direction = np.asarray(direction)\n h = np.asarray(h)\n\n direction_n = direction / np.linalg.norm(direction, ord=2)\n error_list = [error0]\n stepsize = h\n maxSteps = 6\n factor = np.zeros(1)\n\n for iStep in range(1, maxSteps):\n\n factor = np.concatenate([factor, [2**(iStep-1)]])\n xc = x.copy() + direction_n * stepsize * factor[iStep]\n error, xc = error_fcn(xc) # xc may be changed due to limits\n error_list.append(error)\n\n if error_list[-1] >= error_list[-2]: # end of decline\n if iStep == 1: # no success\n step = 0\n error1 = error0\n\n else: # parabolic\n p = np.polyfit(factor, error_list, 2)\n fx = np.arange(factor[0], factor[-1] + .1, .1)\n fy = np.polyval(p, fx)\n idx = np.argmin(fy)\n fxm = fx[idx]\n xcm = x.copy() + direction_n * stepsize * fxm\n error1, xcm = error_fcn(xcm) # xc may be changed due to limits\n\n if error1 < error_list[iStep - 1]:\n xc = xcm.copy()\n step = fxm\n\n else: # finding Minimum did not work\n xc = x.copy() + direction_n * stepsize * factor[iStep-1] # before last point\n error1, xc = error_fcn(xc) # recalculate error in order to check for limits again\n step = factor[iStep-1]\n\n return xc, error1, step\n\n step = factor[iStep]\n error1 = error_list[iStep]\n\n return xc, error1, step", "def squareSearch( self, tTopLeft, tBottomRight, function, argsList ): #by LOQ\n\t\ttPaintedList = []\n\t\tresult = None\n\t\tfor x in range(tTopLeft[0], tBottomRight[0]+1):\n\t\t\tfor y in range(tTopLeft[1], tBottomRight[1]+1, -1): # edead: added -1, not sure why it didn't work before\n\t\t\t\tresult, bPaintPlot, bContinueSearch = function((x, y), result, argsList)\n\t\t\t\tif bPaintPlot: # paint plot\n\t\t\t\t\ttPaintedList.append((x, y))\n\t\t\t\tif not bContinueSearch: # goal reached, so stop\n\t\t\t\t\treturn result, tPaintedList\n\t\treturn result, tPaintedList", "def test_get_points_to_estimate(self):\r\n # Ref in range.\r\n obs = self.estimator1._get_points_to_estimate(4, 1, 5, 4)\r\n self.assertEqual(obs, [1, 2, 3, 4, 5])\r\n\r\n # Ref not in range.\r\n obs = self.estimator1._get_points_to_estimate(4, 5, 10, 2)\r\n self.assertEqual(obs, [4, 5, 7, 9])\r\n\r\n # stop not supplied.\r\n obs = self.estimator1._get_points_to_estimate(5, 5, num_steps=2)\r\n self.assertEqual(obs, [5, 17, 29])", "def find_edges(starting_point, max_dist, hi, lo, bgArray):\n try:\n b = fetch_val(bgArray, starting_point)\n except IndexError:\n return None\n offsets = [(0,1), (1,0), (0,-1), (-1,0)]\n edgePoints = []\n for offset in offsets:\n first_result = find_edge(starting_point, offset, max_dist, hi, lo, bgArray)\n if first_result is not None:\n edgePoints.append(first_result[0])\n if b < lo or b > hi:\n # Try to find second point, since starting click was outside threshold\n second_result = find_edge(first_result[0], offset, max_dist - first_result[1], hi, lo, bgArray)\n if second_result is not None:\n edgePoints.append(second_result[0])\n return edgePoints", "def jarvis_convex_hull(points):\n start_index = np.argmax(points[:, 0]) # Point with the highest y-coordinate\n start_point = points[start_index]\n # result = [start_index[:]]\n result = [start_index]\n 
added_points = {start_index}\n while True:\n for ref_index, ref_point in enumerate(points):\n exit_ = True\n if ref_index == start_index or ref_index in added_points:\n continue\n\n signs = 0\n threshold = len(points) - 2\n for compare_index, compare_point in enumerate(points):\n if compare_index == ref_index or compare_index == start_index:\n continue\n check = compare(start_point, ref_point, compare_point)\n if abs(check) < 1e-2:\n dist_start_ref = distance(start_point, ref_point)\n dist_start_compare = distance(start_point, compare_point)\n if dist_start_compare > dist_start_ref:\n threshold = threshold + 1\n else:\n threshold = threshold - 1\n continue\n signs = signs + 1 if check > 0 else signs - 1\n\n if abs(signs) < threshold:\n continue\n\n exit_ = False\n result.append(ref_index[:])\n added_points.add(ref_index)\n start_index = ref_index\n break\n\n if exit_:\n return result", "def newtons_method(function, start, epsilon_rounding=6):\n point = start\n\n f = get_gradient(function)\n jacobian_matrix = get_jacobian(f)\n inverse_jacobian = jacobian_matrix.inv()\n\n f_subs = gradient_subs(f, point)\n\n temp = [0, 0]\n\n points = [point]\n while temp != point:\n jacobian_subs_matrix = matrix_subs(jacobian_matrix, point)\n inverse_subs_jacobian = matrix_subs(inverse_jacobian, point)\n negative_gradient = Matrix([-x for x in f_subs])\n solution = Ax_b(jacobian_subs_matrix, negative_gradient)\n temp = [round(float(x), epsilon_rounding) for x in point]\n point = [a + b for a, b in zip(solution, point)]\n point = [round(float(x), epsilon_rounding) for x in point]\n points.append(point)\n f_subs = gradient_subs(f, point)\n new_minimum = [float(x) for x in point]\n\n return new_minimum, points, f\"The minimum is {new_minimum}, with a starting point of {start}\"", "def fibonacci_search(loss_function: rosenbrock, start: point, direction: list, epsilon=1) -> float:\n a, b = advance_retreat_method(loss_function, start, direction)\n\n # build the Fibonacci series\n F, d = [1.0, 2.0], (b - a) / epsilon\n while F[-1] < d: F.append(F[-1] + F[-2])\n\n # find the minimum\n N = len(F) - 1\n p, q = a + (1 - F[N - 1] / F[N]) * (b - a), a + F[N - 1] / F[N] * (b - a)\n while abs(a - b) > epsilon and N > 0:\n N = N - 1\n f_p = loss_function.f(start + point(direction[0] * p, direction[1] * p))\n f_q = loss_function.f(start + point(direction[0] * q, direction[1] * q))\n if f_p < f_q:\n b, q = q, p\n p = a + (1 - F[N - 1] / F[N]) * (b - a)\n else:\n a, p = p, q\n q = a + F[N - 1] / F[N] * (b - a)\n\n return (a + b) / 2", "def heuristic(current, goal):\r\n # return 1\r\n i = current[0] - goal[0]\r\n j = current[1] - goal[1]\r\n return math.sqrt(math.pow(i,2) + math.pow(j,2)) # Your code here\r\n # return math.fabs(current[0] - goal[0]) + math.fabs(current[1] - goal[1])\r", "def fl_search(fun, params: dict, n_iter: int=10)->dict:\n\n\n keys=list(params.keys())\n\n num_points={key: len(value) for key, value in params.items()}\n\n if not all(value == sorted(value) for key, value in params.items()):\n raise Exception(\" Some parameters are not in ascending order\")\n\n lower_point, upper_point=_init_upper_lower_points(keys=keys,num_points=num_points)\n move_up={}\n tracking=[]\n\n\n for _ in range(n_iter):\n # find the move direction for next round\n score,move_up= _find_move_direction(fun=fun,keys=keys,params=params,upper_point=upper_point,\n lower_point=lower_point,move_up=move_up)\n\n # Track the score for the optimization\n if len(tracking) >= 1 and score == tracking[-1]:\n break\n else:\n 
tracking.append(score)\n param = {}\n for key in keys:\n if move_up[key]:\n param[key] = params[key][upper_point[key]]\n else:\n param[key] = params[key][lower_point[key]]\n\n # Reset the lower_point and upper_point based move direction\n lower_point, upper_point = _reset_upper_lower_points(keys=keys, move_up=move_up,\n num_points=num_points,\n upper_point=upper_point,\n lower_point=lower_point)\n\n\n\n return (param, tracking)", "def run(self, function, x):\n self.check_compatibility(function[0], self.INTERFACES)\n self.check_compatibility(function[1], self.INTERFACES)\n\n x_new = x\n p_new = np.zeros(x.shape)\n q_new = np.zeros(x.shape)\n for i in xrange(1, self.max_iter + 1):\n\n x_old = x_new\n p_old = p_new\n q_old = q_new\n\n y_old = function[0].proj(x_old + p_old)\n p_new = x_old + p_old - y_old\n x_new = function[1].proj(y_old + q_old)\n q_new = y_old + q_old - x_new\n\n if maths.norm(x_new - x_old) / maths.norm(x_old) < self.eps \\\n and i >= self.min_iter:\n break\n\n return x_new", "def find_indirect_gap(self,rpts=5):\n # First find the miniumu of the upper band.\n # Start with a random point in the BZ.\n x0up=[self.kS.kx0+random()*(self.kS.kxmax-self.kS.kx0),self.kS.ky0+random()*(self.kS.kymax-self.kS.ky0)]\n # Define functions to minimize\n fun1= lambda x: self.Ham_eigvals(x[0],x[1])[self.NL]\n # Optimize initial guess.\n x1up=optimize.minimize(fun1,x0up).x\n valup=fun1(x1up)\n # Reiterate to check for local minima.\n for ix in range(rpts):\n for iy in range(rpts):\n x0up=[self.kS.kx0+random()*(self.kS.kxmax-self.kS.kx0),self.kS.ky0+random()*(self.kS.kymax-self.kS.ky0)]\n xnew1up=optimize.minimize(fun1,x0up).x\n if fun1(xnew1up)<valup:\n x1up=xnew1up\n valup=fun1(x1up)\n # Also always check special points in the BZ\n x0up=[0.,(4.*pi/3.)/np.sqrt(3.)]\n xnew1up=optimize.minimize(fun1,x0up).x\n if fun1(xnew1up)<valup:\n x1up=xnew1up\n valup=fun1(x1up)\n x0up=[2.*pi/3.,(2.*pi/3.)/np.sqrt(3.)]\n xnew1up=optimize.minimize(fun1,x0up).x\n if fun1(xnew1up)<valup:\n x1up=xnew1up\n valup=fun1(x1up)\n \n # Repeat the same for the lower band\n x0dn=[self.kS.kx0+random()*(self.kS.kxmax-self.kS.kx0),self.kS.ky0+random()*(self.kS.kymax-self.kS.ky0)]\n # Define functions to minimize\n fun2= lambda x: -self.Ham_eigvals(x[0],x[1])[self.NL-1]\n # Optimize initial guess.\n x1dn=optimize.minimize(fun2,x0dn).x\n valdn=fun2(x1dn)\n # Reiterate to check for local minima.\n for ix in range(rpts):\n for iy in range(rpts):\n x0dn=[self.kS.kx0+random()*(self.kS.kxmax-self.kS.kx0),self.kS.ky0+random()*(self.kS.kymax-self.kS.ky0)]\n xnew1dn=optimize.minimize(fun2,x0dn).x\n if fun2(xnew1dn)<valdn:\n x1dn=xnew1dn\n valdn=fun2(x1dn)\n # Also always check special points in the BZ\n x0dn=[0.,(4.*pi/3.)/np.sqrt(3.)]\n xnew1dn=optimize.minimize(fun2,x0dn).x\n if fun2(xnew1dn)<valdn:\n x1dn=xnew1dn\n valdn=fun2(x1dn)\n x0dn=[2.*pi/3.,(2.*pi/3.)/np.sqrt(3.)]\n xnew1dn=optimize.minimize(fun2,x0dn).x\n if fun2(xnew1dn)<valdn:\n x1dn=xnew1dn\n valdn=fun2(x1dn)\n \n return valup+valdn,x1up,x1dn", "def getPointAwayFrom(startPoint, direction, distance):\n x = vectorMultiply(direction, distance)\n return vectorAdd(startPoint, x)", "def greedyOptimize(self, cpoints):\n # the currently best known energy is the current energy\n best_energy = self.totalEnergy(cpoints.values())\n best_before = best_energy\n cpoints_ = cpoints.copy()\n # iterate over each control point in order to find the movement\n # that improves it i.e. 
the snakes overall energy best\n cv = cpoints_.values()\n for i in range(len(cpoints_)):\n best_step = None \n # test all possible steps\n for step in self.step_directions:\n c1 = cpoints_[i]\n # only check a step if it ends within the image bounds\n if self.inImageBound(cpoints_[i] + step):\n # apply the step to the control point\n cpoints_[i] = cpoints_[i] + step\n # compute the new energy\n new = self.totalEnergy(cpoints_.values())\n # check wether it is a true improvement\n if new < best_energy:\n assert new < best_energy\n # update the currently best known energy\n best_energy = new\n best_step = step\n cv = cpoints_.values()\n cpoints_[i] = cpoints_[i] - step\n assert (c1[0], c1[1]) == (cpoints_[i][0], cpoints_[i][1])\n \n # apply the best step to the control point\n if best_step != None:\n cpoints_[i] = cpoints_[i] + best_step\n \n # ensure saneness\n assert np.array_equal(cv, cpoints_.values())\n self.bestenergy_debug = best_energy\n assert best_before >= best_energy, '(%s !>= %s) the optimized energy is not euqal-smaller than the energy before' % (best_before, best_energy)\n assert self.totalEnergy(cpoints_.values()) == best_energy, '(%s != %s) the new calculated energy does not equal the best calculated energy' % (self.totalEnergy(cpoints_.values()), best_energy)\n return cpoints_", "def pointfind2(plat, plon, lat, lon, pdif=1):\n\n\tdist_min = 1000000.\n\t\n\t\n\tfor i in range(lon.shape[0]):\n\t\tfor j in range(lon.shape[1]):\n\t\t\tdist = Ngl.gc_dist(plat,plon,lat[i,j],lon[i,j])\n\t\t\tif dist_min > dist:\n\t\t\t\tdist_min = dist\n\t\t\t\ti_min = i\n\t\t\t\tj_min = j\n\t\t\t\tlat_min = lat[i,j]\n\t\t\t\tlon_min = lon[i,j]\n\t\n\tprint(i_min,j_min,lat_min,lon_min)\n\tgg1 = i_min, j_min\n\t\n\treturn(gg1, lat_min, lon_min)", "def run(self, function, x):\n self.check_compatibility(function[0], self.INTERFACES)\n self.check_compatibility(function[1], self.INTERFACES)\n\n x_new = x\n p_new = np.zeros(x.shape)\n q_new = np.zeros(x.shape)\n for i in xrange(1, self.max_iter + 1):\n\n x_old = x_new\n p_old = p_new\n q_old = q_new\n\n y_old = function[0].prox(x_old + p_old)\n p_new = x_old + p_old - y_old\n x_new = function[1].prox(y_old + q_old)\n q_new = y_old + q_old - x_new\n\n if maths.norm(x_new - x_old) / maths.norm(x_old) < self.eps \\\n and i >= self.min_iter:\n break\n\n return x_new", "def find_crime_areas(segmented_points):\n\n # once all of the interpolated points are loaded into segmented_points\n # loop through them again to find out which places are high crime.\n bad_neighborhood_crime_index = 0.2\n\n for j in range(1, len(segmented_points)):\n print \"segmented_points[j]\", segmented_points[j]\n # ====================================================================\n # waypoint algorithm fleshing out\n # ====================================================================\n if segmented_points[j]['crime_index'] > bad_neighborhood_crime_index:\n # get the center of the geohash\n print \"This is a bad neighborhood\"\n\n # this is probably temporary, for display purposes\n segmented_points[j]['is_high_crime'] = True\n\n # do a conditional that if the bad neighborhood is at\n # len(segmented_points) we need to go get the end dict\n\n # now that we know what the bad neighborhood point is, let's get\n # the latitude, longitude from the point before and the point after\n if 'lat' not in segmented_points[j-1] or 'lng' not in segmented_points[j-1]:\n point_before = (segmented_points[j-1]['data']['start']['lat'],\n segmented_points[j-1]['data']['start']['lng'])\n else:\n 
point_before = (segmented_points[j-1]['lat'],\n segmented_points[j-1]['lng'])\n\n if 'lat' not in segmented_points[j+1] or 'lng' not in segmented_points[j+1]:\n point_after = (segmented_points[j+1]['data']['end']['lat'],\n segmented_points[j+1]['data']['end']['lng'])\n else:\n point_after = (segmented_points[j+1]['lat'],\n segmented_points[j+1]['lng'])\n\n current_point = (segmented_points[j]['lat'],\n segmented_points[j]['lng'])\n\n # before calling inspect_waypoints, check the deltas for the\n # step before and the step after to determine whether the function\n # needs to be called twice, or four times, and what direction to go\n # get the change in latitude and longitude between the before\n # and current point location\n delta_lat_before_current = current_point[0] - point_before[0]\n delta_lng_before_current = current_point[1] - point_before[1]\n\n # get the change in latitude and longitude between the before\n # and current point location\n delta_lat_after_current = point_after[0] - current_point[0]\n delta_lng_after_current = point_after[1] - current_point[1]\n\n delta_before_after = [delta_lat_before_current, delta_lng_before_current,\n delta_lat_after_current, delta_lng_after_current]\n\n segmented_points = check_directions_find_waypoint(current_point,\n segmented_points[j],\n delta_before_after,\n segmented_points)\n print \"this is segmented_points[0] returned\", segmented_points[0]\n return segmented_points[0]", "def brute_closest_pair(points):\n n = len(points)\n min_distance = float(\"inf\")\n last_pair = None\n for i in range(n):\n for j in range(i+1, n):\n result = distance(points[i], points[j])\n if result < min_distance:\n min_distance = result\n last_pair = [points[i], points[j]]\n return last_pair", "def graham_scan(points):\n if len(points) <= 3:\n return points\n pointList = ExtendedTupleList(points)\n complete_range = pointList.range_within(0, 1)\n first_point = (complete_range[1][\"min\"][1], complete_range[1][\"min\"][0])\n newPoints = ExtendedTupleList([])\n for point in pointList:\n square_dist, cosine = line_length_angle((first_point, point))\n new_point = (point[0], point[1], square_dist, cosine)\n newPoints.append(new_point)\n newPoints.double_sort(3, 2, reverse_outside = True, reverse_inside = True)\n hull = ExtendedTupleList([])\n hull.append(first_point)\n hull.append(newPoints[0])\n lastAngle = newPoints[0][3]\n for k in range(1, len(newPoints)):\n if newPoints[k][3] == lastAngle:\n continue\n lastAngle = newPoints[k][3]\n while (len(hull) >= 2 and direction(hull[-2], hull[-1], newPoints[k]) >= 0):\n hull.pop()\n hull.append(newPoints[k])\n real_hull = []\n for point in hull:\n real_hull.append((point[0], point[1]))\n real_hull.append(real_hull[0])\n return real_hull", "def steepest_descent_f(init_x0, init_x1, alpha, n=5):\n ret = [(init_x0, init_x1)]\n a = init_x0\n b = init_x1\n for i in range(n):\n a1 = a - alpha * gradF1(a, b)\n b1 = b - alpha * gradF2(a, b)\n ret.append((a1, b1))\n a = a1\n b = b1 \n \n return ret", "def FindClosestPoint(self, ):\n ...", "def dpsearch(points,k):\n\t#M = k\n\tpoints = np.sort(points,axis=0)\n\tL = len(points)\n\tM = k\n\tT = list(np.zeros(M+1,dtype='int'))\n\tT[0] = 0\t#first threshold is by default always set to index 0 in trellis graph.\n\tT[M] = L \t#last threshold is by default always set to last number in input points.\n\ttrellis_value = np.full((M+1,L+1),np.inf)\n\ttrellis_backpointer = np.full((M+1,L+1),np.inf)\n\n\t# Stage 1: m=1\t\n\tfor l in range(1,L-M+2):\n\t\ttrellis_value[1][l] = 
((l-0)/float(L))*np.var(points[0:l])\n\t\ttrellis_backpointer[1][l] = 0\n\n\t\n\tif(M>2):\n\t\t# Stage 2: m=2 to m=M-1\n\t\tfor m in range(2,M):\n\t\t\tfor l in range(m,L-M+m+1):\n\t\t\t\t#finding optimal path\n\t\t\t\tJ_min = np.inf\n\t\t\t\tJ_temp = np.inf\n\t\t\t\tfor i in range(m-1,l):\n\t\t\t\t\tJ_temp = trellis_value[m-1][i] + ((l-i)/float(L))*np.var(points[i:l])\n\t\t\t\t\tif J_temp < J_min:\n\t\t\t\t\t\tJ_min = J_temp\n\t\t\t\t\t\tptr = i\n\t\t\t\t\n\t\t\t\ttrellis_value[m][l],trellis_backpointer[m][l] = J_min,ptr\n\t\t\t\t\n\n\t# Stage 3: m=M\n\tm = M\n\tl = L\n\t#finding optimal path\n\tJ_min = np.inf\n\tJ_temp = np.inf\n\tfor i in range(m-1,l):\n\t\tJ_temp = trellis_value[m-1][i] + ((l-i)/float(L))*np.var(points[i:l])\n\t\tif J_temp < J_min:\n\t\t\tJ_min = J_temp\n\t\t\tptr = i\n\n\t\n\ttrellis_value[M][L] = J_min\n\ttrellis_backpointer[M][L] = ptr\n\t\n\t\n\t# Backtracking\n\tl = L\n\tm = M\n\twhile m>=2:\n\t\tT[m-1] = int(trellis_backpointer[m][l])\n\t\tl = int(trellis_backpointer[m][l])\n\t\tm = m - 1\n\n\t#Assign cluster labels\n\tlabels = np.full(len(points),0)\n\tj = T[0]\n\tcounter = 0\n\tfor i in range(1,k+1):\n\t\tlabels[j:T[i]] = counter\n\t\tj = T[i]\n\t\tcounter += 1\n\n\n\treturn labels,T", "def fast(maze):\n # TODO: Write your code here\n pq = []\n visited = {}\n\n goals = maze.getObjectives()\n goals_pq = new_pq(maze, goals, maze.getStart())\n\n f, curr_goal = heapq.heappop(goals_pq)\n heapq.heappush(pq, (f, [maze.getStart()]))\n\n while len(pq) > 0:\n curr_path = heapq.heappop(pq)[1]\n curr = curr_path[-1]\n\n if curr in visited:\n continue\n heuristic = closest(maze, curr, curr_goal)\n\n f = heuristic + len(curr_path) - 1\n visited[curr] = f\n if curr in goals:\n goals.remove(curr)\n if len(goals) == 0:\n return curr_path\n else:\n # print(\"before\")\n # print(curr_goal)\n goals_pq = new_pq(maze, goals, curr)\n f, curr_goal = heapq.heappop(goals_pq)\n # print(\"after\")\n # print(curr_goal)\n pq = []\n heapq.heappush(pq, (f, curr_path))\n visited.clear()\n continue\n for item in maze.getNeighbors(curr[0], curr[1]):\n heuristic = closest(maze, item, curr_goal)\n new_f = heuristic + len(curr_path) - 1\n if item not in visited:\n heapq.heappush(pq, (new_f, curr_path + [item]))\n else: # checks if overlap has smaller f\n if new_f < visited[item]:\n visited[item] = new_f\n heapq.heappush(pq, (new_f, curr_path + [item]))\n return []", "def brute_force(points):\n if len(points) == 2:\n return points[0], points[1], calc_distance(points[0], points[1])\n\n return min(((pair[0], pair[1], calc_distance(pair[0], pair[1]))\n for pair in combinations(points, 2)), key=pair_key)", "def gradientDescent(f, df, x, niter=10):\n\n points = []\n\n for i in xrange(niter):\n point = -dfx\n slope = np.dot(point,-point)\n \n #calculate a\n a = backtracking(f,slope,x,point)\n \n\n #update the search point\n x_k = x + a*p\n points.append(x_k)\n x = x_k\n\n return points", "def refine(self, spline_data, tolerance=170.0, recursions=0):\r\n\r\n # self.spline_data = [coo, u, t, der1, der2, tck]\r\n xx, yy = spline_data[0]\r\n t = spline_data[2]\r\n tck = spline_data[5]\r\n\r\n logger.debug('\\nPoints before refining: {} \\n'.format(len(xx)))\r\n\r\n xn = copy.deepcopy(xx)\r\n yn = copy.deepcopy(yy)\r\n tn = copy.deepcopy(t)\r\n\r\n j = 0\r\n refinements = 0\r\n first = True\r\n refined = dict()\r\n\r\n for i in range(len(xx) - 2):\r\n refined[i] = False\r\n\r\n # angle between two contour line segments\r\n a = np.array([xx[i], yy[i]])\r\n b = np.array([xx[i + 1], yy[i + 1]])\r\n c = 
np.array([xx[i + 2], yy[i + 2]])\r\n angle = Utils.angle_between(a - b, c - b, degree=True)\r\n\r\n if angle < tolerance:\r\n\r\n logger.debug('Refining between segments {} {},'\r\n .format(i, i + 1))\r\n logger.debug('Tol={0:5.1f}, Angle={1:05.1f}\\n'\r\n .format(tolerance, angle))\r\n\r\n refined[i] = True\r\n refinements += 1\r\n\r\n # parameters for new points\r\n t1 = (t[i] + t[i + 1]) / 2.\r\n t2 = (t[i + 1] + t[i + 2]) / 2.\r\n\r\n # coordinates of new points\r\n p1 = interpolate.splev(t1, tck, der=0)\r\n p2 = interpolate.splev(t2, tck, der=0)\r\n\r\n # insert points and their parameters into arrays\r\n if i > 0 and not refined[i - 1]:\r\n xn = np.insert(xn, i + 1 + j, p1[0])\r\n yn = np.insert(yn, i + 1 + j, p1[1])\r\n tn = np.insert(tn, i + 1 + j, t1)\r\n j += 1\r\n xn = np.insert(xn, i + 2 + j, p2[0])\r\n yn = np.insert(yn, i + 2 + j, p2[1])\r\n tn = np.insert(tn, i + 2 + j, t2)\r\n j += 1\r\n\r\n if first and recursions > 0:\r\n logger.debug('Recursion level: {} \\n'.format(recursions))\r\n first = False\r\n\r\n logger.debug('Points after refining: {}'.format(len(xn)))\r\n\r\n # update coordinate array, including inserted points\r\n spline_data[0] = (xn, yn)\r\n # update parameter array, including parameters of inserted points\r\n spline_data[2] = tn\r\n\r\n # this is the recursion :)\r\n if refinements > 0:\r\n self.refine(spline_data, tolerance, recursions + 1)\r\n\r\n # stopping from recursion if no refinements done in this recursion\r\n else:\r\n # update derivatives, including inserted points\r\n spline_data[3] = interpolate.splev(tn, tck, der=1)\r\n spline_data[4] = interpolate.splev(tn, tck, der=2)\r\n\r\n logger.debug('No more refinements.')\r\n logger.debug('\\nTotal number of recursions: {}'\r\n .format(recursions - 1))\r\n\r\n # due to recursive call to refine, here no object can be returned\r\n # instead use self to transfer data to the outer world :)\r\n self.spline_data = copy.deepcopy(spline_data)\r\n return", "def optimiselike(likefunc, samples, x1):\n\n xnplus1 = x1\n xn = x1 + 100 # makes starting condition true\n \n while not arecloseenough(xnplus1, xn):\n xn = xnplus1 \n xnplus1 = xn - (firstderivative(likefunc, xn, samples)/secondderivative(likefunc, xn, samples))\n \n return xn", "def backtracking(f, slope, x, p, a=1, rho=.9, c=10e-4):\n\n\n while f(x+a*p) > f(x) + c*a*slope :\n a = rho*a\n\n return a", "def fn(lo, hi):\n if lo >= hi: return 0 # no need to guess \n ans = inf\n for mid in range(lo, hi+1): \n ans = min(ans, mid + max(fn(lo, mid-1), fn(mid+1, hi)))\n return ans", "def quasi_newtons_method(f, initial_guess, max_iter = 10000, method = 'BFGS', tol = 1e-12):\n \n if method not in ['BFGS', 'DFP', 'Broyden']:\n raise Exception(\"Not a valid method.\")\n x = initial_guess\n H = np.identity(len(x))\n for i in range(max_iter):\n x_vector = ad.create_vector('x', x)\n fn_at_x = f(x_vector)\n gradient = fn_at_x.getGradient(['x{}'.format(i) for i in range(1, len(x) + 1)])\n\n p = -H @ gradient\n \n alpha = line_search(f, x, p)\n delta_x = alpha * p\n\n x = x + delta_x\n x_vector2 = ad.create_vector('x', x)\n fn_at_x2 = f(x_vector2)\n gradient2 = fn_at_x2.getGradient(['x{}'.format(i) for i in range(1, len(x) + 1)])\n if np.sqrt(np.abs(gradient2).sum()) < tol:\n break\n y = (gradient2 - gradient).reshape(-1, 1)\n delta_x = delta_x.reshape(-1, 1)\n if method == 'BFGS':\n H = (np.identity(len(H)) - (delta_x @ y.T) / (y.T @ delta_x)) @ H \\\n @ (np.identity(len(H)) - (y @ delta_x.T) / (y.T @ delta_x)) + (delta_x @ delta_x.T) / (y.T @ delta_x)\n elif method == 
'DFP':\n H = H + (delta_x @ delta_x.T) / (delta_x.T @ y) - (H @ y @ y.T @ H) / (y.T @ H @ y)\n elif method == 'Broyden':\n H = H + ((delta_x - H @ y) @ delta_x.T @ H) / (delta_x.T @ H @ y)\n\n return (x, i + 1)", "def hillClimbingSearch_FC(problem, maxTrials, userInteraction, beQuiet):\n\n\n currentState = problem.state\n if not beQuiet:\n problem.visualize(currentState)\n steps = 0\n\n # for visualization\n problem.hVals.append(problem.getObjValue(currentState))\n \n while True:\n currentObjVal = problem.getObjValue(currentState)\n if problem.isGlobalOptimum(currentState):\n return steps, currentState\n trials = 0\n betterState = None\n while trials < maxTrials:\n neighbour = problem.getRandomNeighbour(currentState)\n nObjVal = problem.getObjValue(neighbour)\n if problem.isBetter(nObjVal, currentObjVal):\n betterState = neighbour\n break\n trials+=1\n if betterState: \n # jump to neighbour better than current state\n currentState = betterState\n \n # for visualization later on\n problem.hVals.append(problem.getObjValue(currentState))\n \n steps+=1\n if not beQuiet:\n if userInteraction:\n input(\"Press enter to continue \")\n problem.visualize(currentState)\n else:\n print(f\"{maxTrials} trials for random neighbours exhausted. No better neighbour found.\")\n return steps, currentState", "def optimizeposition(areas, omegas, x0, x1, z0, z1):\n\n # initial position of each quadpoint is at the center\n # of the edge connecting the midpoint and a corner point\n rhos = 0.5 * ones(4)\n a = 1 / sqrt(3)\n deltarhos = 0.25 * ones(4) # delta for finite differences\n\n while True: # while method has not converged\n # print(\"################## new iteration #############\")\n rhs = f(rhos, omegas, a, x0, x1, z0, z1, areas)\n print(\"##\")\n print(rhs)\n print(rhos)\n if norm(rhs) < 1e-5:\n break\n mat = df(rhos, omegas, a, x0, x1, z0, z1, areas, deltarhos)\n update = solve(mat, rhs)\n\n rhos += update\n # for i in range(4):\n # rhos[i] = max(0,min(1,rhos[i]))\n \"\"\"\n print(\"the norm of the rhs is \")\n print(norm(rhs))\n print(mat)\n print(\"rhs\")\n print(rhs)\n print(update)\n print(\"rhos\")\n print(rhos)\n \"\"\"\n # print(alpha)\n return rhos", "def points_in_distance(c, d):\n points = set()\n for i in range(0, d + 1):\n points = points | points_in_circle(c, i)\n return points", "def _rootsFinder(self, fun, jac, bounds, npoints, method):\n if method == \"regular\":\n step = (bounds[1] - bounds[0]) / (npoints + 1)\n try:\n X0 = np.arange(bounds[0] + step, bounds[1], step)\n except:\n X0 = np.random.uniform(bounds[0], bounds[1], npoints)\n elif method == \"random\":\n X0 = np.random.uniform(bounds[0], bounds[1], npoints)\n\n def objFun(X, f, jac):\n g = 0\n j = np.zeros(X.shape)\n i = 0\n for x in X:\n fx = f(x)\n g = g + fx**2\n j[i] = 2 * fx * jac(x)\n i = i + 1\n return g, j\n\n opt = minimize(\n lambda X: objFun(X, fun, jac),\n X0,\n method=\"L-BFGS-B\",\n jac=True,\n bounds=[bounds] * len(X0),\n )\n\n X = opt.x\n np.round(X, decimals=5)\n return np.unique(X)", "def bisection_3d(function, point_a, point_b, tol, max_iterations, \n show_process = False):\n # We are going to use a parametrization for the line between point_a and\n # point_b. In this case all the points between a and b can be represented\n # as: x = point_b + t(point_a - point_b), where t in a Real number. 
For\n # 0 <= t <=1 we get all the points between a and b.\n # This is why the first value for the ends int the bisection are 0 and 1.\n l = 0; r = 1\n mid = (l+r)/2\n\n # Evaluate the function in the left and mid point\n f_l = function( line_param(point_a, point_b, l) )\n f_r = function( line_param(point_a, point_b, r) )\n f_mid = function( line_param(point_a, point_b, mid) )\n\n # Check if cright end of the interval is zero\n if abs(f_r) < tol:\n return line_param(point_a, point_b, r)\n\n # Check if the function evaluated in the right end of the interval is zero\n if abs(f_r) < tol:\n return line_param(point_a, point_b, l)\n \n iterations = 0\n # While the value for the mid point is not zero\n while(abs(f_mid) >= tol):\n if iterations > max_iterations:\n break\n # If the change of sign is on the left side then the new right point\n # is the mid point.\n if f_l*f_mid <=0:\n r = mid\n # If the change of sign is on the right side then the new left point\n # is the mid point.\n else:\n l = mid\n f_l = function( line_param(point_a, point_b, l) )\n \n # Calculate and evaluate the new mid point\n mid = (r+l)/2\n f_mid = function( line_param(point_a, point_b, mid) )\n iterations += 1\n \n if show_process:\n print('number of iterations to find root = {0}'.format(iterations))\n\n return line_param(point_a, point_b, mid)", "def line_search_astep(stx, fx, dx, sty, fy, dy, stp, fp, dp, brackt, stpmin, stpmax):\r\n # parameter\r\n p66 = 0.66 # TODO why a magic constant\r\n\r\n sgnd = dp * (dx / abs(dx))\r\n\r\n if (fp > fx):\r\n # First case: A higher function value. The minimum is bracketed.\r\n # If the cubic step is closer to stx than the quadratic step, the\r\n # cubic step is taken, otherwise the average of the cubic and\r\n # quadratic steps is taken.\r\n\r\n theta = 3.0 * (fx - fp) / (stp - stx) + dx + dp\r\n s = max((abs(theta), abs(dx), abs(dp)))\r\n gamma = s * np.sqrt((theta / s) ** 2 - (dx / s) * (dp / s))\r\n if (stp < stx):\r\n gamma = -gamma\r\n\r\n p = (gamma - dx) + theta\r\n q = ((gamma - dx) + gamma) + dp\r\n r = p / q\r\n stpc = stx + r * (stp - stx)\r\n stpq = stx + ((dx / ((fx - fp) / (stp - stx) + dx)) / 2.0) * (stp - stx)\r\n if (abs(stpc - stx) < abs(stpq - stx)):\r\n stpf = stpc\r\n else:\r\n stpf = stpc + (stpq - stpc) / 2.0\r\n\r\n # brackt = true\r\n\r\n elif (sgnd < 0.0):\r\n # Second case: A lower function value and derivatives of opposite\r\n # sign. The minimum is bracketed. If the cubic step is farther from\r\n # stp than the secant step, the cubic step is taken, otherwise the\r\n # secant step is taken.\r\n\r\n theta = 3.0 * (fx - fp) / (stp - stx) + dx + dp\r\n s = max((abs(theta), abs(dx), abs(dp)))\r\n gamma = s * np.sqrt((theta / s) ** 2 - (dx / s) * (dp / s))\r\n if (stp > stx):\r\n gamma = -gamma\r\n\r\n p = (gamma - dp) + theta\r\n q = ((gamma - dp) + gamma) + dx\r\n r = p / q\r\n stpc = stp + r * (stx - stp)\r\n stpq = stp + (dp / (dp - dx)) * (stx - stp)\r\n if (abs(stpc - stp) > abs(stpq - stp)):\r\n stpf = stpc\r\n else:\r\n stpf = stpq\r\n\r\n # brackt = true\r\n\r\n elif (abs(dp) < abs(dx)):\r\n # Third case: A lower function value, derivatives of the same sign,\r\n # and the magnitude of the derivative decreases.\r\n\r\n # The cubic step is computed only if the cubic ts to infinity\r\n # in the direction of the step or if the minimum of the cubic\r\n # is beyond stp. 
Otherwise the cubic step is defined to be the\r\n # secant step.\r\n\r\n theta = 3.0 * (fx - fp) / (stp - stx) + dx + dp\r\n s = max((abs(theta), abs(dx), abs(dp)))\r\n\r\n # The case gamma = 0 only arises if the cubic does not t\r\n # to infinity in the direction of the step.\r\n\r\n gamma = s * np.sqrt(max(0.0, (theta / s) ** 2 - (dx / s) * (dp / s)))\r\n if (stp > stx):\r\n gamma = -gamma\r\n\r\n p = (gamma - dp) + theta\r\n q = (gamma + (dx - dp)) + gamma\r\n r = p / q\r\n if (r < 0.0 and gamma != 0.0):\r\n stpc = stp + r * (stx - stp)\r\n elif (stp > stx):\r\n stpc = stpmax\r\n else:\r\n stpc = stpmin\r\n\r\n stpq = stp + (dp / (dp - dx)) * (stx - stp)\r\n\r\n if (brackt):\r\n\r\n # A minimizer has been bracketed. If the cubic step is\r\n # closer to stp than the secant step, the cubic step is\r\n # taken, otherwise the secant step is taken.\r\n\r\n if (abs(stpc - stp) < abs(stpq - stp)):\r\n stpf = stpc\r\n else:\r\n stpf = stpq\r\n\r\n if (stp > stx):\r\n stpf = min(stp + p66 * (sty - stp), stpf)\r\n else:\r\n stpf = max(stp + p66 * (sty - stp), stpf)\r\n\r\n else:\r\n\r\n # A minimizer has not been bracketed. If the cubic step is\r\n # farther from stp than the secant step, the cubic step is\r\n # taken, otherwise the secant step is taken.\r\n\r\n if (abs(stpc - stp) > abs(stpq - stp)):\r\n stpf = stpc\r\n else:\r\n stpf = stpq\r\n\r\n stpf = min(stpmax, stpf)\r\n stpf = max(stpmin, stpf)\r\n\r\n else:\r\n # Fourth case: A lower function value, derivatives of the same sign,\r\n # and the magnitude of the derivative does not decrease. If the\r\n # minimum is not bracketed, the step is either stpmin or stpmax,\r\n # otherwise the cubic step is taken.\r\n\r\n if (brackt):\r\n theta = 3.0 * (fp - fy) / (sty - stp) + dy + dp\r\n s = max((abs(theta), abs(dy), abs(dp)))\r\n gamma = s * np.sqrt((theta / s) ** 2 - (dy / s) * (dp / s))\r\n if (stp > sty):\r\n gamma = -gamma\r\n\r\n p = (gamma - dp) + theta\r\n q = ((gamma - dp) + gamma) + dy\r\n r = p / q\r\n stpc = stp + r * (sty - stp)\r\n stpf = stpc\r\n elif (stp > stx):\r\n stpf = stpmax\r\n else:\r\n stpf = stpmin\r\n return stpf", "def random_points_ascending_hillclimber(house, all_houses, waters, total_value_map):\n total_value_map_NEW = total_value_map\n\n # check in welke range het huis geplaats kan worden, niet kijkend naar water of andere \n rangex = MAXIMUM_WIDTH - house.width\n rangey = MAXIMUM_HEIGHT - house.length\n\n for x in range(100):\n # maak random x en y coördinaat\n randomizex = rangex * random()\n randomizey = rangey * random()\n\n # bewaar oude locaties\n tempx = house.bottom_left[0]\n tempy = house.bottom_left[1]\n \n # verander locatie\n bottom_left = (randomizex,randomizey)\n house.location(bottom_left)\n\n # als je je huis op nieuwe locatie kan plaatsen\n if place_house(house, all_houses, waters) == True:\n # bereken nieuw waarde map, waarin huis is verplaatst\n total_value_map_temp = 0\n for item in all_houses.values():\n for house in item:\n house.extra_meters()\n total_value_map_temp += house.totalprice()\n\n # als waarde met nieuwe locatie hoger is, verander deze\n if total_value_map_NEW < total_value_map_temp:\n total_value_map_NEW = total_value_map_temp\n # als waarde niet hoger is verander naar oude locatie en bereken weer totale waarde map\n else:\n bottom_left = (tempx,tempy)\n house.location(bottom_left)\n if place_house(house, all_houses, waters) == True:\n for item in all_houses.values():\n for houses in item:\n houses.extra_meters()\n houses.totalprice()\n # als huis niet geplaats kan worden, 
verander naar oude locatie en bereken weer totale waarde map\n else:\n bottom_left = (tempx,tempy)\n house.location(bottom_left)\n if place_house(house, all_houses, waters) == True:\n for item in all_houses.values():\n for houses in item:\n houses.extra_meters()\n houses.totalprice()\n\n return all_houses, total_value_map_NEW", "def _linear_nearest_neighbour(all_points: list, pivot: tuple):\n best_dist = None\n nearest = None\n for point in all_points:\n dist = _distance(point, pivot)\n if best_dist is None or dist < best_dist:\n best_dist = dist\n nearest = point\n return best_dist, nearest", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import PriorityQueue\n\n pq = PriorityQueue()\n visited = []\n start = problem.getStartState()\n mapper = {}\n \n mapper[problem.getStartState()] = None\n pq.push(problem.getStartState(), 1)\n\n while (not pq.isEmpty()):\n point = pq.pop()\n if problem.isGoalState(point):\n current = point\n l = []\n while mapper[current] != None:\n tup = mapper[current]\n l.append(tup[1])\n current = tup[0]\n l.reverse()\n print l\n return l\n #util.raiseNotDefined()\n if not (point in visited):\n visited.append(point)\n succs = problem.getSuccessors(point)\n succs.reverse()\n for child in succs:\n if not (child[0] in mapper):\n pq.push(child[0], child[2]) #child has (xy, direction, weight)\n mapper[child[0]] = point, child[1]\n # util.raiseNotDefined()", "def _reset_upper_lower_points(keys:list,move_up:dict,num_points:dict,upper_point:dict,lower_point:dict)->tuple:\n for key in keys:\n if move_up[key]:\n if upper_point[key]<num_points[key]-1:\n upper_point[key]+=1\n lower_point[key]+=1\n else:\n if lower_point[key]>0:\n upper_point[key]-=1\n lower_point[key]-=1\n return (lower_point,upper_point)", "def extra(maze):\n # TODO: Write your code here\n heuristic_lookup = {} \n objs = maze.getObjectives()\n corner_list = maze.getObjectives()\n start = maze.getStart()\n path = []\n dim = maze.getDimensions()\n visited = {}\n lookup_table = {}\n p_queue = []\n edgeset = []\n mintree = {}\n start_heuristic = 0 + multi_dot_heuristic_query(maze, start, objs, edgeset, mintree) * 2\n heuristic_lookup[(start, tuple(objs))] = start_heuristic\n start_state = state(start, corner_list)\n lookup_table[state(start, corner_list)] = (start_heuristic, 0, state((-2, -2)))\n p_queue.append((start_heuristic, state(start, corner_list)))\n while p_queue:\n pair = p_queue.pop(0)\n visited[pair[1]] = lookup_table.get(pair[1])[2]\n if not pair[1].getlist():\n current_state = pair[1]\n while current_state != start_state:\n path.append(current_state.getpos())\n current_state = visited.get(current_state)\n path.append(start)\n path.reverse()\n return path\n else: \n list_of_neighbors = maze.getNeighbors(pair[1].getpos()[0], pair[1].getpos()[1])\n for coordinates in list_of_neighbors:\n current_state = state(coordinates)\n if coordinates in pair[1].getlist():\n new_list = copy.copy(pair[1].getlist())\n new_list.remove(coordinates)\n current_state = state(coordinates, new_list)\n else:\n current_state = state(coordinates, pair[1].getlist()) \n if current_state in visited:\n continue\n if current_state in lookup_table:\n if (lookup_table.get(current_state)[0], current_state) in p_queue:\n cost = lookup_table.get(pair[1])[1] + 1\n queried_heuristic = 0\n if (current_state.getpos(), tuple(current_state.getlist())) in heuristic_lookup:\n queried_heuristic = heuristic_lookup.get((current_state.getpos(), tuple(current_state.getlist())))\n else:\n queried_heuristic = 
multi_dot_heuristic_query(maze, current_state.getpos(), current_state.getlist(), edgeset, mintree) * 2\n heuristic_lookup[(current_state.getpos(), tuple(current_state.getlist()))] = queried_heuristic\n heuristic = queried_heuristic + cost\n old_heuristic = lookup_table.get(current_state)[0]\n if heuristic < lookup_table.get(current_state)[0]:\n lookup_table[current_state] = (heuristic, cost, pair[1])\n p_queue.remove((old_heuristic, current_state))\n bisect.insort(p_queue, (heuristic, current_state))\n else:\n cost = lookup_table.get(pair[1])[1] + 1\n queried_heuristic = 0\n if (current_state.getpos(), tuple(current_state.getlist())) in heuristic_lookup:\n queried_heuristic = heuristic_lookup.get((current_state.getpos(), tuple(current_state.getlist()))) \n else:\n queried_heuristic = multi_dot_heuristic_query(maze, current_state.getpos(), current_state.getlist(), edgeset, mintree) * 2\n heuristic_lookup[(current_state.getpos(), tuple(current_state.getlist()))] = queried_heuristic\n heuristic = queried_heuristic + cost\n lookup_table[current_state] = (heuristic, cost, pair[1])\n bisect.insort(p_queue, (heuristic, current_state))\n\n return []", "def backtracking(f, slope, x, p, a=1, rho=.9, c=10e-4):\n while f(x + a*p) > f(x) + c*a*slope*p:\n a = rho*a\n return a", "def my_line_search(c1, c2, pk, xk, old_x=None, alpha_0=0, alpha_max=1, method=\"sd\"):\n phi0 = phi_function(0, pk, xk)\n dphi0 = phi_prime(pk, xk)\n\n # choose alpha_1\n if old_x is not None and dphi0 != 0 and method == \"sd\":\n alpha_1 = min(1.0, 1.01 * 2 * (rosenbock2Nd(xk, 0) - rosenbock2Nd(old_x, 0)) / dphi0)\n else:\n alpha_1 = 1.0\n\n if alpha_1 <= 0:\n alpha_1 = 1.0\n\n if alpha_max is not None:\n alpha_1 = min(alpha_1, alpha_max)\n\n alpha_vec = [alpha_0, alpha_1]\n\n i = 1\n while True:\n # alpha i = ai\n alpha_i = alpha_vec[i]\n # compute phi(ai)\n phi_i = phi_function(alpha_i, pk, xk)\n # Armijo condition.\n if phi_i > phi0 + c1 * alpha_i * dphi0 \\\n or (i > 1 and phi_function(alpha_i, pk, xk) >= phi_function(alpha_vec[i - 1], pk, xk)):\n return zoom(alpha_low=alpha_vec[i - 1], alpha_high=alpha_vec[i], xk=xk, pk=pk, c1=c1, c2=c2), i\n\n # compute phi prime at alpha i (ai).\n phi_prime_alpha_i = phi_prime(pk, xk + alpha_i * pk)\n # curvature condition.\n if abs(phi_prime_alpha_i) <= -c2 * dphi0:\n return alpha_i, i\n\n if phi_prime_alpha_i >= 0:\n return zoom(alpha_low=alpha_i, alpha_high=alpha_vec[i - 1], xk=xk, pk=pk, c1=c1, c2=c2), i\n\n alpha_vec.append(random.uniform(alpha_i, alpha_max))\n i += 1", "def fn(x, y):\n x, y = abs(x), abs(y) # symmetry \n if x == y == 0: return 0 \n if x + y == 2: return 2\n return 1 + min(fn(x-2, y-1), fn(x-1, y-2))", "def Checker(a,b,n,x):\n if n==0:\n if abs(a[0]-b[0])>=x: #if the changes in eta from one time step to another is more than .05mm\n return True #return true to continue the loop\n else:\n return False #stop the loop (this only happens if all of the points had a change of less than .05mm)\n elif abs(a[n]-b[n])>=x: #this checks each of the points in the channel \n return True #if any have too big a change the loop continues\n else: #if that point in the channel has small enough change\n Checker(a,b,n-1) #check the next point in the channel", "def advance_retreat_method(loss_function: rosenbrock, start: point, direction: list, step=0, delta=0.1) -> tuple:\n alpha0, point0 = step, start\n\n alpha1 = alpha0 + delta\n point1 = point0 + point(direction[0] * delta, direction[1] * delta)\n if loss_function.f(point0) < loss_function.f(point1):\n while True:\n delta *= 2\n 
alpha2 = alpha0 - delta\n point2 = point0 - point(direction[0] * delta, direction[1] * delta)\n if loss_function.f(point2) < loss_function.f(point0):\n alpha1, alpha0 = alpha0, alpha2\n point1, point0 = point0, point2\n else:\n return alpha2, alpha1\n else:\n while True:\n delta *= 2\n alpha2 = alpha1 + delta\n point2 = point1 + point(direction[0] * delta, direction[1] * delta)\n if loss_function.f(point2) < loss_function.f(point1):\n alpha0, alpha1 = alpha1, alpha2\n point0, point1 = point1, point2\n else:\n return alpha0, alpha2", "def fn(u):\n if u == n-1: return 1 # boundary condition \n ans = 0\n for v, _ in graph[u]: \n if dist[u] > dist[v]: ans += fn(v)\n return ans", "def get_close_interest_points(\n points, dicvar=None, var=None, around=2, points_compare=None\n):\n grad_to_lat = 1 / 111\n grad_to_lon = 1 / 85\n lons = [p[0] for p in points]\n lats = [p[1] for p in points]\n min_lons = [lon - around * grad_to_lon for lon in lons]\n max_lons = [lon + around * grad_to_lon for lon in lons]\n min_lats = [lat - around * grad_to_lat for lat in lats]\n max_lats = [lat + around * grad_to_lat for lat in lats]\n if points_compare is None:\n df = pd.DataFrame({\"lon\": dicvar[\"lon\"], \"lat\": dicvar[\"lat\"]})\n elif points_compare is not None:\n lons = [p[0] for p in points]\n lats = [p[1] for p in points]\n df = pd.DataFrame({\"lon\": lons, \"lat\": lats})\n points_around = np.empty((len(points),), dtype=\"float64\")\n i = 0\n for min_lon, max_lon, min_lat, max_lat in tqdm(\n zip(min_lons, max_lons, min_lats, max_lats),\n desc=f\"### GETTING POINTS FOR {var}\",\n ):\n mybool = (\n (df[\"lat\"] >= min_lat)\n & (df[\"lat\"] <= max_lat)\n & (df[\"lon\"] >= min_lon)\n & (df[\"lon\"] <= max_lon)\n )\n try:\n points_around[i] = df.loc[mybool, :].shape[0]\n except:\n points_around[i] = 0\n i += 1\n return points_around", "def try_waypoints(waypoint_data, current_point, segmented_points):\n\n # waypoint_data will be a list [waypoint_n, ... , waypoint_w]\n # where waypoint_n ... 
w is (lat, lng)\n\n # store the waypoints retreived and compare their crime_index\n # ret [{dicte}, {dictw}]\n waypoint_geohash_data_all = get_position_geohash(waypoint_data)\n crime_index_storage = []\n for data in waypoint_geohash_data_all:\n crime_index_storage.append(data['crime_index'])\n crime_index_storage.append(current_point['crime_index'])\n\n lowest_crime_index = min(*crime_index_storage)\n\n # check and assemble dict for lowest_crime_index waypoint\n generate_waypoint(lowest_crime_index,\n waypoint_geohash_data_all,\n segmented_points)", "def nearestNeighbours(xObs, xMod):\n\n\txObs=np.asarray(xObs)\n\txMod=np.asarray(xMod)\n\tkept=np.copy(xMod)\n\tLObs=len(xObs)\n\tLMod=len(xMod)\n\txObs=np.expand_dims(xObs, axis=1)\n\txMod=np.expand_dims(xMod, axis=1)\n\txObs=np.repeat(xObs, LMod, axis=1)\n\txMod=np.repeat(xMod, LObs, axis=1)\n\txMod=xMod.T\n\tdiffs=xObs-xMod\n\t#interesting point: the smallest point (the one you are looking for) will be\n\t#the point just before the first negative value in a row\n\t#this could be used in an alternative method much to your advantage\n\ttemp=np.greater(diffs,0)\n\taltered=temp*diffs + np.invert(temp)*(10**30)\n\tmins=altered.min(1)\t\n\tmins=np.expand_dims(mins, axis=1)\n\tmins=np.repeat(mins, LMod, axis=1)\n\tplaced=np.equal(mins, diffs)*np.repeat(np.expand_dims(np.arange(0,LMod), axis=1), LObs, axis=1).T\n\tplaced1=np.sum(placed, axis=1)\n\tclosest1=kept[placed1]\n\tplaced2=np.add(placed1,1)\n\t#below deals with the fringe case; when there is no model x value greater than\n\t#a specific observation x value \n\ttemp=np.where(placed2 > (len(kept)-1))\n\tplaced2[temp]=placed2[temp]-1\n\tclosest2=kept[placed]\n\t#print(\"-----------------\")\n\t#print(closest1, closest2)\n\treturn placed1, placed2, closest1", "def test_find_closest_waypoints_nearest(self):\n planner = WaypointPlanner(make_example_base_waypoints())\n\n planner.position = Vector3(0, 0, 0)\n waypoints = planner.find_closest_waypoints(1)\n self.assertEqual(1, len(waypoints))\n self.assertEqual(0, waypoints[0].pose.pose.position.x)\n self.assertEqual(0, waypoints[0].pose.pose.position.y)\n self.assertEqual(0, waypoints[0].pose.pose.position.z)\n\n planner.position = Vector3(0.9, 0.9, 0)\n waypoints = planner.find_closest_waypoints(2)\n self.assertEqual(2, len(waypoints))\n self.assertEqual(1, waypoints[0].pose.pose.position.x)\n self.assertEqual(1, waypoints[0].pose.pose.position.y)\n self.assertEqual(2, waypoints[1].pose.pose.position.x)\n self.assertEqual(2, waypoints[1].pose.pose.position.y)\n\n # Check it wraps back around to the start.\n planner.position = Vector3(0.9, 0.9, 0)\n waypoints = planner.find_closest_waypoints(3)\n self.assertEqual(3, len(waypoints))\n self.assertEqual(1, waypoints[0].pose.pose.position.x)\n self.assertEqual(1, waypoints[0].pose.pose.position.y)\n self.assertEqual(2, waypoints[1].pose.pose.position.x)\n self.assertEqual(2, waypoints[1].pose.pose.position.y)\n self.assertEqual(0, waypoints[2].pose.pose.position.x)\n self.assertEqual(0, waypoints[2].pose.pose.position.y)", "def findDJI(startVert, endVert): \n\n dist = {}\n dist[startVert] = 0\n prev = {}\n prev[startVert] = None\n q = heap.Heap()\n q.insert(startVert, dist[startVert])\n while (q):\n # current is the next unvisited, closest node to start\n current = q.pop()\n \n # check to see if we found a better path to any\n # of current's neighbors\n for n in current.getConnections():\n if n not in dist:\n # we found a new node\n dist[n] = dist[current] + current.getWeight(n)\n prev[n] = current\n 
q.insert(n, dist[n])\n if dist[current] + current.getWeight(n) < dist[n]:\n # we found a better path\n dist[n] = dist[current] + current.getWeight(n)\n prev[n] = current\n q.decreaseKey(n, dist[n])\n if endVert in dist:\n return backtrack(startVert, endVert, prev)\n else:\n return None", "def getNextNodeUsingCellDiff(kGoalState):\n \n global fringe\n global solutions\n\n \n\n\n\n minNode = None\n minCost = 99999999999\n minNodeIndex = -1\n\n \n pnode = None\n pcost = None\n\n if len(solutions)>0 and solutions[0] != None:\n pnode = solutions[0];\n pcost = getHValueForNode(pnode,kGoalState)\n #print pnode, pcost\n # raw_input()\n \n\n\n\n for idx,node in enumerate(fringe):\n #get the heu. function values\n g_value = getHValueForNode(node,kGoalState)\n \n\n if g_value < minCost:\n minNode = node\n minNodeIndex = idx\n minCost = g_value\n\n\n fringe.pop(minNodeIndex)\n c = getHValueForNode(minNode,kGoalState)\n if pnode != None:\n if c > pcost:\n minNode = None\n \n return minNode", "def pythTripletFinder(sum1):\n a = 0\n b = 0\n sum1 = int(sum1)\n for x in range(1,sum1):\n\tfor y in range(1,sum1):\n\t if (x*x + y*y) == (sum1 - x -y)**2 :\n\t\treturn x,y,sum1-x-y\n return 0,0,0", "def steepest_descent_method(fun, dfun, xk, E, minimize):\n counter = 0\n s0 = -dfun(xk)\n p = lambda a: fun(xk + s0 * a)\n a0, f_appeals = minimize(p, 0, 1, E)\n counter += f_appeals\n xk_next = xk + a0 * s0\n\n while np.linalg.norm(xk_next - xk) > E:\n xk = xk_next\n sk = -dfun(xk)\n p = lambda a: fun(xk + sk * a)\n ak, f_appeals = minimize(p, 0, 1, E)\n counter += f_appeals\n xk_next = xk + ak * sk\n\n return xk_next, fun(xk_next), counter", "def endpoints(line_points):\n neighbors = []\n for p in line_points:\n aux = 0\n for q in line_points:\n if np.linalg.norm(p-q) == 1:\n aux += 1\n neighbors.append(aux)\n e_points = np.where(np.array(neighbors)==1)\n return line_points[e_points]", "def my_Newton( fct, df_dx, x0):\r\n xn = float(x0)\r\n eps = 1e-5\r\n N = 20\r\n i = 0\r\n while abs( fct( xn**(i + 1)) - fct( xn**i)) > eps and i < N:\r\n x_next = xn - fct(xn)/df_dx(xn)\r\n print( i, 'fct value', abs( fct(xn)), x_next)\r\n xn = x_next\r\n i += 1\r\n if abs( fct( xn)) < eps:\r\n return x_next\r\n else: #solution did not converge\r\n return np.nan", "def extract_staypoints(positionfixes, method='sliding', \n dist_threshold=100, time_threshold=5*60, epsilon=100,\n dist_func=haversine_dist):\n ret_staypoints = pd.DataFrame(columns=['arrival_at', 'departure_at', 'geom'])\n\n # TODO We have to make sure that the user_id is taken into account.\n\n if method == 'sliding':\n # Algorithm from Li et al. (2008). 
For details, please refer to the paper.\n pfs = positionfixes.sort_values('tracked_at').to_dict('records')\n num_pfs = len(positionfixes)\n\n i = 0\n j = 1\n while i < num_pfs:\n if j == num_pfs:\n # We're at the end, this can happen if in the last \"bin\", \n # the dist_threshold is never crossed anymore.\n break\n else:\n j = i + 1\n while j < num_pfs:\n dist = dist_func(pfs[i]['geom'].x, pfs[i]['geom'].y, \n pfs[j]['geom'].x, pfs[j]['geom'].y)\n\n if dist > dist_threshold:\n delta_t = pfs[j]['tracked_at'] - pfs[i]['tracked_at']\n if delta_t.total_seconds() > time_threshold:\n staypoint = {}\n staypoint['user_id'] = pfs[i]['user_id']\n staypoint['geom'] = Point(np.mean([pfs[k]['geom'].x for k in range(i, j)]), \n np.mean([pfs[k]['geom'].y for k in range(i, j)]))\n staypoint['elevation'] = np.mean([pfs[k]['elevation'] for k in range(i, j)])\n staypoint['started_at'] = pfs[i]['tracked_at']\n staypoint['finished_at'] = pfs[j]['tracked_at']\n ret_staypoints = ret_staypoints.append(staypoint, ignore_index=True)\n # TODO Discussion: Is this last point really a staypoint? As we don't know if the\n # person \"moves on\" afterwards...\n if j == num_pfs - 1:\n staypoint = {}\n staypoint['user_id'] = pfs[j]['user_id']\n staypoint['geom'] = Point(pfs[j]['geom'].x, pfs[j]['geom'].y)\n staypoint['elevation'] = pfs[j]['elevation']\n staypoint['started_at'] = pfs[j]['tracked_at']\n staypoint['finished_at'] = pfs[j]['tracked_at']\n ret_staypoints = ret_staypoints.append(staypoint, ignore_index=True)\n i = j\n break\n j = j + 1\n\n elif method == 'dbscan':\n pass\n\n\n ret_staypoints = gpd.GeoDataFrame(ret_staypoints, geometry='geom')\n\n return ret_staypoints", "def at_b (self):\n self.argc = int((len(n.coord[0]))/2)\n self.pts_con = np.array(self.coord[:,self.argc:len(n.coord[0])])\n\n self.xd = self.xdi\n self.zd = self.zdi \n \n for i, x in enumerate(self.xdi):\n self.aux_con = self.pts_con[0] - x \n self.arg1 = np.argmin(abs(self.aux_con)) \n \n if (self.aux_con[self.arg1] < 0 and self.arg1 == 0) or (self.aux_con[self.arg1] > 0 and self.arg1 == len(self.aux_con)-1):\n self.yd[i] = 99999.\n #print(self.yd[i],self.arg1)\n #print(self.aux_con)\n \n elif (self.aux_con[self.arg1] > 0 and self.aux_con[self.arg1+1] > self.aux_con[self.arg1]): #(self.aux_con[self.arg1] < 0 and self.aux_con[self.arg1-1] > self.aux_con[self.arg1]) or \n self.yd[i] = 99999.\n #print(self.yd[i],self.arg1)\n #print(self.aux_con)\n \n elif self.aux_con[self.arg1] < 0:\n #print(self.arg1)\n self.arg1 = self.arg1 + self.argc\n self.arg2 = self.arg1 - 1\n self.yd[i] = self.coord[1,n.arg1] + (x-self.coord[0,n.arg1])*(self.coord[1,n.arg2]-self.coord[1,n.arg1])/(self.coord[0,n.arg2]-self.coord[0,n.arg1])\n #print(self.yd[i],self.arg1,self.arg2)\n #print(self.aux_con)\n\n elif self.aux_con[self.arg1] > 0:\n #print(self.arg1) \n self.arg1 = self.arg1 + self.argc\n self.arg2 = self.arg1 + 1\n self.yd[i] = self.coord[1,n.arg1] + (x-self.coord[0,n.arg1])*(self.coord[1,n.arg2]-self.coord[1,n.arg1])/(self.coord[0,n.arg2]-self.coord[0,n.arg1]) \n #print(self.yd[i],self.arg1,self.arg2)\n #print(self.aux_con)\n \n #print('Defensa {0}\\n{1}: {2}\\n{3}: {4}'.format(i,self.arg1,self.aux_con[self.arg1],self.arg2,self.aux_con[self.arg2])) \n \n #self.yd = self.yd\n self.b = np.array([self.xd,self.yd,self.zd])\n #self.b.loc[:,('y')] = self.b.loc[:,('y')] ", "def fun(x0, x1, y0, y1, xx, yy): \n\n # Look for point of intersection between interpolated curve between nodes in x, y\n # and the normal to the face between nodes (x0, y0) and (x1, y1)\n # 
Transform coordinate axes\n # Center of face is xs, ys\n xs = (x0 + x1)/2.\n ys = (y0 + y1)/2.\n\n if abs(y1 - y0) > abs(x1 - x0):\n theta = arctan((x1 - x0)/(y1 - y0))\n theta2 = arctan((xx - xs)/(yy - ys))\n dy = (yy - ys)/cos(theta2)\n xn = copy(xx)\n yn = copy(yy)\n xn = dy*sin(theta2 - theta)\n yn = dy*cos(theta2 - theta)\n w = barycentric_weight(yn)\n y2 = - yn\n f = zeros(len(y2), float)\n ss = sum(w/y2)\n f[:] = w/y2/ss\n dy = dot(f, xn)\n xny = xs + dy*sin(theta + pi/2.)\n yny = ys + dy*cos(theta + pi/2.)\n\n else: \n theta = arctan((y1 - y0)/(x1 - x0))\n theta2 = arctan((yy - ys)/(xx - xs))\n dx = (xx - xs)/cos(theta)\n xn = copy(xx)\n yn = copy(yy)\n xn = dx*cos(theta2 - theta)\n yn = dx*sin(theta2 - theta)\n w = barycentric_weight(xn)\n x2 = - xn\n f = zeros(len(x2), float)\n ss = sum(w/x2)\n f[:] = w/x2/ss\n dy = dot(f, yn)\n xny = xs + dy*cos(theta + pi/2.)\n yny = ys + dy*sin(theta + pi/2.)\n \n return xny, yny" ]
[ "0.6356492", "0.6195878", "0.5991574", "0.58792716", "0.584689", "0.58225703", "0.5817831", "0.5659458", "0.5653146", "0.56466967", "0.56169575", "0.56137437", "0.5609491", "0.55890185", "0.5563145", "0.55528545", "0.55420315", "0.55300105", "0.552903", "0.55165887", "0.54849434", "0.5465693", "0.5448942", "0.54445964", "0.54340845", "0.5428142", "0.54074407", "0.5391133", "0.53836876", "0.5381512", "0.5376614", "0.53621906", "0.535667", "0.53412175", "0.53385043", "0.5334061", "0.5319092", "0.5310655", "0.5282092", "0.5277969", "0.52776015", "0.5268402", "0.5260915", "0.5257837", "0.525419", "0.52319527", "0.52184665", "0.5214345", "0.5199785", "0.51930237", "0.5180216", "0.51800674", "0.51675355", "0.51566243", "0.514789", "0.5136972", "0.51364315", "0.51353365", "0.51339716", "0.5128374", "0.5127562", "0.51223767", "0.51215553", "0.51200235", "0.50980043", "0.5092775", "0.50898695", "0.50883883", "0.5087958", "0.50838584", "0.5071932", "0.50701773", "0.506864", "0.5066758", "0.50656646", "0.5060289", "0.5058903", "0.5055175", "0.50535786", "0.50525", "0.5051464", "0.5049036", "0.504337", "0.5042207", "0.5041248", "0.50400907", "0.50349116", "0.50344616", "0.5029865", "0.50297403", "0.50245035", "0.50238395", "0.5022998", "0.5017892", "0.5015724", "0.50134826", "0.50128806", "0.5011929", "0.50117373", "0.50096285", "0.5005391" ]
0.0
-1
Fit the histogram of the input image under mask with the reference image.
def ce_fit(inp_image, ref_image, mask_image): hist_res = Util.histc(ref_image, inp_image, mask_image) args = hist_res["args"] scale = hist_res["scale"] data = [hist_res['data'], inp_image, hist_res["ref_freq_bin"], mask_image, int(hist_res['size_img']), hist_res['hist_len']] res = amoeba(args, scale, hist_func, 1.e-4, 1.e-4, 500, data) resu = ["Final Parameter [A,B]:", res[0], "Final Chi-square :", -1*res[1], "Number of Iteration :", res[2]] corrected_image = inp_image*res[0][0] + res[0][1] result = [resu,"Corrected Image :",corrected_image] del data[:], args[:], scale[:] return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hist2d(x,y,nbins = 50 ,maskval = 0,saveloc = '',labels=[],slope = 1,sloperr = 0):\n\t# Remove NANs and masked values\n\tgood = where((isnan(x) == False) & (isnan(y) == False) & (x != maskval) & (y != maskval))\n\tx = x[good]\n\ty = y[good]\n\n\t# Create histogram\n\tH,xedges,yedges = histogram2d(x,y,bins=nbins)\n\t# Reorient appropriately\n\tH = rot90(H)\n\tH = flipud(H)\n\t# Mask zero value bins\n\tHmasked = ma.masked_where(H==0,H)\n\t# Find average values in y:\n\tyavgs = []\n\tystds = []\n\txposs = []\n\tfor j in range(len(xedges)-1):\n\t\ttoavg = where((x > xedges[j]) & (x < xedges[j+1]))\n\t\txpos = np.mean(x[toavg])\n\t\tyavg = np.median(y[toavg])\n\t\tystd = np.std(y[toavg])/len(y[toavg])\n\t\txposs.append(xpos)\n\t\tyavgs.append(yavg)\n\t\tystds.append(ystd)\n\t# Begin creating figure\n\tplt.figure(figsize=(12,10))\n\t# Make histogram pixels with logscale\n\tplt.pcolormesh(xedges,yedges,Hmasked,\n\t norm = LogNorm(vmin = Hmasked.min(),\n\t vmax = Hmasked.max()),\n\t\t \t cmap = plt.get_cmap('Spectral_r'))\n\t# Create fit line x-array\n\tuplim = nmax(x)+5\n\tdolim = nmin(x)-5\n\tx_range = arange(dolim,uplim)\n\t# Plot fit line\n\tplt.plot(x_range,slope*x_range,color = 'royalblue',linewidth = 3,label = 'Slope = {0}, Uncertainty = {1}'.format(slope,sloperr))\n\t# Plot average points\n\tplt.errorbar(xposs,yavgs,yerr = ystds,fmt = 'D',color='k',markersize = 5)\n\t# Set plot limits\n\tplt.xlim(dolim+5,uplim-5)\n\tplt.ylim(nmin(y),nmax(y))\n\t# Add colourbar\n\tcbar = plt.colorbar()\n\t# Add labels\n\tif labels != []:\n\t title,xlabel,ylabel,zlabel = labels\n\t plt.xlabel(xlabel)\n\t plt.ylabel(ylabel)\n\t plt.title(title)\n\t cbar.ax.set_ylabel(zlabel)\n\t plt.legend(loc = 'best',fontsize = 15)\n\t# Save plot\n\tif saveloc != '':\n\t\tplt.savefig(saveloc)\n\tplt.close()\n\t# Return histogram\n\treturn xedges,yedges,Hmasked", "def overlay_prob(image, mask, cutoff=0.5):\n if len(image.shape) == 3:\n image = image[: ,: ,0]\n if len(mask.shape) == 3:\n mask = mask[: ,: ,0]\n if np.amax(image) > 100:\n image = image /255\n\n mask = mask>=cutoff\n mask = mask.astype(int)\n masked = np.ma.masked_where(mask == 0, mask)\n\n plt.figure()\n plt.subplot(1, 2, 1)\n plt.imshow(image, 'gray', interpolation='nearest')\n plt.subplot(1, 2, 2)\n plt.imshow(image, 'gray', interpolation='nearest')\n plt.imshow(masked, 'jet', interpolation='nearest', alpha=0.5)\n plt.show()", "def fit_reference(self, img):\n if self.refzone:\n meanfit, fit = self._get_reference_fit(img)\n img = self._overlay_ref_fit(img, meanfit, fit)\n self._set_offset(*meanfit)\n\n return img", "def describe(self, image, mask=None):\n histogram = cv2.calcHist([image], [0, 1, 2], mask, self.bins, [0, 256, 0, 256, 0, 256])\n cv2.normalize(histogram, histogram)\n\n return histogram.flatten()", "def _hist_match_image(im, targ, mask, inplace=True):\n # Copy?\n if not inplace:\n im = im.copy()\n\n # Add a small amount of random noise to break ties for sorting in next\n # step.\n im += 0.1 * np.random.rand(*im.shape)\n\n # Sort image pixels (we actually only need indices of sort)\n if mask is None:\n idcs = np.argsort(im.flat)\n else:\n idcs = np.argsort(im[mask].flat)\n\n # Replace image histogram with target histogram, using idcs to place\n # pixels at correct positions\n svim = np.empty(len(idcs))\n svim[idcs] = targ\n if mask is None:\n im[:] = svim.reshape(im.shape)\n else:\n im[mask] = svim\n\n # Return?\n if not inplace:\n return im", "def calculateHistogram(self):\n \n # Define color map\n colors = [ (255,0,0),(0,255,0),(0,0,255) 
]\n # Define empty image to plot histogram in\n plot_to_fill = np.zeros((280,400,3))\n # Define bins of the histogram\n bins = np.arange(256).reshape(256,1)\n \n # Boucle sur les canaux\n for channel, color in enumerate(colors):\n # Calcul de l'histogramme\n hist_item = cv2.calcHist(self.frame,[channel],None,[256],[0,256])\n # Normalisation\n cv2.normalize(hist_item,hist_item,0,255,cv2.NORM_MINMAX)\n # Conversion\n hist = np.int32(np.around(hist_item))\n pts = np.int32(np.column_stack((bins, hist)))\n cv2.polylines(plot_to_fill, [pts], False, color)\n # Mettre dans le bon sens\n histplot = np.flipud(plot_to_fill)\n histplot = np.uint8(histplot)\n \n # Conversion en objet QPixelMap\n self.histplot_qpix = self.convertToQPixelmap(histplot)", "def predict_mask(logit, EMPTY_THRESHOLD, MASK_THRESHOLD):\n #pred mask 0-1 pixel-wise\n #n = logit.shape[0]\n IMG_SIZE = logit.shape[-1] #256\n #EMPTY_THRESHOLD = 100.0*(IMG_SIZE/128.0)**2 #count of predicted mask pixles<threshold, predict as empty mask image\n #MASK_THRESHOLD = 0.22\n #logit = torch.sigmoid(torch.from_numpy(logit)).view(n, -1)\n #pred = (logit>MASK_THRESHOLD).long()\n #pred[pred.sum(dim=1) < EMPTY_THRESHOLD, ] = 0 #bug here, found it, the bug is input shape is (256, 256) not (16,256,256)\n logit = sigmoid(logit)#.reshape(n, -1)\n pred = (logit>MASK_THRESHOLD).astype(np.int)\n if pred.sum() < EMPTY_THRESHOLD:\n return np.zeros(pred.shape).astype(np.int)\n else:\n return pred", "def mask_and_fit(mask, binary_warped, flag):\n img = cv2.bitwise_and(binary_warped, binary_warped, mask=mask)\n x, y = extract_pixels(img)\n fit, foundFlag, confidence_index = check_and_fit(x, y, flag)\n return fit, foundFlag, confidence_index", "def _overlay_ref_fit(self, img, mean, fit, off=25):\n\n def plus(img, x, y, val=0, r=10):\n img[x - 1:x, y - r:y + r], img[x - r:x + r, y - 1:y] = val, val\n return img\n\n if len(self.refzone) != 4:\n return img\n\n centers = [(self.ref_rc[0] - off, self.ref_rc[1] - off),\n (self.ref_rc[0] - off, self.ref_rc[1] + off),\n (self.ref_rc[0] + off, self.ref_rc[1] - off),\n (self.ref_rc[0] + off, self.ref_rc[1] + off)]\n\n img = plus(img, self.ref_rc[0], self.ref_rc[1], val=150, r=15) # final mean offset\n img = plus(img, self.ref_rc[0] + mean[0], self.ref_rc[1] + mean[1], val=0)\n for [x0, x1, y0, y1], [x_off, y_off], (cx, cy) in zip(self.refzone, fit, centers):\n img = plus(img, cx, cy, val=120, r=15) # panel fitted\n img = plus(img, cx + x_off, cy + y_off, val=0) # panel reference\n img = plus(img, x0, y0, val=150) # expected reference\n img = plus(img, x1, y1, val=150) #\n img = plus(img, x0 + x_off, y0 + y_off, val=0) # actual fitted\n img = plus(img, x1 + x_off, y1 + y_off, val=0) #\n\n return img", "def histogram_stretching(img):\n\n img_copy = np.copy(img)\n\n img_min = img_copy.min()\n img_max = img_copy.max()\n\n if img_min == img_max:\n return None\n\n img_copy = (img_copy-img_min)/(img_max-img_min) * 255\n\n return img_copy", "def create_image_fits(base_dir,fits_img,outroot, bin_file, temp_file):\n bins, min_x, max_x, min_y, max_y = read_in(base_dir+'/'+bin_file,base_dir+'/'+temp_file)\n # Create image array\n x_len = int(max_x-min_x)\n y_len = int(max_y-min_y)\n temp_array = np.zeros((x_len,y_len))\n percentage_array = np.zeros((x_len,y_len))\n for bin in bins:\n for pixel in bin.pixels:\n #print(bin.temp)\n try:\n temp_array[int(pixel.pix_x-1),int(pixel.pix_y-1)] = int(bin.temp)\n percentage_array[int(pixel.pix_x-1),int(pixel.pix_y-1)] = float(bin.percentage)\n except:\n #print(bin.temp)\n pass\n # Copy header\n 
fits_ = fits.open(base_dir+'/'+fits_img)\n hdr = header=fits_[0].header\n # Change image\n hdu = fits.PrimaryHDU(temp_array)\n hdul = fits.HDUList([hdu])\n fits.writeto(base_dir+'/component_bins.fits', temp_array.T, hdr, overwrite=True)\n fits.writeto(base_dir+'/percentage_bins.fits', percentage_array.T, hdr, overwrite=True)", "def equalize_hist(input):\n return np.float32(skimage.exposure.equalize_hist(input.numpy()))", "def SetInput(self, input: 'itkHistogramF') -> \"void\":\n return _itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHFF_SetInput(self, input)", "def histogram(img):\n BINS = 8\n RANGE = np.tile(np.array([0, 255]), (3, 1))\n\n # histogram of the first image\n r = np.ravel(img[:, :, 0])\n g = np.ravel(img[:, :, 1])\n b = np.ravel(img[:, :, 2])\n hist, endpoints = np.histogramdd([r, g, b], bins = BINS, range = RANGE)\n\n # normalize the images\n return hist/np.sum(hist)", "def getHistogram( self, img):\n bins = 256\n range_scale = [0,254]\n nivel_transparencia = 0.5\n plt.hist(img.ravel(),bins,range_scale, label=\"histogram\", alpha=nivel_transparencia);\n plt.legend(loc='upper right')\n plt.show()", "def find_histogram(vol, hist, mini, maxi, mask, use_mask):\n validsize = 0\n hist = np.zeros(hist.size, dtype=int)\n if mini == maxi:\n return -1\n\n fA = float(hist.size)/(maxi-mini)\n fB = (float(hist.size)*float(-mini)) / (maxi-mini)\n\n if use_mask:\n a = vol[mask > 0.5].flatten()\n else:\n a = vol.flatten()\n\n a = (a*fA + fB).astype(int)\n h = hist.size - 1\n\n for i in np.arange(a.size):\n hist[max(0, min(a[i], h))] += 1\n validsize += 1\n\n return hist, validsize", "def get_histogram(folder_name, image_name, save_location):\n print(\"Getting histogram for:\" + str(folder_name) + '/' + str(image_name))\n image = cv2.imread(folder_name + '/' + image_name, cv2.IMREAD_ANYDEPTH)\n plt.hist(image.ravel(), 256, [0, 65535])\n plt.xlabel('Pixel Intensity')\n plt.ylabel('Number of pixels')\n plt.title('Histogram of normalised reference image. 
Overnight2')\n plt.savefig(save_location + 'histogram.png')\n plt.savefig(save_location + 'histogram.eps', format='eps')\n # plt.show()", "def _histogram_equalize_image(image, hist_orig):\n cum_hist = np.cumsum(hist_orig)\n cum_hist = (cum_hist * 255) / cum_hist[-1]\n\n image = np.interp(image, np.linspace(0, 1, 256), np.round(cum_hist))\n\n return utils.normalize_image(image)", "def compute_histogram(self, image):\n\n # in-built function to calculate histogram\n print(\"size of image: \", np.shape(image))\n print(\"number of pixels: \", np.shape(image)[0] * np.shape(image)[1])\n # hist1 = np.ravel(cv2.calcHist([image], [0], None, [256], [0, 256]))\n # hist = np.ravel(cv2.calcHist([image], [0], None, [256], [0, 256]))\n\n # created function to calculate histogram\n hist = np.zeros(256)\n [rows, columns] = np.shape(image)\n for k in range(256):\n count = 0\n for i in range(rows):\n for j in range(columns):\n if image[i, j] == k:\n count = count + 1\n hist[k] = count\n\n # print(\"Check if histogram is same: \", np.array_equal(hist, hist1))\n\n return hist", "def histogram_equalize(im_orig):\n\n color_flag = False\n image = im_orig\n\n\n if len(im_orig.shape) == 3: #RGB image\n color_flag = True\n y_im = rgb2yiq(im_orig)\n image = y_im[:, :, 0]\n\n image *= NORMALIZE\n hist_orig, bins = np.histogram(image, range(BINS))\n hist_cum = np.cumsum(hist_orig) #cumulative distribution function\n\n cum = ((hist_cum - hist_cum.min()) / ( hist_cum.max() - hist_cum.min())) * NORMALIZE\n\n im_eq = cum[image.astype(np.uint8)]\n\n hist_eq, bins = np.histogram(im_eq, range(BINS)) #before getting back to float64 does the histogram)\n\n im_eq /= NORMALIZE\n im_eq = im_eq.astype(np.float64)\n\n\n if color_flag:\n y_im[:, :, 0] = im_eq\n im_eq = yiq2rgb(y_im)\n\n im_eq = im_eq.clip(0,1)\n return [im_eq, hist_orig, hist_eq]", "def Histogram_Matching(inImFile, outImFile, refImFile,\n number_of_histogram_levels=1024,\n number_of_match_points=7,\n threshold_at_mean_intensity=False):\n inputIm = sitk.ReadImage(inImFile)\n referenceIm = sitk.ReadImage(refImFile)\n histMatchingFilter = sitk.HistogramMatchingImageFilter()\n histMatchingFilter.SetNumberOfHistogramLevels(number_of_histogram_levels)\n histMatchingFilter.SetNumberOfMatchPoints(number_of_match_points)\n histMatchingFilter.SetThresholdAtMeanIntensity(threshold_at_mean_intensity)\n outputIm = histMatchingFilter.Execute(inputIm, referenceIm)\n if outImFile is not None:\n sitk.WriteImage(outputIm, outImFile, True)\n return outputIm", "def analyze_index(index_array, mask, histplot=False, bins=100, min_bin=0, max_bin=1, label=\"default\"):\n params.device += 1\n\n debug = params.debug\n params.debug = None\n analysis_image = None\n\n if len(np.shape(mask)) > 2 or len(np.unique(mask)) > 2:\n fatal_error(\"Mask should be a binary image of 0 and nonzero values.\")\n\n if len(np.shape(index_array.array_data)) > 2:\n fatal_error(\"index_array data should be a grayscale image.\")\n\n # Mask data and collect statistics about pixels within the masked image\n masked_array = index_array.array_data[np.where(mask > 0)]\n masked_array = masked_array[np.isfinite(masked_array)]\n\n index_mean = np.nanmean(masked_array)\n index_median = np.nanmedian(masked_array)\n index_std = np.nanstd(masked_array)\n\n # Set starting point and max bin values\n maxval = max_bin\n b = min_bin\n\n # Calculate observed min and max pixel values of the masked array\n observed_max = np.nanmax(masked_array)\n observed_min = np.nanmin(masked_array)\n\n # Auto calculate max_bin if set\n if type(max_bin) 
is str and (max_bin.upper() == \"AUTO\"):\n maxval = float(round(observed_max, 8)) # Auto bins will detect maxval to use for calculating labels/bins\n if type(min_bin) is str and (min_bin.upper() == \"AUTO\"):\n b = float(round(observed_min, 8)) # If bin_min is auto then overwrite starting value\n\n # Print a warning if observed min/max outside user defined range\n if observed_max > maxval or observed_min < b:\n print(\"WARNING!!! The observed range of pixel values in your masked index provided is [\" + str(observed_min) +\n \", \" + str(observed_max) + \"] but the user defined range of bins for pixel frequencies is [\" + str(b) +\n \", \" + str(maxval) + \"]. Adjust min_bin and max_bin in order to avoid cutting off data being collected.\")\n\n # Calculate histogram\n hist_val = [float(i[0]) for i in cv2.calcHist([masked_array.astype(np.float32)], [0], None, [bins], [b, maxval])]\n bin_width = (maxval - b) / float(bins)\n bin_labels = [float(b)]\n plotting_labels = [float(b)]\n for i in range(bins - 1):\n b += bin_width\n bin_labels.append(b)\n plotting_labels.append(round(b, 2))\n\n # Make hist percentage for plotting\n pixels = cv2.countNonZero(mask)\n hist_percent = [(p / float(pixels)) * 100 for p in hist_val]\n\n params.debug = debug\n\n if histplot is True:\n dataset = pd.DataFrame({'Index Reflectance': bin_labels,\n 'Proportion of pixels (%)': hist_percent})\n fig_hist = (ggplot(data=dataset,\n mapping=aes(x='Index Reflectance',\n y='Proportion of pixels (%)'))\n + geom_line(color='red')\n + scale_x_continuous(breaks=bin_labels, labels=plotting_labels))\n analysis_image = fig_hist\n if params.debug == 'print':\n fig_hist.save(os.path.join(params.debug_outdir,\n str(params.device) + index_array.array_type + \"hist.png\"), verbose=False)\n elif params.debug == 'plot':\n print(fig_hist)\n\n outputs.add_observation(sample=label, variable='mean_' + index_array.array_type,\n trait='Average ' + index_array.array_type + ' reflectance',\n method='plantcv.plantcv.hyperspectral.analyze_index', scale='reflectance', datatype=float,\n value=float(index_mean), label='none')\n\n outputs.add_observation(sample=label, variable='med_' + index_array.array_type,\n trait='Median ' + index_array.array_type + ' reflectance',\n method='plantcv.plantcv.hyperspectral.analyze_index', scale='reflectance', datatype=float,\n value=float(index_median), label='none')\n\n outputs.add_observation(sample=label, variable='std_' + index_array.array_type,\n trait='Standard deviation ' + index_array.array_type + ' reflectance',\n method='plantcv.plantcv.hyperspectral.analyze_index', scale='reflectance', datatype=float,\n value=float(index_std), label='none')\n\n outputs.add_observation(sample=label, variable='index_frequencies_' + index_array.array_type,\n trait='index frequencies', method='plantcv.plantcv.analyze_index', scale='frequency',\n datatype=list, value=hist_percent, label=bin_labels)\n\n if params.debug == \"plot\":\n plot_image(masked_array)\n elif params.debug == \"print\":\n print_image(img=masked_array, filename=os.path.join(params.debug_outdir, str(params.device) +\n index_array.array_type + \".png\"))\n # Store images\n outputs.images.append(analysis_image)\n\n return analysis_image", "def create_fixed_hist(self):\n hist = cv2.calcHist([self.obj], [0, 1, 2], None, [32, 8, 8],\n [0, 256, 0, 256, 0, 256])\n self.hist = cv2.normalize(hist).flatten()\n print self.hist", "def adaptive_hist(image):\n mask = np.zeros(image.shape[:2], np.uint8)\n # spatially weighted by Gaussian distribtuion?\n mask = 
cv2.ellipse(mask, (image.shape[1] // 2,image.shape[0] // 2),\n (image.shape[1] // 2,image.shape[0] // 2), 0, 0, 360, 255, -1)\n\n # RGB color histogram\n hist1 = cv2.calcHist([image], [0], mask, [16], [0, 256]).reshape(1, -1)\n hist2 = cv2.calcHist([image], [1], mask, [16], [0, 256]).reshape(1, -1)\n hist3 = cv2.calcHist([image], [2], mask, [16], [0, 256]).reshape(1, -1)\n rgb_hist = np.concatenate((hist1, hist2, hist3), axis=1)\n cv2.normalize(rgb_hist, rgb_hist)\n\n # HSV color histogram\n img_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n hist1 = cv2.calcHist([img_hsv], [0], mask, [16], [0, 256]).reshape(1, -1)\n hist2 = cv2.calcHist([img_hsv], [1], mask, [16], [0, 256]).reshape(1, -1)\n hsv_hist = np.concatenate((hist1, hist2), axis=1)\n cv2.normalize(hsv_hist, hsv_hist)\n\n # YCrCb color histogram\n img_YCrCb = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)\n hist1 = cv2.calcHist([img_YCrCb], [1], mask, [16], [0, 256]).reshape(1, -1)\n hist2 = cv2.calcHist([img_YCrCb], [2], mask, [16], [0, 256]).reshape(1, -1)\n YCrCb_hist = np.concatenate((hist1, hist2), axis=1)\n cv2.normalize(YCrCb_hist, YCrCb_hist)\n\n # Lab color histogram\n img_lab = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)\n hist1 = cv2.calcHist([img_lab], [1], mask, [16], [0, 256]).reshape(1, -1)\n hist2 = cv2.calcHist([img_lab], [2], mask, [16], [0, 256]).reshape(1, -1)\n lab_hist = np.concatenate((hist1, hist2), axis=1)\n cv2.normalize(lab_hist, lab_hist)\n\n # Hog\n #image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n #image_gray = cv2.resize(image_gray, (200,200))\n #hog_hist = hog(image_gray, orientations=8, block_norm = 'L2-Hys', pixels_per_cell=(50,50), cells_per_block=(1,1), visualize=False).reshape(1, -1)\n #cv2.normalize(hog_hist, hog_hist)\n\n # type?\n #type_hist = np.zeros(8).reshape(1,8) + 0.5\n #type_hist[0, int(image_path[-5])] = 1\n #cv2.normalize(type_hist, type_hist)\n\n #thist = np.transpose(np.concatenate((3 * rgb_hist, hsv_hist, YCrCb_hist, lab_hist, hog_hist), axis=1))\n thist = np.transpose(np.concatenate((3 * rgb_hist, hsv_hist, YCrCb_hist, lab_hist), axis=1))\n thist = thist / sum(thist)\n\n return np.transpose(thist)[0]", "def Adjust_Data(img,mask,feature_dict, normalize):\n ## Normalize image\n if normalize:\n img = Normalize_Image(img)\n\n ## Assume mask shape has 4 dimensions - mask is (batch, x, y, color-channel)\n ## color-channels are redundant, so just choose the first. \n mask = mask[:,:,:,0]\n \n ## Image_datagen performs interpolation when rotating, resulting in non-integer\n ## mask values. Round these back to integers before expanding the mask. 
\n mask = mask.round() \n mask = Expand_Mask(mask, feature_dict)\n #print(mask.shape, np.unique(mask, axis = 0))\n return (img,mask)", "def final_mask(path, output_mask, percentage=0.5):\n with fits.open(path, \"readonly\") as temp_mask:\n mask_data = temp_mask[0].data\n mask_header = temp_mask[0].header\n mask_data[mask_data >= percentage] = 1\n mask_data[mask_data < percentage] = 0\n fits.writeto(output_mask, mask_data, mask_header, clobber=True)", "def _histogram(image,\n min,\n max,\n bins):\n\n return numpy.histogram(image, bins, (min, max))[0]", "def hbond_frequency(mask):\n return mask.sum(axis=0)/len(mask)", "def _get_reference_fit(self, img):\n bw_img = 255 * (img >= self.contrast)\n fit = [center_on_box(bw_img, self.radius, self.min_ref, *ref) for ref in self.refzone]\n meanfit = num.mean(num.ma.masked_array(fit, fit == -9999), axis=0).astype('i')\n if meanfit[0] is num.ma.masked:\n raise StandardError('At least one reference box match required')\n\n return meanfit, fit", "def _compute_histogram(self, x, momentum):\n num_bins = self.histogram.size(0)\n x_detached = x.detach()\n self.bin_width = (self._max_val - self._min_val) / (num_bins - 1)\n lo = torch.floor((x_detached - self._min_val) / self.bin_width).long()\n hi = (lo + 1).clamp(min=0, max=num_bins - 1)\n hist = x.new_zeros(num_bins)\n alpha = (\n 1.0\n - (x_detached - self._min_val - lo.float() * self.bin_width)\n / self.bin_width\n )\n hist.index_add_(0, lo, alpha)\n hist.index_add_(0, hi, 1.0 - alpha)\n hist = hist / (hist.sum() + 1e-6)\n self.histogram = (1.0 - momentum) * self.histogram + momentum * hist", "def fullhistogram(img):\n maxt = img.max()\n if maxt == 0:\n return np.array([img.size])\n return nhistogram(img, np.arange(maxt+2))[0]", "def fit(self, input):\n raise NotImplementedError()", "def get_masked_ratio(mask):\n hist = mask.histogram()\n return hist[0] / np.prod(mask.size)", "def hist(img):\n bottom_half = img[img.shape[0]//2:,:] # 0:img.shape[0]//2 is the top half\n histogram = bottom_half.sum(axis=0) \n \n return histogram", "def compute_histogram(self, image):\n\n hist = [0] * 256\n x, y = image.shape[:2]\n #print(image.shape)\n for i in range(x):\n for j in range(y):\n hist[image[i, j]] += 1\n\n return hist", "def blur_mask(mask, blur_kernel, threshold=0.1):\n k = pyfits.getdata(blur_kernel)\n k = k / k.sum()\n mask = hconvolve.hconvolve(mask, k)\n mask = np.where(mask >= threshold, 1, 0).astype('int')\n return mask", "def fitTo ( self , \n dataset ,\n draw = False ,\n nbins = 50 ,\n ybins = None , \n silent = False ,\n refit = False ,\n timer = False ,\n args = () , **kwargs ) :\n if isinstance ( dataset , H2D_dset ) : dataset = dataset.dset \n elif isinstance ( dataset , ROOT.TH2 ) :\n density = kwargs.pop ( 'density' , False ) \n chi2 = kwargs.pop ( 'chi2' , False ) \n return self.fitHisto ( dataset ,\n draw = draw ,\n silent = silent ,\n density = density ,\n chi2 = chi2 , args = args , **kwargs )\n \n ## play a bit with binning cache for convolutions \n if self.yvar.hasBinning ( 'cache' ) :\n nb1 = self.yvar.getBins( 'cache' ) \n yv = getattr ( dataset , self.yvar.name , None )\n if yv and yv.hasBinning ( 'cache' ) :\n nb2 = yv.getBins('cache')\n if nb1 != nb2 :\n yv.setBins ( max ( nb1 , nb2 ) , 'cache' )\n self.info ('Adjust binning cache %s->%s for variable %s in dataset' % ( nb2 , nb1 , yv.name ) )\n elif yv :\n yv.setBins ( nb1 , 'cache' )\n self .info ('Set binning cache %s for variable %s in dataset' % ( nb1 , yv.name ) )\n \n result , f = PDF.fitTo ( self ,\n dataset ,\n draw = False , 
## false here!\n nbins = nbins ,\n silent = silent ,\n refit = refit ,\n timer = timer , \n args = args , **kwargs ) \n if not draw :\n return result , None\n\n \n ## 2D \n if 1 < nbins and isinstance ( ybins , integer_types ) and 1 < ybins :\n return result, self.draw ( None , dataset , nbins , ybins , silent = silent )\n \n if isinstance ( draw , str ) :\n if draw.upper() in ( '1' , 'X' ) :\n return result, self.draw1 ( dataset , nbins = nbins , silent = silent )\n elif draw.upper() in ( '2' , 'Y' ) :\n return result, self.draw2 ( dataset , nbins = nbins , silent = silent )\n\n ## return 2D \n return result, self.draw ( None , dataset , silent = silent )", "def histogram_equalize(img):\n\n img_copy = np.copy(img)\n\n elements,counts = np.unique(img_copy,return_counts=True)\n pdf = counts/counts.sum()\n cdf = np.cumsum(pdf)\n new_values = cdf * 255\n\n old_new_map = dict(zip(elements,new_values))\n\n img_new = np.zeros(img_copy.shape)\n for i in old_new_map:\n img_new[img_copy == i] = old_new_map[i]\n\n return img_new", "def SetInput(self, input: 'itkHistogramD') -> \"void\":\n return _itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHDF_SetInput(self, input)", "def fit(self, mask_file='', mode='+-', zscore=2):\n self.mask_file = mask_file\n self.mode = mode\n self.zscore = zscore\n self._icc_imgs = None\n self._update(force=True)", "def fit_gaussian(self, mask=None):\n data = self.data\n mask = numpy.logical_or(mask, numpy.ma.getmaskarray(data))\n fdata = data[~mask].data\n xdata = numpy.asarray([cm[~mask]\n for cm in self.bset.cmesh]).transpose()\n scale, mean, cov = fit_ndgaussian(xdata, fdata)\n return scale, mean, cov", "def SetInput(self, input: 'itkHistogramF') -> \"void\":\n return _itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHFUC_SetInput(self, input)", "def gauss_mask(shape, low):\n h, w = shape\n outw=w/2+1 # if reduced\n # we have full heightfreqs:\n irow = np.fft.fftfreq(h).reshape(h,1)\n # cols are halfed\n icol = np.fft.fftfreq(w)[:outw].reshape(1,outw)\n r = np.exp(-(icol*icol+irow*irow)/(low*low))\n return r", "def hist_eq(img):\n hist, bins = np.histogram(img.flatten(), 256, [0, 256])\n cdf = hist.cumsum()\n cdf_m = np.ma.masked_equal(cdf, 0)\n cdf_m = (cdf_m - cdf_m.min()) * 255 / (cdf_m.max() - cdf_m.min())\n cdf = np.ma.filled(cdf_m, 0).astype('uint8')\n img2 = cdf[img]\n return img2", "def hist_and_thresh(self):\n bins, occ, _ = self.histogram()\n self.thresh = np.mean(bins) # initial guess\n self.peaks_and_thresh() # in case peak calculation fails\n # if np.size(self.peak_indexes) == 2: # est_param will only find one peak if the number of bins is small\n # # set the threshold where the fidelity is max\n # self.search_fidelity(self.peak_centre[0], self.peak_widths[0] ,self.peak_centre[1])\n try: \n thresh = threshold_minimum(np.array(self.stats['Counts']), len(bins))\n int(np.log(thresh)) # if thresh <= 0 this gives ValueError\n self.thresh = thresh\n except (ValueError, RuntimeError, OverflowError): pass\n try:\n # atom is present if the counts are above threshold\n self.stats['Atom detected'] = [x // self.thresh for x in self.stats['Counts']]\n # self.fidelity, self. 
err_fidelity = np.around(self.get_fidelity(), 4) # this is a relatively slow operation\n except (ValueError, OverflowError): pass\n return bins, occ, self.thresh", "def __get_color_histogram(self, image, seed, hist_res):\n \n L=[]\n N=len(seed)\n for i in range(N):\n \n L.append(image[seed[i][1],seed[i][0]])\n image_part=np.array(L)\n \n \n hist, bins= np.histogramdd(image_part,bins=hist_res,range=((0,255),(0,255),(0,255)) )\n #hist= ndimage.gaussian_filter(hist,sigma=7) # Gaussian smoothing\n\n return hist /np.linalg.norm(hist)", "def histogram(self, mask=None, extrema=None):\r\n uni, counts = self._getcolors()\r\n return [l for l in counts]", "def __init__(self, from_img, to_img,\n from_mask=None,\n to_mask=None,\n bins=256,\n spacing=None,\n similarity='crl1',\n interp='pv',\n sigma=0,\n renormalize=False,\n dist=None):\n # Binning sizes\n from_bins, to_bins = unpack(bins, int)\n\n # Smoothing kernel sizes\n self._from_sigma, self._to_sigma = unpack(sigma, float)\n\n # Clamping of the `from` image. The number of bins may be\n # overriden if unnecessarily large.\n data, from_bins_adjusted = clamp(from_img,\n from_bins,\n mask=from_mask,\n sigma=self._from_sigma)\n if not similarity == 'slr':\n from_bins = from_bins_adjusted\n self._from_img = Nifti1Image(data, from_img.get_affine())\n\n # Set field of view in the `from` image with potential\n # subsampling for faster similarity evaluation. This also sets\n # the _from_data and _vox_coords attributes\n if spacing == None:\n npoints = NPOINTS\n else:\n npoints = None\n if from_mask == None:\n corner, size = (0, 0, 0), None\n else:\n corner, size = smallest_bounding_box(from_mask)\n self.set_fov(spacing=spacing, corner=corner, size=size, \n npoints=npoints)\n\n # Clamping of the `to` image including padding with -1\n data, to_bins_adjusted = clamp(to_img,\n to_bins,\n mask=to_mask,\n sigma=self._to_sigma)\n if not similarity == 'slr':\n to_bins = to_bins_adjusted\n self._to_data = -np.ones(np.array(to_img.shape) + 2, dtype=CLAMP_DTYPE)\n self._to_data[1:-1, 1:-1, 1:-1] = data\n self._to_inv_affine = inverse_affine(to_img.get_affine())\n\n # Joint histogram: must be double contiguous as it will be\n # passed to C routines which assume so\n self._joint_hist = np.zeros([from_bins, to_bins], dtype='double')\n\n # Set default registration parameters\n self._set_interp(interp)\n self._set_similarity(similarity, renormalize, dist=dist)", "def histMatch(images, masks=None, hist=None, optim=False,\n optim_params={'niters':10, 'stepsize':67}, rescale_kwargs={}):\n # Error check\n if optim and not have_ssim:\n raise RuntimeError('SSIM optimisation requires scikit-image module')\n\n # Load images and masks\n images, masks = _load_images_and_masks(images, masks)\n\n # If hist not provided, obtain average histogram across images\n if hist is None:\n bins = range(257)\n allCounts = np.empty((len(images), len(bins)-1))\n for i, (im, m) in enumerate(zip(images, masks)):\n tmp = im[m] if m is not None else im.flatten()\n allCounts[i,:] = np.histogram(tmp, bins=bins)[0]\n counts = allCounts.mean(axis=0).round().astype(int)\n else:\n counts, bins = hist\n\n # Obtain flattened target histogram\n targ = np.asarray(list(itertools.chain.from_iterable(\n [ [lum] * count for lum, count in zip(bins, counts) ]\n )))\n\n # Hist equate\n for im, m in zip(images, masks):\n # Rounding errors when calculating histograms may lead to small\n # mismatches between length of idcs and targ. 
If so, interpolate a\n # range of indices across targ that will make it right length\n sz = m.sum() if m is not None else im.size\n if len(targ) != sz:\n ix = np.linspace(0, len(targ)-1, sz).round().astype(int)\n t = targ[ix]\n else:\n t = targ\n\n # Do SSIM optimisation if requested\n if optim == True:\n for i in range(optim_params['niters']-1):\n tmp = _hist_match_image(im, t, m, inplace=False)\n mssim, grad = ssim(\n im, tmp, data_range=255, use_sample_covariance=False,\n gaussian_weights=True, sigma=1.5, gradient=True\n )\n im[:] = tmp + optim_params['stepsize'] * im.size * grad\n\n # Do final histogram match\n _hist_match_image(im, t, m, inplace=True)\n\n # Return\n return rescale_images(images, **rescale_kwargs)", "def _getnorm(q, mask):\n maxq = np.max(q.reshape(-1, 3), axis=0)\n hist = np.histogramdd(q.reshape(-1, 3), bins=maxq + 1, range=[[-0.5, mq + 0.5] for mq in maxq], weights=~mask.ravel())[0]\n ret = hist[q.reshape(-1, 3)[:, 0], q.reshape(-1, 3)[:, 1], q.reshape(-1, 3)[:, 2]].reshape(q.shape[:2])\n ret[mask] = 1\n return ret", "def histMasking(self, frame, handHist):\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n dst = cv2.calcBackProject([hsv], [0, 1], handHist, [0, 180, 0, 256], 1)\n\n disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (21, 21))\n cv2.filter2D(dst, -1, disc, dst)\n\n ret, thresh = cv2.threshold(dst, 150, 255, cv2.THRESH_BINARY)\n\n kernel = np.ones((5, 5), np.uint8)\n thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=7)\n # thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=5)\n # thresh = cv2.dilate(thresh, kernel, iterations=5)\n # thresh = cv2.erode(thresh, kernel, iterations=5)\n\n thresh = cv2.merge((thresh, thresh, thresh))\n return cv2.bitwise_and(frame, thresh)", "def estbg(im, mask=None, bins=None, plotalot=False, rout=(3,200), badval=nan):\n # 2009-09-02 17:13 IJC: Created!\n # 2009-09-04 15:07 IJC: Added RemoveOutliers option. Use only non-empty bins in fit.\n # 2009-09-08 15:32 IJC: Error returned is now divided by sqrt(N) for SDOM\n # 2009-11-03 00:16 IJC: Improved guess for gaussian dispersion\n # 2011-05-18 11:47 IJMC: Moved (e)gaussian imports to analysis.\n # 2012-01-01 21:04 IJMC: Added badval option\n # 2012-08-15 17:45 IJMC: Numpy's new histogram no longer accepts 'new' keyword\n # 2013-03-20 08:22 IJMC: Now works better even for small numbers\n # of pixels; thanks to A. 
Weigel @\n # ETH-Zurich for catching this!\n\n from numpy import histogram, mean, median, sqrt, linspace, isfinite, ones,std\n from pylab import find\n from scipy import optimize\n #from analysis import removeoutliers, egaussian, gaussian, stdr\n if plotalot:\n from pylab import figure, errorbar, plot, colorbar, title, hist, mean, std\n ##from analysis import imshow\n\n def gaussianChiSquared(guess, x, y, err):\n return (egaussian(guess, x, y, e=err)**2).sum()\n\n\n if mask==None:\n mask = ones(im.shape)\n dat = im.ravel()[find(mask<>0)]\n if plotalot:\n figure(); plot(im.ravel()); plot(dat)\n print mean(dat), std(dat), rout[0]*std(dat)\n print len(dat), (abs(dat-mean(dat))<(rout[0]*std(dat))).sum()\n figure(); plot(dat-mean(dat)); \n plot([0,len(dat)], [rout[0]*std(dat),rout[0]*std(dat)],'--k')\n plot([0,len(dat)], [-rout[0]*std(dat),-rout[0]*std(dat)],'--k')\n dat = removeoutliers(dat, rout[0], remove='both', center='mean', niter=rout[1], verbose=plotalot)\n ndat = len(dat)\n\n if ndat==0:\n print \"No data to work with!\"\n return (badval, badval)\n if bins==None:\n if plotalot: print \"no bins entered!\"\n datmean = dat.mean()\n datstd = stdr(dat, nsigma=3)\n nunique = len(np.unique(dat.ravel()))\n #pdb.set_trace()\n if nunique > len(dat)/20.:\n dobin = False\n else:\n dobin = True\n bins = linspace(dat.min(), dat.max(), nunique/2)\n\n if plotalot: \n print \"dat.mean, dat.std>>\" + str((dat.mean(), dat.std()))\n\n\n #if plotalot:\n # figure(); hout = hist(dat[datIndex],bins)\n #else:\n \n if dobin:\n binwidth = mean(bins[1::]-bins[:-1])\n bincenter = 0.5*(bins[1::]+bins[:-1])\n datIndex = (dat>=bins.min()) * (dat<=bins.max())\n hout = histogram(dat[datIndex], bins) #,new=True)\n gy = hout[0]\n erry = sqrt(gy)\n usableIndex = gy>0\n\n eff_binwidth = mean(bins[usableIndex][1::]-bins[usableIndex][:-1])\n guess = [gy.sum()*eff_binwidth, std(dat[datIndex]), median(dat[datIndex])]\n\n if 1.0*usableIndex.sum()/usableIndex.size < 0.5:\n out = guess\n else:\n out = optimize.fmin(gaussianChiSquared, guess, \\\n args=(bincenter[usableIndex],gy[usableIndex], erry[usableIndex]), \\\n disp=plotalot)\n\n if plotalot:\n from pylab import figure, errorbar, plot, colorbar, title\n from nsdata import imshow\n print 'guess>>',guess\n print 'fit>>',out\n figure()\n imshow(im); colorbar()\n figure()\n errorbar(bincenter[usableIndex], gy[usableIndex], erry[usableIndex], fmt='ob')\n plot(bincenter, gaussian(out, bincenter),'-r', linewidth=2)\n title('Mean: %f, Std. 
Dev.: %f' % (out[2], out[1]))\n\n ret = out[2], out[1]/sqrt(ndat)\n else:\n ret = datmean, datstd/sqrt(ndat)\n\n return ret", "def histogram_equalization(img):\n\n if len(img.shape) == 3:\n img_copy = np.copy(img)\n\n blue = img_copy[:,:,0]\n blue = histogram_equalize(blue)\n\n green = img_copy[:,:,1]\n green = histogram_equalize(green)\n\n red = img_copy[:,:,2]\n red = histogram_equalize(red)\n\n new_img = np.zeros(img_copy.shape)\n\n new_img[:,:,0] = blue\n new_img[:,:,1] = green\n new_img[:,:,2] = red\n\n return new_img\n\n else:\n return histogram_equalize(img)", "def plot_histogram(img):\n rgb_hist = rgb_histogram(img)\n plt.figure()\n for color, hist in rgb_hist.items():\n plt.plot(hist, color=color)\n plt.xlim([0, 256])", "def sym_histogram(self, X, mask=None):\n distances = euclidean_distance(X, self.V)\n membership = T.nnet.softmax(-distances / self.g ** 2)\n\n if mask is not None:\n histogram = membership * T.reshape(mask, (mask.shape[0], 1))\n histogram = T.sum(histogram, axis=0) / T.sum(mask, axis=0)\n else:\n histogram = T.mean(membership, axis=0)\n return histogram", "def Fill(self, *args, **kwargs):\n self._varexp = kwargs.get(\"varexp\")\n self._cuts = kwargs.get(\"cuts\", [])\n self._weight = kwargs.get(\"weight\", \"1\")\n if len(args) == 1 and isinstance(args[0], (str, unicode)):\n IOManager.FillHistogram(self, args[0], **kwargs)\n if not kwargs.get(\"append\", False):\n self._errorband.Reset()\n self._errorband.Add(self)\n else:\n super(Histo1D, self).Fill(*args)", "def hist_stretch(im):\n\n hist,bins = np.histogram(im.flatten(),256,[0,256])\n cdf = hist.cumsum()\n cdf_m = np.ma.masked_equal(cdf,0)\n cdf_m = (cdf_m - cdf_m.min())*255/(cdf_m.max()-cdf_m.min())\n cdf = np.ma.filled(cdf_m,0).astype('uint8')\n\n return cdf[im]", "def histogram_equalize(im_orig):\n if im_orig.ndim == 3:\n return _histogram_equalize_rgb(im_orig)\n return _histogram_equalize_grayscale(im_orig)", "def compute_histogram(self, image):\n hist = [0] * 256\n [h, w] = image.shape\n print(h,w)\n i = 0\n while i < 256:\n for row in range(h):\n for col in range(w):\n if image[row, col] == i:\n hist[i] += 1\n #print(hist[i])\n i += 1\n\n return hist", "def SetInput(self, input: 'itkHistogramF') -> \"void\":\n return _itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHFSS_SetInput(self, input)", "def compute_histogram(im, block_factor=3, color_space='HSV'):\n\n # Shape = rows and columns\n remainder_rows = im.shape[0] % block_factor\n remainder_cols = im.shape[1] % block_factor\n\n im_block = cv2.copyMakeBorder(im, block_factor - remainder_rows, 0, block_factor - remainder_cols, 0,\n cv2.BORDER_CONSTANT)\n\n windowsize_r = int(im_block.shape[0] / block_factor)\n windowsize_c = int(im_block.shape[1] / block_factor)\n\n # print(im_block.shape)\n # print(str(windowsize_r)+' '+str(windowsize_c))\n # cv2.imshow(\"fullImg\", im_block)\n\n hist = []\n for r in range(0, im_block.shape[0], windowsize_r):\n for c in range(0, im_block.shape[1], windowsize_c):\n hist_blocks = []\n window = im_block[r:r + windowsize_r, c:c + windowsize_c]\n if color_space == 'GRAY':\n window_gray = cv2.cvtColor(window, cv2.COLOR_BGR2GRAY)\n hist_block = cv2.calcHist([window_gray], [0], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n elif color_space == 'RGB':\n hist_block = cv2.calcHist([window], [0], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n 
hist_blocks.append(hist_block)\n hist_block = cv2.calcHist([window], [1], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n hist_block = cv2.calcHist([window], [2], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n elif color_space == 'HSV':\n window = cv2.cvtColor(window, cv2.COLOR_BGR2HSV)\n hist_block = cv2.calcHist([window], [0], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n hist_block = cv2.calcHist([window], [1], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n hist_block = cv2.calcHist([window], [2], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n \n hist.append(hist_blocks)\n\n return hist", "def compute_mask(self, inputs, mask=None):\n return None", "def cs4243_histmatch(ori_image, refer_image):\n \n ##your code here ###\n\n # get cdf of ori and ref image\n grey_level = 256\n ori_hist, ori_cum_hist, ori_res_image, ori_uni_hist = cs4243_histequ(ori_image, grey_level)\n ref_hist, ref_cum_hist, ref_res_image, ref_uni_hist = cs4243_histequ(refer_image, grey_level)\n \n # map each ori cdf to ref cdf and get the mapped index as matched grey level\n map_value = []\n for i in range(grey_level):\n ori_cdf = ori_cum_hist[i]\n matched_intensity = np.uint8(np.abs(ref_cum_hist - ori_cdf).argmin())\n map_value.append(matched_intensity)\n ##\n\n # Set the intensity of the pixel in the raw image to its corresponding new intensity \n height, width = ori_image.shape\n res_image = np.zeros(ori_image.shape, dtype='uint8') # Note the type of elements\n for i in range(height):\n for j in range(width):\n res_image[i,j] = map_value[ori_image[i,j]]\n \n res_hist = np.bincount(res_image.flatten(), minlength=256)\n \n return ori_hist, ref_hist, res_image, res_hist", "def binned_fft(self):\n self.fft()\n self.fft_bins_y = self.binn_fft()\n self.fft_bins_y = np.asarray(self.fft_bins_y) * UPDATE_FACTOR + self.last_fft_bins_y *(1 - UPDATE_FACTOR)\n self.last_fft_bins_y = self.fft_bins_y", "def _setup(self, h: histogram.Histogram1D) -> Tuple[histogram.Histogram1D, pachyderm.fit.T_FitArguments]:\n fit_range = self.fit_options[\"range\"]\n # Restrict the range so that we only fit within the desired input.\n restricted_range = (h.x > fit_range.min) & (h.x < fit_range.max)\n restricted_hist = histogram.Histogram1D(\n # We need the bin edges to be inclusive.\n # Need the +/- epsilons here to be extra safe, because apparently some of the <= and >= can fail\n # (as might be guessed with floats, but I hadn't observed until now). 
We don't do this above\n # because we don't want to be inclusive on the edges.\n bin_edges = h.bin_edges[(h.bin_edges >= (fit_range.min - utils.epsilon)) & (h.bin_edges <= (fit_range.max + utils.epsilon))],\n y = h.y[restricted_range],\n errors_squared = h.errors_squared[restricted_range]\n )\n\n # Default arguments\n # Use the minimum of the histogram as the starting value.\n arguments: pachyderm.fit.T_FitArguments = {\n \"slope\": 0, \"error_slope\": 0.005,\n \"const\": 1, \"error_const\": 0.005,\n \"limit_slope\": (-100, 100),\n \"limit_const\": (-10, 10),\n }\n\n return restricted_hist, arguments", "def fit(self, mask_file='', mode='+', zscore=2):\n if not mask_file:\n mask_file = fetch_one_file(self.ica_dir, self._mask_fname, pat_type='re.match')\n\n super(MIALABICAResultsPlotter, self).fit(\n mask_file=mask_file,\n mode=mode,\n zscore=zscore\n )", "def updateImage(self, autoHistogramRange=True):\n super().updateImage(autoHistogramRange=autoHistogramRange)\n self.getImageItem().setLookupTable(self.lut)", "def test_numpy_bins(self):\n # Load the data from the fixture\n data = load_occupancy(return_dataset=True)\n X, y = data.to_numpy()\n\n visualizer = BalancedBinningReference()\n visualizer.fit(y)\n visualizer.finalize()\n self.assert_images_similar(visualizer, tol=0.5)", "def fit(self, signal):\n self.signal = signal", "def find_cars(img,\n clf,\n scaler,\n color_space,\n spatial_size,\n hist_bins,\n scale,\n cells_per_step,\n x_start_stop,\n y_start_stop,\n orient,\n pix_per_cell,\n cell_per_block):\n draw_img = np.copy(img)\n\n heatmap = np.zeros_like(img[:, :, 0])\n\n img = img.astype(np.float32)/255\n\n img_to_search = img[y_start_stop[0]:y_start_stop[1], x_start_stop[0]:x_start_stop[1], :]\n\n # color transformed image\n ctrans_to_search = change_color_space(img_to_search, colorspace=color_space)\n\n if scale != 1:\n imshape = ctrans_to_search.shape\n ctrans_to_search = cv2.resize(ctrans_to_search, (np.int(imshape[1]/scale), np.int(imshape[0]/scale)))\n\n ch1 = ctrans_to_search[:, :, 0]\n ch2 = ctrans_to_search[:, :, 1]\n ch3 = ctrans_to_search[:, :, 2]\n\n nxblocks = (ch1.shape[1] // pix_per_cell) - 1 # number of hog cells\n nyblocks = (ch1.shape[0] // pix_per_cell) - 1\n nfeat_per_block = orient*cell_per_block**2\n window = 64\n nblocks_per_window = (window // pix_per_cell) - 1\n\n nxsteps = (nxblocks - nblocks_per_window) // cells_per_step\n nysteps = (nyblocks - nblocks_per_window) // cells_per_step\n\n # compute individual channel HOG features for the intire image\n hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog2 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog3 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)\n\n for xb in range(nxsteps):\n for yb in range(nysteps):\n ypos = yb*cells_per_step\n xpos = xb*cells_per_step\n # extract hog features for this patch\n hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()\n hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()\n hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()\n hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))\n\n xleft = xpos*pix_per_cell\n ytop = ypos*pix_per_cell\n\n # extract the image path\n subimg = cv2.resize(ctrans_to_search[ytop:ytop+window, xleft:xleft+window], (64,64))\n\n # get color features\n spatial_features = get_bin_spatial(subimg, size=spatial_size)\n hist_features = 
get_color_hist(subimg, nbins=hist_bins)\n\n # scale features and make prediction\n test_features = scaler.transform(np.hstack((spatial_features, hist_features, hog_features)))\n\n test_prediction = clf.predict(test_features)\n\n if test_prediction == 1:\n xbox_left = np.int(xleft*scale)\n ytop_draw = np.int(ytop*scale)\n win_draw = np.int(window*scale)\n cv2.rectangle(draw_img, (xbox_left+x_start_stop[0], ytop_draw+y_start_stop[0]), (xbox_left+win_draw+x_start_stop[0], ytop_draw+win_draw+y_start_stop[0]), (0,0,255), 6)\n heatmap[ytop_draw+y_start_stop[0]:ytop_draw+win_draw+y_start_stop[0], xbox_left+x_start_stop[0]:xbox_left+win_draw+x_start_stop[0]] += 1\n\n return draw_img, heatmap", "def _fit(self, dataset):\n self.dataset = dataset\n self.masker = self.masker or dataset.masker\n self.null_distributions_ = {}\n\n ma_values = self._collect_ma_maps(\n coords_key=\"coordinates\",\n maps_key=\"ma_maps\",\n fname_idx=0,\n )\n\n # Determine bins for null distribution histogram\n max_ma_values = np.max(ma_values, axis=1)\n max_poss_ale = self._compute_summarystat(max_ma_values)\n self.null_distributions_[\"histogram_bins\"] = np.round(\n np.arange(0, max_poss_ale + 0.001, 0.0001), 4\n )\n\n stat_values = self._compute_summarystat(ma_values)\n\n iter_df = self.inputs_[\"coordinates\"].copy()\n rand_idx = np.random.choice(self.xyz.shape[0], size=(iter_df.shape[0], self.n_iters))\n rand_xyz = self.xyz[rand_idx, :]\n iter_xyzs = np.split(rand_xyz, rand_xyz.shape[1], axis=1)\n\n # Define parameters\n iter_dfs = [iter_df] * self.n_iters\n params = zip(iter_dfs, iter_xyzs)\n\n if self.n_cores == 1:\n if self.memory_limit:\n perm_scale_values = np.memmap(\n self.memmap_filenames[1],\n dtype=stat_values.dtype,\n mode=\"w+\",\n shape=(self.n_iters, stat_values.shape[0]),\n )\n else:\n perm_scale_values = np.zeros(\n (self.n_iters, stat_values.shape[0]), dtype=stat_values.dtype\n )\n for i_iter, pp in enumerate(tqdm(params, total=self.n_iters)):\n perm_scale_values[i_iter, :] = self._run_permutation(pp)\n if self.memory_limit:\n # Write changes to disk\n perm_scale_values.flush()\n else:\n with mp.Pool(self.n_cores) as p:\n perm_scale_values = list(\n tqdm(p.imap(self._run_permutation, params), total=self.n_iters)\n )\n perm_scale_values = np.stack(perm_scale_values)\n\n p_values, z_values = self._scale_to_p(stat_values, perm_scale_values)\n\n del perm_scale_values\n\n logp_values = -np.log10(p_values)\n logp_values[np.isinf(logp_values)] = -np.log10(np.finfo(float).eps)\n\n # Write out unthresholded value images\n images = {\"stat\": stat_values, \"logp\": logp_values, \"z\": z_values}\n return images", "def SetUseHistograms(self, _arg: 'bool const') -> \"void\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS2IUS2_SetUseHistograms(self, _arg)", "def histograma(p):\n img = read_img(p)\n show_histograma(img.reshape((-1)))", "def SetUseHistograms(self, _arg: 'bool const') -> \"void\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF2IUS2_SetUseHistograms(self, _arg)", "def SetInput(self, input: 'itkHistogramF') -> \"void\":\n return _itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHFUS_SetInput(self, input)", "def _corr_ax1(input_image):\n dim = input_image.shape[1]\n m_ones = np.ones(dim)\n norm_mask = np.correlate(m_ones, m_ones, mode=\"full\")\n # not sure that the /2 is the correct correction\n est_by_row = [np.argmax(np.correlate(v, v[::-1], mode=\"full\") / norm_mask) / 2 for v in input_image]\n return 
np.histogram(est_by_row, bins=np.arange(0, dim + 1))", "def SetUseHistograms(self, _arg: 'bool const') -> \"void\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS2ISS2_SetUseHistograms(self, _arg)", "def histogram_equalization_helper(im):\n\n im *= (255 / im.max())\n c_m = im.min()\n hist_orig, bins = np.histogram(im, bins=256, range=[0, 256])\n cumulative_hist = np.cumsum(hist_orig)\n cumulative_hist = (((cumulative_hist - c_m) * 255) /(im.size)).astype(int)\n im_eq = cumulative_hist[im.astype(int)]\n hist_eq, bins_eq = np.histogram(im_eq, bins=256, range=[0, 256])\n im_eq = im_eq/ 255\n\n # plt.plot((bins[:-1] + bins[1:]) / 2, hist_orig)\n # plt.hist(im.flatten(), bins=128)\n # plt.show()\n #\n # plt.plot((bins_eq[:-1] + bins_eq[1:]) / 2, hist_eq)\n # plt.hist(im.flatten(), bins=128)\n #\n # plt.show()\n return im_eq, hist_orig, hist_eq", "def histeq( im, nbr_bins = 256):\n\t# get image histogram \n\timhist, bins = histogram( im.flatten(), nbr_bins, normed = True) \n\tcdf = imhist.cumsum() \n\t# cumulative distribution function cdf = 255 * cdf / cdf[-1] \n\t# normalize \n\t# use linear interpolation of cdf to find new pixel values \n\tim2 = interp( im.flatten(), bins[:-1], cdf) \n\treturn im2.reshape( im.shape), cdf", "def compute_histogram(image, n_bins, color_space=\"RGB\"):\n\n n_channels = 1 if color_space == \"GRAY\" else image.shape[2]\n\n hist_channels = list(range(n_channels))\n hist_bins = [n_bins,]*n_channels\n hist_range = [0, 256]*n_channels\n\n hist = cv.calcHist([image], hist_channels, None, hist_bins,\n hist_range)\n hist = cv.normalize(hist, hist, alpha=0, beta=1,\n norm_type=cv.NORM_MINMAX).flatten() # change histogram range from [0,256] to [0,1]\n return hist", "def img_histogram(img):\n\n plt.figure()\n\n if len(img.shape) > 2:\n\n plt.subplot(3,1,1)\n plt.hist(img[:,:,0].ravel(),bins=range(257),color='b')\n plt.title('Image Histogram')\n plt.legend('Blue')\n plt.xlabel('Pixel Values')\n plt.ylabel('Frequency')\n\n plt.subplot(3,1,2)\n plt.hist(img[:,:,1].ravel(),bins=range(257),color='g')\n plt.legend('Green')\n plt.xlabel('Pixel Values')\n plt.ylabel('Frequency')\n\n plt.subplot(3,1,3)\n plt.hist(img[:,:,2].ravel(),bins=range(257),color='r')\n plt.legend('Red')\n plt.xlabel('Pixel Values')\n plt.ylabel('Frequency')\n\n plt.ion()\n plt.show()\n\n else:\n\n plt.hist(img[:,:].ravel(),bins=range(257))\n plt.title('Image Histogram - Grayscale')\n plt.xlabel('Pixel Values')\n plt.ylabel('Frequency')\n\n plt.ion()\n plt.show()", "def _draw_mask_on_image(self, mask):\n mask = self.STANDARD_COLORS_ARRAY[mask]\n cv2.addWeighted(mask,self.config.ALPHA,self.image,1.0,0,self.image)", "def update_mask(self, mask):\n\n # Get general mask\n general_mask = self.general_mask\n\n # Complete with the input mask\n new_mask = (general_mask | mask)\n\n # Update attribute\n self.mask = new_mask\n\n # Correct i_bounds if it was not specified\n # self.update_i_bnds()\n\n # Re-compute weights\n self.weights, self.weights_k_idx = self.compute_weights()\n\n return", "def SetInput(self, histogram: 'itkHistogramD') -> \"void\":\n return _itkHistogramToIntensityImageFilterPython.itkHistogramToIntensityImageFilterHDIF2_Superclass_SetInput(self, histogram)", "def apply_mask(face: np.array, mask: np.array) -> np.array:\n mask_h, mask_w, _ = mask.shape\n face_h, face_w, _ = face.shape\n\n # Resize the mask to fit on face\n factor = min(face_h / mask_h, face_w / mask_w)\n new_mask_w = int(factor * mask_w)\n new_mask_h = int(factor * mask_h)\n new_mask_shape = (new_mask_w, 
new_mask_h)\n resized_mask = cv2.resize(mask, new_mask_shape)\n\n # Add mask to face - ensure mask is centered\n face_with_mask = face.copy()\n non_white_pixels = (resized_mask < 250).all(axis=2)\n off_h = int((face_h - new_mask_h) / 2)\n off_w = int((face_w - new_mask_w) / 2)\n face_with_mask[off_h: off_h+new_mask_h, off_w: off_w+new_mask_w][non_white_pixels] = \\\n resized_mask[non_white_pixels]\n\n return face_with_mask", "def SetUseHistograms(self, _arg: 'bool const') -> \"void\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS3IUS3_SetUseHistograms(self, _arg)", "def fit():\n pass", "def via_fit_imaging_from(self, fit: FitImaging) -> Visuals2D:\r\n return self.via_mask_from(mask=fit.mask)", "def histogram_equalize(im_orig):\n img = get_gray_channel(im_orig)\n img = float2int(img)\n \n # step1: computing histogram\n hist_orig, bins = np.histogram(img, bins=np.arange(MAX_VALUE + 1))\n\n # step2: computing cumulative histogram\n cum_hist = np.cumsum(hist_orig)\n \n # step3+4: Normalizing cumulative histogram and multiplying by\n # the maximal gray level\n norm_factor = (MAX_VALUE - 1) / img.size\n cum_hist = np.multiply(cum_hist, norm_factor)\n \n # step5: Verifying values are in the right range\n if (int(np.amin(cum_hist)) != 0) or \\\n (int(np.amax(cum_hist)) != MAX_VALUE - 1):\n cum_hist = linear_stretch(cum_hist)\n\n # step6: Round values\n cum_hist = np.round(cum_hist)\n\n # step7: Map image intensity values using histogram\n im_eq = cum_hist[img]\n\n hist_eq = np.histogram(im_eq, bins=np.arange(MAX_VALUE + 1))[0]\n im_eq = int2float(im_eq)\n im_eq = update_gray_channel(im_orig, im_eq)\n\n return im_eq, hist_orig, hist_eq", "def SetUseHistograms(self, _arg: 'bool const') -> \"void\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF2ISS2_SetUseHistograms(self, _arg)", "def SetUseHistograms(self, _arg: 'bool const') -> \"void\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF3IUS3_SetUseHistograms(self, _arg)", "def rebin(Data, width, mean=False, by_nbins=False) :\n \n # Input tells us whether to use mean or median.\n if mean :\n method = ma.mean\n else :\n method = ma.median\n\n if by_nbins :\n width = int(width)\n if width <= 1 :\n raise ValueError(\"Invalid number of bins to average\")\n # Get new axis parameters.\n new_cdelt = width*Data.field['CDELT1']\n nbins = int(sp.ceil(float(Data.dims[-1])/width))\n new_centre = nbins//2 + 1\n Data.calc_freq()\n Data.field['CRVAL1'] = Data.freq[int((new_centre+0.5)*width)]\n # Case where evenly divisable (much more efficient).\n if Data.dims[-1] % width == 0:\n new_data = Data.data\n new_data.shape = Data.data.shape[:-1] + (nbins, width)\n new_data = method(new_data, -1)\n else :\n # Allowcate memory for Data array.\n new_data = ma.empty(Data.dims[:3] + (nbins,))\n # Loop over new bins and rebin.\n for ii in xrange(nbins) :\n new_data[:,:,:,ii] = method(\n Data.data[:,:,:,ii*width:(ii+1)*width],3)\n Data.set_data(new_data)\n else :\n # Convert to Hertz.\n width = width*1.0e6\n new_cdelt = width * sp.sign(Data.field['CDELT1'])\n # Figure out some basics.\n Data.calc_freq()\n freq = sp.array(Data.freq)\n # Extra bit on the bandwidth is because frequency labels are channel \n # centre.\n bandwidth = abs(freq[-1] - freq[0]) + abs(Data.field['CDELT1'])\n nbins = int(bandwidth//width)\n new_centre = int((Data.field['CRPIX1']-1)\n * abs(Data.field['CDELT1'])/width)\n new_dims = Data.dims[0:-1] + (nbins, )\n # Get old data and allowcate memory for new data.\n 
old_data = ma.array(Data.data, copy=True)\n Data.set_data(ma.zeros(new_dims))\n new_freq = Data.field['CRVAL1'] + new_cdelt*(sp.arange(nbins)\n - new_centre)\n for ii in range(1,nbins-1) :\n inds = (sp.logical_and(\n abs(freq - new_freq[ii]) <= abs(freq - new_freq[ii+1]),\n abs(freq - new_freq[ii]) < abs(freq - new_freq[ii-1])))\n subdata = (old_data[:,:,:,inds])\n Data.data[:,:,:,ii] = method(subdata, 3)\n # Above loop breaks for end points... deal with them.\n inds, = sp.where(abs(freq - new_freq[0]) <= abs(freq - new_freq[1]))\n subdata = old_data[:,:,:,inds]\n Data.data[:,:,:,0] = method(subdata, 3)\n inds, = sp.where(abs(freq-new_freq[nbins-1])\n < abs(freq-new_freq[nbins-2]))\n subdata = old_data[:,:,:,inds]\n Data.data[:,:,:,nbins-1] = method(subdata, 3)\n Data.freq = new_freq\n Data.field['CRPIX1'] = sp.array(new_centre + 1, dtype=int)\n Data.field['CDELT1'] = sp.array(new_cdelt, dtype=float)", "def equalise_hist(image, bin_count=256):\n # TODO: your histogram equalization code\n #define arrays\n image = img_as_ubyte(image)\n row,col = image.shape\n new_image = np.zeros((row,col),dtype='uint8') \n\n # compute the value of each grayscale,and save in image_hist \n image_hist = np.bincount(image.flatten(), minlength=(bin_count))\n\n # normalise n[]\n norm_arr = (np.cumsum(image_hist)/(image.size))*(bin_count-1)\n norm_arr = norm_arr.astype('uint8')\n \n #Compute a normalized cumulative histogram\n for x in range(row):\n for y in range(col):\n new_image[x,y] = norm_arr[image[x,y]]\n \n return new_image", "def histeq(im, nbr_bins = 256):\n\t# get image histogram\n\timhist, bins = pl.histogram(im.flatten(), nbr_bins, normed = True)\n\tcdf = imhist.cumsum() # cumulative distribution function\n\tcdf = 255 * cdf / cdf[-1] # normalize\n\t# use linear interpolation of cdf to find new pixel values\n\tim2 = pl.interp(im.flatten(), bins[:-1], cdf)\n\treturn im2.reshape(im.shape)", "def SetUseHistograms(self, _arg: 'bool const') -> \"void\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3ISS3_SetUseHistograms(self, _arg)", "def num_fill_edges(mask,i):\n n = mask.shape[0]\n nb = np.nonzero(mask[i, :])[0]\n clique_edges = nb.shape[0]*(nb.shape[0]-1)/2\n current_edges = mask[np.ix_(nb, nb)].sum()/2\n return clique_edges - current_edges", "def Clone(self) -> \"itkHistogramThresholdCalculatorHFF_Pointer\":\n return _itkHistogramThresholdCalculatorPython.itkHistogramThresholdCalculatorHFF_Clone(self)", "def maxfit(self, *args, **kwargs):\n return _image.image_maxfit(self, *args, **kwargs)", "def match_histograms(src_path, dst_path, size=128, step_size=128, *, reference_path):\n with rasterio.open(src_path) as src:\n profile = src.profile.copy()\n windows = list(\n sliding_windows(\n (size, size), (step_size, step_size), src.width, src.height, whole=False\n )\n )\n\n with rasterio.open(reference_path) as ref:\n with rasterio.open(dst_path, \"w\", **profile) as dst:\n for c, (win, (i, j)) in tqdm(list(enumerate(windows))):\n _logger.debug(\"%s %s\", win, (i, j))\n\n img = read_window(src, win)\n ref_img = read_window(ref, win)\n\n matched_img = exposure.match_histograms(\n img, ref_img, multichannel=True\n )\n write_window(matched_img, dst, win)", "def adaptiveEqHist(img, clipLimit=2.0, tileGridSize=(8,8)):\n\tgray = grayscale(img)\n\tclahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)\n\tcl1 = clahe.apply(gray)\n\treturn cl1" ]
[ "0.5717073", "0.55807126", "0.5528441", "0.55023885", "0.54681766", "0.54579365", "0.54448426", "0.54230475", "0.54140556", "0.5352749", "0.53307045", "0.5266392", "0.52623886", "0.5244729", "0.52365315", "0.52273583", "0.52006847", "0.52000165", "0.5193751", "0.5170942", "0.5166152", "0.515521", "0.5148498", "0.5131553", "0.513034", "0.5128944", "0.51003295", "0.509646", "0.5091102", "0.50773436", "0.5070945", "0.50523084", "0.504604", "0.50447625", "0.5017601", "0.5010572", "0.50091016", "0.50080305", "0.5003182", "0.49919796", "0.4987083", "0.4984888", "0.4966981", "0.49658456", "0.49643353", "0.4957397", "0.4949938", "0.49436677", "0.493777", "0.49240026", "0.49062186", "0.4904565", "0.48946992", "0.48841888", "0.4880029", "0.48624948", "0.48606277", "0.4849759", "0.4834201", "0.48335186", "0.4832686", "0.48318225", "0.48231882", "0.48202115", "0.4813272", "0.4812477", "0.480354", "0.4795655", "0.4792727", "0.47869098", "0.47791326", "0.4778977", "0.4776825", "0.47732887", "0.4772306", "0.47722816", "0.47695762", "0.47653386", "0.47641456", "0.47611225", "0.47604987", "0.47531536", "0.47414607", "0.47397134", "0.47378972", "0.47371116", "0.4731013", "0.47243863", "0.47230804", "0.47211727", "0.47208506", "0.4720513", "0.47182482", "0.4717759", "0.4717461", "0.4713037", "0.47092915", "0.47069523", "0.47034374", "0.46998832" ]
0.57911646
0
Find the position of the common line in 3D. The formula is (RB^T zhat) cross (RA^T zhat). Returns phi, theta of the common line in degrees; theta is always < 90. Notice you don't need to enter psi's; they are irrelevant
def common_line_in3D(phiA,thetaA,phiB,thetaB):

    from math import pi, sqrt, cos, sin, asin, atan2

    piOver=pi/180.0;
    ph1 = phiA*piOver;
    th1 = thetaA*piOver;
    ph2 = phiB*piOver;
    th2 = thetaB*piOver;

    #nx = cos(thetaBR)*sin(thetaAR)*sin(phiAR) - cos(thetaAR)*sin(thetaBR)*sin(phiBR) ;
    #ny = cos(thetaAR)*sin(thetaBR)*cos(phiBR) - cos(thetaBR)*sin(thetaAR)*cos(phiAR) ;
    #nz = sin(thetaAR)*sin(thetaBR)*sin(phiAR-phiBR);

    nx = sin(th1)*cos(ph1)*sin(ph2)-sin(th2)*sin(ph1)*cos(ph2)
    ny = sin(th1)*cos(th2)*cos(ph1)*cos(ph2)-cos(th1)*sin(th2)*cos(ph1)*cos(ph2)
    nz = cos(th2)*sin(ph1)*cos(ph2)-cos(th1)*cos(ph1)*sin(ph2)

    norm = nx*nx + ny*ny + nz*nz

    if norm < 1e-5:
        #print 'phiA,thetaA,phiB,thetaB:', phiA, thetaA, phiB, thetaB
        return 0.0, 0.0

    if nz<0: nx=-nx; ny=-ny; nz=-nz;

    #thetaCom = asin(nz/sqrt(norm))
    phiCom = asin(nz/sqrt(norm))
    #phiCom = atan2(ny,nx)
    thetaCom = atan2(ny, nx)

    return phiCom*180.0/pi , thetaCom*180.0/pi
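For illustration, a minimal self-contained NumPy sketch of the formula stated in the query, n = (RB^T zhat) cross (RA^T zhat), follows. It is written from scratch rather than taken from the code above: the helper names rot_zy and common_line_direction are made up for this sketch, and the Euler-angle convention (phi about z, then theta about y; psi dropped because R^T zhat does not depend on it) plus the final (phi, theta) read-out are assumptions, so its output is not guaranteed to match common_line_in3D value-for-value.

import numpy as np

def rot_zy(phi_deg, theta_deg):
    # Assumed convention: R = Ry(theta) @ Rz(phi); psi is omitted since R.T @ zhat ignores it.
    phi, theta = np.radians(phi_deg), np.radians(theta_deg)
    rz = np.array([[ np.cos(phi), np.sin(phi), 0.0],
                   [-np.sin(phi), np.cos(phi), 0.0],
                   [ 0.0,         0.0,         1.0]])
    ry = np.array([[np.cos(theta), 0.0, -np.sin(theta)],
                   [0.0,           1.0,  0.0],
                   [np.sin(theta), 0.0,  np.cos(theta)]])
    return ry @ rz

def common_line_direction(phiA, thetaA, phiB, thetaB):
    # Return (phi, theta) in degrees of n = (RB^T zhat) x (RA^T zhat).
    zhat = np.array([0.0, 0.0, 1.0])
    nA = rot_zy(phiA, thetaA).T @ zhat      # projection direction of view A
    nB = rot_zy(phiB, thetaB).T @ zhat      # projection direction of view B
    n = np.cross(nB, nA)                    # common-line direction
    nrm = np.linalg.norm(n)
    if nrm < 1e-5:                          # (anti)parallel directions: no unique common line
        return 0.0, 0.0
    n = n / nrm
    if n[2] < 0.0:                          # flip into the upper hemisphere
        n = -n
    theta = np.degrees(np.arccos(np.clip(n[2], -1.0, 1.0)))  # polar angle, always <= 90
    phi = np.degrees(np.arctan2(n[1], n[0]))                 # azimuth
    return phi, theta

# Example: two views sharing the same phi but 40 degrees apart in theta.
# print(common_line_direction(10.0, 50.0, 10.0, 90.0))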
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def theta_finder(theta, point_a, point_b, point_c, point_c_new):\n x, y, z = parametrized_circle(point_a, point_b, point_c, theta)\n residual = (x - point_c_new[0])**2 + (y - point_c_new[1])**2 + (z - point_c_new[2])**2\n return residual", "def cart2spheric(x, y, z):\n # doesn't compute r because chosen egal to 1\n with np.errstate(all='ignore'):\n theta = np.arccos(z)\n phi = np.arctan2(y, x)\n\n return theta, phi", "def CR(phi):\n return (np.kron(P0,s0) + np.kron(P1,R(phi)))", "def get_z(theta, phi):\n return math.cos(phi)/math.tan(theta/2) + 1j*math.sin(phi)/math.tan(theta/2)", "def get_polar_coordinates(cup_position, bot_position):\n\n distance_x = cup_position[0] - bot_position[0]\n distance_y = cup_position[1] - bot_position[1]\n\n r = math.hypot(distance_x, distance_y)\n theta = math.degrees(math.atan(distance_y/distance_x))\n\n return r, theta", "def R3(theta):\n\n DCM = np.array([[np.cos(theta), np.sin(theta), 0], \n [-np.sin(theta), np.cos(theta), 0], \n [0, 0, 1]])\n\n return DCM", "def parametrized_circle(point_a, point_b, point_c, theta):\n radius, center = shortest_line_to_point(point_a, point_b, point_c)\n # print'center, radius \\n', center, radius\n center_axis = np.subtract(point_a, point_b)\n # print 'center axis %s , radius %s, center %s' % (center_axis, radius, center)\n # center_axis dot <1,1,z> = 0 returns perp vector\n in_plane = norm_vect(np.subtract(point_c, center))\n perp_1 = np.cross(center_axis, in_plane)\n perp_2 = np.cross(center_axis, perp_1)\n # print 'perp dick', perp_1, perp_2\n # norm perpendicular vectors\n perp_1 = norm_vect(perp_1)\n perp_2 = norm_vect(perp_2)\n if -1e-6 > np.dot(perp_1, perp_2) > 1e-6 or -1e-6 > (np.dot(perp_1, center_axis)) > 1e-6 or \\\n -1e-6 > np.dot(perp_2, center_axis) > 1e-6:\n print 'not perpendicular'\n # print np.dot(perp_1, perp_2), np.dot(perp_1, center_axis), np.dot(perp_2, center_axis)\n x = center[0] + (radius * math.cos(theta) * perp_2[0]) + (radius * math.sin(theta) * perp_1[0])\n y = center[1] + (radius * math.cos(theta) * perp_2[1]) + (radius * math.sin(theta) * perp_1[1])\n z = center[2] + (radius * math.cos(theta) * perp_2[2]) + (radius * math.sin(theta) * perp_1[2])\n return [x, y, z]", "def intersection_right(theta):\n a_ccs_rsm_tran = hom_translation_matrix(\n t_x=0.139807669447128, t_y=-0.0549998406976098, t_z=-0.051)\n a_ccs_rsm_rot = hom_rotation(z_axis_rotation_matrix(radians(-15.0)))\n a_mcs_2_joint = hom_rotation(z_axis_rotation_matrix(theta))\n a_mcs_2_sp_2_1 = hom_translation_matrix(\n t_x=0.085, t_y=0, t_z=-0.0245)\n\n a_ccs_sp_2_1 = a_ccs_rsm_tran @ a_ccs_rsm_rot @ a_mcs_2_joint @ a_mcs_2_sp_2_1\n return get_translation(a_ccs_sp_2_1)", "def find_angle(p1, p2, p3):\n\n BAx = p1[0] - p2[0]\n BAy = p1[1] - p2[1]\n\n BCx = p3[0] - p2[0]\n BCy = p3[1] - p2[1]\n\n a = [BAx, BAy]\n b = [BCx, BCy]\n a_mag = np.linalg.norm(a)\n b_mag = np.linalg.norm(b)\n\n theta = np.arccos(np.dot(a, b) / (a_mag * b_mag))\n\n return math.degrees(theta)", "def pol2cart(theta, rho, z=None):\n x = rho * np.cos(theta)\n y = rho * np.sin(theta)\n\n if z is None:\n return x, y\n else:\n return x, y, z", "def calc_torsion_phi(self):\n prev_res = self.get_offset_residue(-1)\n if prev_res is None:\n return None\n\n paC = prev_res.get_atom('C')\n aN = self.get_atom('N')\n aCA = self.get_atom('CA')\n aC = self.get_atom('C')\n return AtomMath.calc_torsion_angle(paC, aN, aCA, aC)", "def get_angle(pt1,pt2,pt3):\r\n a = float(get_distance(pt1,pt2))\r\n b = float(get_distance(pt2,pt3))\r\n c = float(get_distance(pt1,pt3))\r\n angle = 
np.arccos((a**2 + b**2 - c**2)/(2*a*b)) # Law of Cosines \r\n \r\n return angle", "def _get_pt_theta(self, R, C, base_rad_m=0.2):\n #rospy.loginfo(\"Received R: %s, C: %s\" % (R, C))\n G = C - R\n #rospy.loginfo(\"Calculated G = %s\" % G)\n G_mag = np.linalg.norm(G)\n #rospy.loginfo(\"Calculated G_mag = %s\" % G_mag)\n\n # magnitude of distance for goal is magnitude of distance\n # between points - radius of base\n G_p_mag = G_mag - base_rad_m \n #rospy.loginfo(\"Then G_p_mag = %s\" % G_p_mag)\n gx, gy = G[0,0], G[1, 0]\n #rospy.loginfo(\"gx is %s, gy is %s\" % (gx, gy))\n theta = np.arctan(gy/gx)\n # Handle cases where tangent wraps around\n if gx < 0.0:\n theta += np.pi\n #rospy.loginfo(\"Then theta is %s radians (%s degrees)\" % (theta, np.rad2deg(theta)))\n G_p = G_p_mag * (np.array([np.cos(theta), np.sin(theta)]).reshape(-1, 1))\n #rospy.loginfo(\"G_p is %s\" % G_p)\n pt = R + G_p\n #rospy.loginfo(\"Finally, pt is %s\" % pt)\n #rospy.loginfo(\"Determined pt = %s and theta = %s\" % (pt, theta))\n\n return pt, theta", "def theta_phi(Collimator_square, sample_point):\n p1,p2,p3,p4=Collimator_square\n\n points = np.array([sample_point-p1, sample_point-p2, sample_point-p3, sample_point-p4])\n points=points.transpose(1,0,2) #shape: (pointsNum,4,3)\n\n theta = np.arctan2(points[:, :, 0],points[:, :, 1] )\n\n norm_x_y=np.sqrt(points[:, :, 0]**2+points[:, :, 1]**2)\n phi = np.arctan2(norm_x_y, points[:, :, 2])\n\n return theta, phi", "def rotate(prime_pos, theta, phi):\n light_dir = np.array([0, 0, 1])\n origin_prime = np.array(prime_pos)\n light_dir = phi_rot(light_dir, phi)\n light_dir = theta_rot(light_dir, theta)\n # origin = phi_rot(origin_prime, phi)\n origin = theta_rot(origin_prime, theta)\n return origin, light_dir", "def calculate_theta_vals(self) -> None:\n A = np.zeros(self.num_points) # Inappropriate names, but they mirror Knuth's notation.\n B = np.zeros(self.num_points)\n C = np.zeros(self.num_points)\n D = np.zeros(self.num_points)\n R = np.zeros(self.num_points)\n\n # Calculate the entries of the five vectors.\n # Skip first and last point if path is non-cyclic.\n point_ind = range(self.num_points) if self.is_cyclic else range(1, self.num_points - 1)\n for i in point_ind:\n z_h = self.points[i - 1]\n z_i = self.points[i]\n z_j = self.points[(i + 1) % self.num_points]\n\n A[i] = z_h.alpha / (z_i.beta ** 2 * z_h.d_val)\n B[i] = (3 - z_h.alpha) / (z_i.beta ** 2 * z_h.d_val)\n C[i] = (3 - z_j.beta) / (z_i.alpha ** 2 * z_i.d_val)\n D[i] = z_j.beta / (z_i.alpha ** 2 * z_i.d_val)\n R[i] = -B[i] * z_i.psi - D[i] * z_j.psi\n\n # Set up matrix M such that the soln. 
Mx = R are the theta values.\n M = np.zeros((self.num_points, self.num_points))\n for i in range(self.num_points):\n # Fill i-th row of M\n M[i][i - 1] = A[i]\n M[i][i] = B[i] + C[i]\n M[i][(i + 1) % self.num_points] = D[i]\n\n # Special formulas for first and last rows of M with non-cyclic paths.\n if not self.is_cyclic:\n # First row of M\n alpha_0 = self.points[0].alpha\n beta_1 = self.points[1].beta\n xi_0 = (alpha_0 ** 2 * self.begin_curl) / beta_1 ** 2\n M[0][0] = alpha_0 * xi_0 + 3 - beta_1\n M[0][1] = (3 - alpha_0) * xi_0 + beta_1\n R[0] = -((3 - alpha_0) * xi_0 + beta_1) * self.points[1].psi\n # Last row of M\n alpha_n_1 = self.points[-2].alpha\n beta_n = self.points[-1].beta\n xi_n = (beta_n ** 2 * self.end_curl) / alpha_n_1 ** 2\n M[-1][-2] = (3 - beta_n) * xi_n + alpha_n_1\n M[-1][-1] = (beta_n * xi_n + 3 - alpha_n_1)\n R[-1] = 0\n\n # Solve for theta values.\n thetas = np.linalg.solve(M, R)\n for i, point in enumerate(self.points):\n point.theta = thetas[i]", "def sph2car(r, theta, phi):\n x = r * np.sin(theta) * np.cos(phi)\n y = r * np.sin(theta) * np.sin(phi)\n z = r * np.cos(theta)\n\n return x, y, z", "def __call__( self , theta ):\r\n offset = np.dot( z_rot( theta ) , [ self.radius , 0 , 0 ] )\r\n # print \"Offset:\" , offset\r\n return np.add( self.center , offset )", "def get_x_y_z(drone, p, q, r):\n num_cameras = 2\n camera_constants = [0,math.pi/2]\n rads = np.zeros(num_cameras)\n phis = np.zeros(num_cameras)\n d = np.zeros(num_cameras)\n theta = np.zeros(num_cameras)\n Hs = np.zeros(num_cameras)\n s = 12\n HFOV = math.pi/4\n VFOV = 5*math.pi/36\n HPIX = 1280\n VPIX = 720\n #loop one, where we increment over camera number, and\n # get new information\n\n cent = calculate_centroid(p,q,r)\n for camera_num in range(num_cameras):\n\n A,B = find_a_and_b(p[camera_num],q[camera_num],r[camera_num],cent[camera_num])\n a = find_a(A,B)\n d_in = find_inner_d(a, s)\n angle_c = find_angle_c(a)\n alpha = find_alpha(HFOV, HPIX, A)\n w = find_w(angle_c, s)\n d_out = find_outer_d(w,alpha,a)\n pointy_front = is_point_front(r[camera_num],q[camera_num],p[camera_num],cent[camera_num])\n d[camera_num] = find_d(d_in,d_out,pointy_front)\n theta[camera_num] = find_theta(angle_c,A,B,camera_constants[camera_num])\n k = find_k(drone[camera_num], cent[camera_num])\n angle_k = find_angle_k(k, HFOV, HPIX)\n phi = find_phi(theta[camera_num], angle_k)\n rad = find_r(d[camera_num], angle_k)\n phis[camera_num] = phi\n rads[camera_num] = rad\n\n # end of first loop\n\n cosphis = np.cos(phis)\n sinphis = np.sin(phis)\n big_matrix = np.column_stack((cosphis,sinphis))\n points = np.zeros((int(num_cameras*(num_cameras-1)/2),2))\n i = 0\n for pair in itertools.combinations(range(num_cameras), 2):\n matrix_a = np.vstack((big_matrix[pair[0]],big_matrix[pair[1]]))\n vec_b = np.hstack((rads[pair[0]],rads[pair[1]]))\n point = np.linalg.solve(matrix_a, vec_b)\n points[i] = point\n i += 1\n drone_pos = np.mean(points,axis=0)\n\n # start of third loop\n for camera_num in range(num_cameras):\n d_prime = find_d_prime(d[camera_num], theta[camera_num], drone_pos)\n P,Q,M,N = find_P_Q_M_N(p[camera_num],q[camera_num],r[camera_num])\n h = find_h(d[camera_num],P,Q,M,N)\n angle_4 = find_angle_4(h,d[camera_num])\n Y = find_Y(drone[camera_num], cent[camera_num])\n angle_5 = find_angle_5(Y, VFOV, VPIX)\n angle_6 = angle_5 - angle_4\n h_prime = find_h_prime(d_prime, angle_6)\n Hs[camera_num] = h + h_prime\n drone_h = np.mean(H)\n return np.append(drone_pos,drone_h)", "def 
find_inplane_to_match(phiA,thetaA,phiB,thetaB,psiA=0,psiB=0):\n\t#from math import pi, sqrt, cos, acos, sin\n\n\tRA = Transform({'type': 'spider', 'phi': phiA, 'theta': thetaA, 'psi': psiA})\n\tRB = Transform({'type': 'spider', 'phi': phiB, 'theta': thetaB, 'psi': psiB})\n\tRBT = RB.transpose()\n\tRABT = RA * RBT\n\n\tRABTeuler = RABT.get_rotation('spider')\n\tRABTphi = RABTeuler['phi']\n\tRABTtheta = RABTeuler['theta']\n\tRABTpsi = RABTeuler['psi']\n\n\t#deg_to_rad = pi/180.0\n\t#thetaAR = thetaA*deg_to_rad\n\t#thetaBR = thetaB*deg_to_rad\n\t#phiAR = phiA*deg_to_rad\n\t#phiBR = phiB *deg_to_rad\n\n\t#d12=cos(thetaAR)*cos(thetaBR) + sin(thetaAR)*sin(thetaBR)*cos(phiAR-phiBR)\n\treturn (-RABTpsi-RABTphi),RABTtheta # 180.0*acos(d12)/pi;", "def Psi(l,m,theta,phi):\n if numpy.isscalar(theta): \n theta=numpy.array([[theta]])\n phi=numpy.array([[phi]])\n Psilm_th=numpy.zeros(theta.shape,dtype=complex)\n Psilm_ph=numpy.zeros(theta.shape,dtype=complex)\n x=numpy.cos(theta)\n thetaNonZerosIdx=numpy.where(theta!=0.0)\n if len(thetaNonZerosIdx[0]) != 0:\n Ylm=scipy.special.sph_harm(m,l,phi[thetaNonZerosIdx],theta[thetaNonZerosIdx])\n #Compute derivative of sphrHarm function w.r.t. theta:\n if l>=numpy.abs(m):\n Plmpo=legendreLM(l,m+1,x[thetaNonZerosIdx])\n YlmPmpo=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/float(math.factorial(l+m)))*Plmpo*numpy.exp(1j*m*phi[thetaNonZerosIdx])\n #YlmPmpo=sqrt((l-m)*(l+m+1))*spharm(l,m+1,theta,phi)*exp(-i*phi) %Should be equivalent to above formula.\n dtYlm=+YlmPmpo+m*x[thetaNonZerosIdx]*Ylm/numpy.sin(theta[thetaNonZerosIdx])\n # thetZerInd=[find(theta==0); find(theta==pi)]\n # dtYlm(thetZerInd)=0; %This is a fudge to remove NaNs\n else:\n dtYlm=numpy.zeros(theta[thetaNonZerosIdx].shape,dtype=complex)\n\n #dtYlm=spharmDtheta(l,m,theta,phi)\n\n Psilm_ph[thetaNonZerosIdx]=+1j*m/numpy.sin(theta[thetaNonZerosIdx])*Ylm\n Psilm_th[thetaNonZerosIdx]=+dtYlm\n #Ref: http://mathworld.wolfram.com/VectorSphericalHarmonic.html\n\n thetaZerosIdx=numpy.where(theta==0.0)\n if len(thetaZerosIdx[0]) != 0:\n if numpy.abs(m)==1:\n Yl1B=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/math.factorial(l+m))*PBl1(l,m)*numpy.exp(1j*m*phi[thetaZerosIdx])\n Plmpo=legendreLM(l,m+1,x[thetaZerosIdx])\n YlmPmpo=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/math.factorial(l+m))*Plmpo*numpy.exp(1j*m*phi[thetaZerosIdx])\n dtYlm=+YlmPmpo+m*Yl1B\n Psilm_ph[thetaZerosIdx]=+1j*m*Yl1B\n Psilm_th[thetaZerosIdx]=+dtYlm\n else:\n Plmpo=legendreLM(l,m+1,x[thetaZerosIdx])\n YlmPmpo=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/math.factorial(l+m))*Plmpo*numpy.exp(1j*m*phi[thetaZerosIdx])\n dtYlm=+YlmPmpo+0\n Psilm_ph[thetaZerosIdx]=0\n Psilm_th[thetaZerosIdx]=+dtYlm\n return Psilm_th,Psilm_ph", "def cart2polar3d(cartesian):\n radius = np.linalg.norm(cartesian)\n theta = np.cos", "def determine_in_plane_angle(self, qxy, qz=0.0, theta_incident=0.0):\n \n k = self.get_k()\n if theta_incident==None:\n # Use internal value\n theta_incident = self.theta_incident\n theta_incident_rad = np.radians(theta_incident)\n \n from scipy.optimize import fsolve\n \n def equations(p, qxy=qxy, qz=qz, theta_incident=theta_incident, k=k):\n \n # The variable we are fitting for\n omega_rad, = p\n \n # Non-fit values: qxy, qz, k, theta_incident, k\n \n return ( (qxy*cos(omega_rad))**2 + (qxy*sin(omega_rad)+k*cos(theta_incident_rad))**2 + (qz-k*sin(theta_incident_rad))**2 - k**2 )\n\n \n omega_rad, = fsolve(equations, ( np.radians(5.0) ) )\n #print( 'omega_rad = %.2f (err = %.4f)' % ( omega_rad, equations((omega_rad, )) 
) )\n \n omega = abs( np.degrees(omega_rad) )\n #print( 'omega = %.2f (err = %.4f)' % ( omega, equations((omega_rad, )) ) )\n \n \n return omega", "def Distance2RRhoPhi(r1,r2,r3):\n \n # Calculate the square-distances of \n # each pair of atoms.\n r1 = np.array(r1)\n r2 = np.array(r2) \n r3 = np.array(r3)\n \n rr1 = r1*r1\n rr2 = r2*r2\n rr3 = r3*r3\n \n return TriatomicRadialPolar.DistanceSquared2RRhoPhi(rr1,rr2,rr3)", "def rotxaxis(ya, za, angle):\n\n y = ya * math.cos(angle) - za * math.sin(angle) \n z = ya * math.sin(angle) + za * math.cos(angle)\n \n return y, z", "def task_three():\n # Formula to calculate:\n # q2 = (z2 / z1) * (R + T * nt / d) * q1\n # where R - rotation\n # T - translation\n # nt - normal vertex of common plane of the 3d points\n # d - shift of the common plane\n # and (R + T * nt / d) required homography transform\n # defined up to constant\n # But in our case T == 0\n tetta = 30 * np.pi / 180\n H = np.array([[1, 0, 0],\n [0, np.cos(tetta), -np.sin(tetta)],\n [0, np.sin(tetta), np.cos(tetta)],\n ])\n print(\"Homography transformation:\\n\", H)", "def _position_cylindrical2spherical(pos):\n\n rho=pos[:,0]\n theta_cylindrical=pos[:,1]\n z=pos[:,2]\n\n r=np.sqrt(rho**2+z**2)\n theta_spherical=np.arctan2(rho,z)\n phi=theta_cylindrical\n\n return np.dstack((r,theta_spherical,phi))[0]", "def getPhi(mass,resonance):\n return numpy.arctan((resonance.r0*resonance.w0)/(mass**2-resonance.w0**2)) #need to make this arccotan? invert args", "def get_angle(a: Keypoint, b: Keypoint, c: Keypoint) -> float:\n # get a vector with origin in (0,0) from points a and b by substracting Point a from Point b\n vector_a = keypoint_to_vector(a, b)\n vector_c = keypoint_to_vector(c, b)\n # https://de.wikipedia.org/wiki/Skalarprodukt => winkel phi = arccos(...)\n phi = np.arccos(np.dot(vector_a, vector_c) / (np.linalg.norm(vector_a) * np.linalg.norm(vector_c)))\n angle_left_opening = np.cross(vector_a, vector_c) < 0\n return phi if angle_left_opening else -phi", "def phitheta(loc):\n x = loc[0]\n y = loc[1]\n z = loc[2]\n r = sqrt(x**2 + y**2 + z**2)\n theta = arcsin(z/r)\n phi = arctan2(y,x)\n return(phi, theta)", "def proj_gnomonic_plane(lamb0, phi1, lamb, phi):\n\n cosc = np.sin(phi1)*np.sin(phi)\n cosc += np.cos(phi1)*np.cos(phi)*np.cos(lamb-lamb0)\n\n x = np.cos(phi)*np.sin(lamb-lamb0)\n x /= cosc\n\n y = np.cos(phi1)*np.sin(phi)\n y -= np.sin(phi1)*np.cos(phi)*np.cos(lamb-lamb0)\n\n y /= cosc\n\n return x, y", "def theta_phi_of_complex(z):\n return np.stack([theta_of_complex(z), phi_of_complex(z)], axis=1)", "def euler2rot3D(psi, theta, phi):\n Rphi = np.array([[np.cos(phi), np.sin(phi), 0],\n [-np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]])\n Rtheta = np.array([[np.cos(theta), 0, -np.sin(theta)],\n [0, 1, 0],\n [np.sin(theta), 0, np.cos(theta)]])\n Rpsi = np.array([[np.cos(psi), np.sin(psi), 0],\n [-np.sin(psi), np.cos(psi), 0],\n [0, 0, 1]])\n return np.dot(Rpsi, np.dot(Rtheta, Rphi))", "def rotation(self, p1, p2, p3):\n return (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0])", "def coord_space(\n a0: numpy.ndarray, a1: numpy.ndarray, a2: numpy.ndarray, rev: bool = False\n) -> Tuple[numpy.ndarray, Optional[numpy.ndarray]]:\n # dbg = False\n # if dbg:\n # print(a0.transpose())\n # print(a1.transpose())\n # print(a2.transpose())\n\n # a0 = acs[0]\n # a1 = acs[1]\n # a2 = acs[2]\n\n global gtm\n global gmry\n global gmrz, gmrz2\n\n tm = gtm\n mry = gmry\n mrz = gmrz\n mrz2 = gmrz2\n\n # tx acs[1] to origin\n # tm = homog_trans_mtx(-a1[0][0], -a1[1][0], -a1[2][0])\n 
set_homog_trans_mtx(-a1[0], -a1[1], -a1[2], tm)\n\n # directly translate a2 using a1\n p = a2 - a1\n sc = get_spherical_coordinates(p)\n\n # if dbg:\n # print(\"p\", p.transpose())\n # print(\"sc\", sc)\n\n # mrz = homog_rot_mtx(-sc[1], \"z\") # rotate translated a2 -azimuth about Z\n set_Z_homog_rot_mtx(-sc[1], mrz)\n # mry = homog_rot_mtx(-sc[2], \"y\") # rotate translated a2 -polar_angle about Y\n set_Y_homog_rot_mtx(-sc[2], mry)\n\n # mt completes a1-a2 on Z-axis, still need to align a0 with XZ plane\n # mt = mry @ mrz @ tm # python 3.5 and later\n mt = gmry.dot(gmrz.dot(gtm))\n\n # if dbg:\n # print(\"tm:\\n\", tm)\n # print(\"mrz:\\n\", mrz)\n # print(\"mry:\\n\", mry)\n # # print(\"mt \", mt)\n\n p = mt.dot(a0)\n\n # if dbg:\n # print(\"mt:\\n\", mt, \"\\na0:\\n\", a0, \"\\np:\\n\", p)\n\n # need azimuth of translated a0\n # sc2 = get_spherical_coordinates(p)\n # print(sc2)\n azimuth2 = _get_azimuth(p[0], p[1])\n\n # rotate a0 -azimuth2 about Z to align with X\n # mrz2 = homog_rot_mtx(-azimuth2, \"z\")\n set_Z_homog_rot_mtx(-azimuth2, mrz2)\n\n # mt = mrz2 @ mt\n mt = gmrz2.dot(mt)\n\n # if dbg:\n # print(\"mt:\", mt, \"\\na0:\", a0, \"\\np:\", p)\n # # print(p, \"\\n\", azimuth2, \"\\n\", mrz2, \"\\n\", mt)\n\n # if dbg:\n # print(\"mt:\\n\", mt)\n # print(\"<<<<<<==============================\")\n\n if not rev:\n return mt, None\n\n # rev=True, so generate the reverse transformation\n\n # rotate a0 theta about Z, reversing alignment with X\n # mrz2 = homog_rot_mtx(azimuth2, \"z\")\n set_Z_homog_rot_mtx(azimuth2, mrz2)\n # rotate a2 phi about Y\n # mry = homog_rot_mtx(sc[2], \"y\")\n set_Y_homog_rot_mtx(sc[2], mry)\n # rotate a2 theta about Z\n # mrz = homog_rot_mtx(sc[1], \"z\")\n set_Z_homog_rot_mtx(sc[1], mrz)\n # translation matrix origin to a1\n # tm = homog_trans_mtx(a1[0][0], a1[1][0], a1[2][0])\n set_homog_trans_mtx(a1[0], a1[1], a1[2], tm)\n\n # mr = tm @ mrz @ mry @ mrz2\n mr = gtm.dot(gmrz.dot(gmry.dot(gmrz2)))\n # mr = numpy.dot(tm, numpy.dot(mrz, numpy.dot(mry, mrz2)))\n\n return mt, mr", "def calculate_coefficients(self):\n for i in range(0, self.nz):\n zno = i * self.dz\n self.z[0][i] = zno\n plot_eccentricity_error = False\n position = -1\n for j in range(0, self.ntheta):\n # fmt: off\n self.gama[i][j] = j * self.dtheta + (np.pi - self.beta)\n [radius_external, self.xre[i][j], self.yre[i][j]] = \\\n self.external_radius_function(self.gama[i][j])\n [radius_internal, self.xri[i][j], self.yri[i][j]] = \\\n self.internal_radius_function(zno, self.gama[i][j])\n self.re[i][j] = radius_external\n self.ri[i][j] = radius_internal\n\n w = self.omega * self.ri[i][j]\n\n k = (self.re[i][j] ** 2 * (np.log(self.re[i][j]) - 1 / 2) - self.ri[i][j] ** 2 *\n (np.log(self.ri[i][j]) - 1 / 2)) / (self.ri[i][j] ** 2 - self.re[i][j] ** 2)\n\n self.c1[i][j] = (1 / (4 * self.viscosity)) * ((self.re[i][j] ** 2 * np.log(self.re[i][j]) -\n self.ri[i][j] ** 2 * np.log(self.ri[i][j]) +\n (self.re[i][j] ** 2 - self.ri[i][j] ** 2) *\n (k - 1)) - 2 * self.re[i][j] ** 2 * (\n (np.log(self.re[i][j]) + k - 1 / 2) * np.log(\n self.re[i][j] / self.ri[i][j])))\n\n self.c2[i][j] = (- self.ri[i][j] ** 2) / (8 * self.viscosity) * \\\n ((self.re[i][j] ** 2 - self.ri[i][j] ** 2 -\n (self.re[i][j] ** 4 - self.ri[i][j] ** 4) /\n (2 * self.ri[i][j] ** 2)) +\n ((self.re[i][j] ** 2 - self.ri[i][j] ** 2) /\n (self.ri[i][j] ** 2 *\n np.log(self.re[i][j] / self.ri[i][j]))) *\n (self.re[i][j] ** 2 * np.log(self.re[i][j] / self.ri[i][j]) -\n (self.re[i][j] ** 2 - self.ri[i][j] ** 2) / 2))\n\n self.c0w[i][j] = (- w * 
self.ri[i][j] *\n (np.log(self.re[i][j] / self.ri[i][j]) *\n (1 + (self.ri[i][j] ** 2) / (self.re[i][j] ** 2 - self.ri[i][j] ** 2)) - 1 / 2))\n # fmt: on\n if not plot_eccentricity_error:\n if abs(self.xri[i][j]) > abs(self.xre[i][j]) or abs(\n self.yri[i][j]\n ) > abs(self.yre[i][j]):\n plot_eccentricity_error = True\n position = i\n if plot_eccentricity_error:\n self.plot_eccentricity(position)\n sys.exit(\n \"Error: The given parameters create a rotor that is not inside the stator. \"\n \"Check the plotted figure and fix accordingly.\"\n )", "def DistanceSquared2RRhoPhi(rr1,rr2,rr3):\n \n # Calculate the square-distances of \n # each pair of atoms.\n rr1 = np.array(rr1)\n rr2 = np.array(rr2) \n rr3 = np.array(rr3)\n \n B = (rr1 + rr2 + rr3) / 3 # B = R**2 (1 + rho**2)\n \n x = (2*rr1 - rr2 - rr3) / 6 # x = R**2 rho cos\n y = (rr2 - rr3) / np.sqrt(12) # y = R**2 rho sin\n \n A2 = x**2 + y**2 # A^2, A = R**2 rho\n \n phi = np.arctan2(y,x) # tan = y/x\n \n phi = np.mod(phi, 2*np.pi) # Move [-pi,pi] to [0, 2*pi)\n \n R2 = (B/2) * (1 + np.sqrt(abs(1 - 4*A2/B**2))) # R**2\n R = np.sqrt(R2) \n rho = np.sqrt(A2) / R2 \n \n return R, rho, phi", "def getPhNodes(self,pH, pos=None, rootA=False):\n\t\tif pos==None: \n\t\t\tpos=self.pos\n\t\t\t[r,th]=self.posCyl\n\t\t\tif len(self.plantHeads)==1:\n\t\t\t\torig=self.getPHCoord(pos)[0]\n\t\t\telse:\n\t\t\t\tpositions=self.getPHCoord(pos)\n\t\t\t\torig=positions[pH.number]\n\t\telse:\n\t\t\t#first, determine the direction of the CRANE\n\t\t\torig=pos\n\t\t\tposCyl=self.m.getCylindrical(pos)\n\t\t\t#this may seem like a sign-error, but remember that pos is pH-pos. Need th for self, not pH\n\t\t\tif len(self.plantHeads)==1: #bracke\n\t\t\t\tth=posCyl[1]\n\t\t\telse:\n\t\t\t\tx=-(self.plantAreaW*0.5-pH.width*0.5)\n\t\t\t\tx+=pH.number*self.plantSepDist #this is the upper part of the triangle.\n\t\t\t\tth=posCyl[1]+asin(x/posCyl[0])\n\t\tdirection=self.m.direction+th-pi/2.\n\t\tcart=self.m.getCartesian\n\t\tw=pH.width\n\t\tl=pH.length\n\t\tif rootA:\n\t\t\tw=w*0.75\n\t\t\tl=l*0.75\n\t\tc1=cart([w/2., l/2.],origin=orig, direction=direction, local=False, fromLocalCart=True)\n\t\tc2=cart([-w/2., l/2.],origin=orig, direction=direction, local=False, fromLocalCart=True)\n\t\tc3=cart([-w/2., -l/2.],origin=orig, direction=direction, local=False, fromLocalCart=True)\n\t\tc4=cart([w/2., -l/2.],origin=orig, direction=direction, local=False, fromLocalCart=True)\n\t\treturn [c1,c2,c3,c4]", "def thetaInner(self):\n if self.theta in range(1, len(self.ThRZmesh.getPositions(label=\"Th\"))):\n Th = self.ThRZmesh.getUpper(label=\"Th\", n=(self.theta - 1))\n else:\n runLog.warning(\n \"Error: Azimuthal Index ({0}) location not INSIDE mesh \".format(\n self.theta\n )\n )\n runLog.warning(self.ThRZmesh.getPositions(label=\"Th\"))\n Th = None\n return Th", "def phi(self):\n return np.arctan2(np.sqrt(self.x**2 + self.y**2), self.z)", "def intersection_left(theta):\n a_ccs_lsm_trans = hom_translation_matrix(\n t_x=0.139807669447128, t_y=0.0549998406976098, t_z=-0.051)\n a_ccs_lsm_rot = hom_rotation(z_axis_rotation_matrix(radians(-345.0)))\n a_mcs_1_joint = hom_rotation(z_axis_rotation_matrix(theta))\n a_mcs_1_sp_1_1 = hom_translation_matrix(\n t_x=0.085, t_y=0, t_z=-0.0245)\n\n a_ccs_sp_1_1 = a_ccs_lsm_trans @ a_ccs_lsm_rot @ a_mcs_1_joint @ a_mcs_1_sp_1_1\n return get_translation(a_ccs_sp_1_1)", "def _position_spherical2cylindrical(pos):\n \n\n r=pos[:,0]\n theta_spherical=pos[:,1]\n phi_spherical=pos[:,2]\n\n if any(theta_spherical>np.pi) or any(theta_spherical<0): #sanity check. 
not necessary for phi.\n raise ValueError, \"Theta beyond [0,pi]. Exiting.\"\n\n rho=r*np.sin(theta_spherical)\n theta_cylindrical=phi_spherical\n z=r*np.cos(theta_spherical)\n\n return np.dstack((rho,theta_cylindrical,z))[0]", "def spherical_deproject(phi, theta,\n celestial_pole_x, celestial_pole_y,\n celestial_cos_lat, celestial_sin_lat,\n native_pole_x): # pragma: no cover\n\n d_phi = phi - native_pole_x\n right_angle = np.pi / 2\n\n if equal_angles(np.abs(celestial_pole_y), right_angle):\n if celestial_pole_y > 0:\n cx = celestial_pole_x + d_phi - np.pi\n cy = theta\n else:\n cx = celestial_pole_x - d_phi\n cy = -theta\n\n else:\n cos_theta = np.cos(theta)\n sin_theta = np.sin(theta)\n cos_d_phi = np.cos(d_phi)\n cx = celestial_pole_x + np.arctan2(\n -cos_theta * np.sin(d_phi),\n ((sin_theta * celestial_cos_lat)\n - (cos_theta * celestial_sin_lat * cos_d_phi)))\n cy = asin(\n (sin_theta * celestial_sin_lat)\n + (cos_theta * celestial_cos_lat * cos_d_phi))\n\n return cx, cy", "def test_from_rotation_angle_coordinate_of_phi(rotationangle):\n\n # Get the coordinate at phi\n phi_dash = rotationangle[\"phi\"]\n c3 = rotationangle[\"cs\"].from_rotation_angle(phi_dash)\n\n # Ensure that it is at the origin\n assert c3 == pytest.approx(0.0)", "def pol2cart(theta: float, rho: float) -> typing.Tuple[float, float]:\n return rho * cos(theta), rho * sin(theta)", "def rotation3Dx(theta):\n rmat = np.zeros((3,3))\n rmat[0,0], rmat[0,1], rmat[0,2] = 1.0, 0.0, 0.0\n rmat[1,0], rmat[1,1], rmat[1,2] = 0.0, np.cos(theta), np.sin(theta)\n rmat[2,0], rmat[2,1], rmat[2,2] = 0.0, -np.sin(theta), np.cos(theta)\n \n return rmat", "def _get_midplane_polar_coords(self, x0, y0, inc, PA):\n x_mid, y_mid = self._get_midplane_cart_coords(x0, y0, inc, PA)\n return np.hypot(y_mid, x_mid), np.arctan2(y_mid, x_mid)", "def get_torque(self, theta, modulus):\n\n\t\treturn self.get_k(modulus)*theta", "def thetaOuter(self):\n if self.theta in range(1, len(self.ThRZmesh.getPositions(label=\"Th\"))):\n Th = self.ThRZmesh.getUpper(label=\"Th\", n=(self.theta))\n else:\n runLog.warning(\n \"Error: Azimuthal Index ({}) location not INSIDE mesh \".format(\n self.theta\n )\n )\n runLog.warning(self.ThRZmesh.getPositions(label=\"Th\"))\n Th = None\n return Th", "def cart2pol(x, y, z=None):\n x = np.asarray(x)\n y = np.asarray(y)\n\n rho = np.hypot(x, y)\n theta = np.arctan2(y, x)\n\n if z is None:\n return theta, rho\n else:\n return theta, rho, z", "def pol2cart(theta, rho):\n x = rho * np.cos(theta)\n y = rho * np.sin(theta)\n return x, y", "def calc_theta(U, V, quad, h_length, radians):\n import numpy as np\n theta = np.arcsin(U / h_length)\n import numpy as np\n if quad == 1:\n theta = theta\n elif quad == 2:\n theta = -theta + np.pi / 2\n elif quad - - 3:\n theta = np.pi / 2 + theta + np.pi\n elif quad == 4:\n theta = 3 * np.pi / 2\n theta = 2 * np.pi - theta\n if not radians:\n theta = theta * 180 / np.pi\n\n return (theta)", "def jac(p, r, theta):\n a, e = p\n da = (1 - e**2)/(1 - e*np.cos(theta))\n de = (-2*a*e*(1-e*np.cos(theta)) + a*(1-e**2)*np.cos(theta))/(1 -e*np.cos(theta))**2\n return -da, -de\n return np.array((-da, -de)).T", "def spheric2cart(theta, phi):\n x = np.sin(theta) * np.cos(phi)\n y = np.sin(theta) * np.sin(phi)\n z = np.cos(theta)\n return x, y, z", "def compute_egocentric_delta(p1, r1, p2, r2):\n x1, y1, z1 = p1\n x2, y2, z2 = p2\n theta_1 = compute_heading_from_quaternion(r1)\n theta_2 = compute_heading_from_quaternion(r2)\n\n D_rho = math.sqrt((x1 - x2) ** 2 + (z1 - z2) ** 2)\n D_phi = (\n math.atan2(x2 
- x1, -z2 + z1) - theta_1\n ) # counter-clockwise rotation about Y from -Z to X\n D_theta = theta_2 - theta_1\n\n return (D_rho, D_phi, D_theta)", "def parang (hourangle, declination, latitude):\n\n return -np.arctan2 (-np.sin (hourangle),\n np.cos (declination) * np.tan (latitude)\n - np.sin (declination) * np.cos (hourangle))", "def clockwise(p1, p2, p3):\n\tv1 = p2 - p1\n\tv2 = p3 - p2\n\tc = (v2.x * v1.y) - (v1.x * v2.y)\n\tif c > 0:\n\t\treturn True\n\telse:\n\t\treturn False", "def get_dihedral(p0,p1,p2,p3,unit):\n if unit == 'Ang':\n p0 = p0*0.529177249\n p1 = p1*0.529177249\n p2 = p2*0.529177249\n p3 = p3*0.529177249\n\n b0 = -1.0*(p1 - p0)\n b1 = p2 - p1\n b2 = p3 - p2\n\n # normalize b1 so that it does not influence magnitude of vector\n # rejections that come next\n b1 /= linalg.norm(b1)\n\n # vector rejections\n # v = projection of b0 onto plane perpendicular to b1\n # = b0 minus component that aligns with b1\n # w = projection of b2 onto plane perpendicular to b1\n # = b2 minus component that aligns with b1\n v = b0 - dot(b0, b1)*b1\n w = b2 - dot(b2, b1)*b1\n\n # angle between v and w in a plane is the torsion angle\n # v and w may not be normalized but that's fine since tan is y/x\n x = dot(v, w)\n y = dot(cross(b1, v), w)\n return degrees(arctan2(y, x))\n\n #q1 = subtract(p1,p0) # b - a \n #q2 = subtract(p2,p1) # c - b \n #q3 = subtract(p3,p2) # d - c\n #print(q1,q2)\n\n #q1_x_q2 = cross(q1,q2) \n #q2_x_q3 = cross(q2,q3)\n\n #n1 = q1_x_q2/sqrt(dot(q1_x_q2,q1_x_q2)) \n #n2 = q2_x_q3/sqrt(dot(q2_x_q3,q2_x_q3))\n\n #u1 = n2\n #u3 = q2/(sqrt(dot(q2,q2))) \n #u2 = cross(u3,u1)\n\n #cos_theta = dot(n1,u1)\n #sin_theta = dot(n1,u2)\n ## Calculate theta\n #theta = -atan2(sin_theta,cos_theta)\n ## it is different from atan2 from fortran math.atan2(y,x)\n #theta_deg = degrees(theta)\n #return(theta_deg)", "def angle3pt(\n ax: float, ay: float, bx: float, by: float, cx: float, cy: float\n ) -> float:\n ang = math.degrees(math.atan2(cy - by, cx - bx) - math.atan2(ay - by, ax - bx))\n return ang + 360 if ang < 0 else ang", "def spherical_project(x, y, cos_lat, sin_lat,\n celestial_pole_x, celestial_pole_y,\n celestial_cos_lat, celestial_sin_lat, native_pole_x\n ): # pragma: no cover\n right_angle = np.pi / 2\n\n d_lon = x - celestial_pole_x\n if equal_angles(np.abs(celestial_pole_y), right_angle):\n if celestial_pole_y > 0:\n phi = native_pole_x + d_lon + np.pi\n theta = y\n else:\n phi = native_pole_x - d_lon\n theta = -y\n else:\n cos_d_lon = np.cos(d_lon)\n\n phi = native_pole_x + np.arctan2(\n -cos_lat * np.sin(d_lon),\n (sin_lat * celestial_cos_lat)\n - (cos_lat * celestial_sin_lat * cos_d_lon))\n\n theta = asin(\n (sin_lat * celestial_sin_lat)\n + (cos_lat * celestial_cos_lat * cos_d_lon))\n\n phi = np.fmod(phi, two_pi)\n\n return theta, phi", "def _circleCircleTangentsXY(c1,c2):\n\n a = c1[1][0]\n b = c2[1][0]\n if a>b:\n bigIsOne=True\n bigC = c1\n smallC = c2\n else:\n bigIsOne=False\n bigC = c2\n smallC = c1\n ## Consdier the triangle created by the center of the small\n ## circle, the center of the large circle, and the point at the 90\n ## degree intersection of the line from the center of the small\n ## circle to the radian of the tangent point on the large circle.\n ## This is a right triangle with one leg of length d (distance of\n ## centers), one leg of length bigR-smallR, and one leg of unknown\n ## length, beta. 
theta is the angle formed by d and beta, which is\n ## also the angle of one of the the tangent lines, the other being\n ## -theta.\n ## \n ## we will calulate theta as follows:\n ## beta^2 - (r2-r1)^2 = d^2\n ## beta = sqrt( d^2 - (r2-r1)^2 )\n ## theta = atan ((r2-r1)/beta)\n \n r1 = smallC[1][0]\n r2 = bigC[1][0]\n\n d = dist(c1[0],c2[0])\n mpd = mpm.mpf(d)\n dr = r2-r1\n mpdr = mpm.mpf(dr)\n\n if d <= dr: #centers too close\n raise ValueError('circleCircleTangentsXY: centers of circles too close')\n \n beta = mpm.sqrt( mpd*mpd - mpdr*mpdr)\n theta = float(mpm.atan2(dr,beta))\n\n ## now, figure out the angle created by the center of the large\n ## circle with respect to the small circle\n dd = sub(bigC[0],smallC[0])\n phi = atan2(dd[1],dd[0])\n\n ## the two lines have angle phi+theta, and phi-theta. The\n ## intersection point of these lines is at the point on the circle\n ## phi+theta+90', and phi-theta-90'\n gamma1 = phi+theta+pi/2\n gamma2 = phi-theta-pi/2\n n1 = point(cos(gamma1),sin(gamma1))\n n2 = point(cos(gamma2),sin(gamma2))\n p1 = add(scale3(n1,r1),smallC[0])\n p2 = add(scale3(n1,r2),bigC[0])\n p3 = add(scale3(n2,r1),smallC[0])\n p4 = add(scale3(n2,r2),bigC[0])\n\n l1 = l2 = []\n if bigIsOne:\n l1=line(p2,p1)\n l2=line(p4,p3)\n else:\n l1 = line(p1,p2)\n l2 = line(p3,p4)\n\n return [l1,l2]", "def Spin(phi,theta):\n return 1/2*(cos(phi)*sin(theta)*xhat + sin(phi)*sin(theta)*yhat + cos(theta)*zhat)", "def inverse_kinematic(point_of_interest, degrees=False):\n #Link size (with motor diameter)\n L2 = 11.300\n L3 = 10.844\n\n x_c = point_of_interest.item(0)\n y_c = point_of_interest.item(1)\n z_c = point_of_interest.item(2)\n\n theta_1 = np.arctan2(y_c, x_c) #First joint angle\n a = L2*np.cos(theta_1) #L2 projection in xz plane with theta_1\n b = L3*np.cos(theta_1) #L3 projection in xz plane with theta_1\n\n # Cosine rule to compute theta_3\n k = (-np.power(x_c, 2) - np.power(z_c,2) + np.power(a, 2) + np.power(b, 2))/(2*a*b)\n theta_3 = np.pi - np.arccos(k);\n\n # Compute theta_2 using trigonometry\n # theta_2: angle between L1 and L2\n p = b*np.sin(theta_3)/(a + b*np.cos(theta_3))\n theta_2 = np.arctan2(z_c,x_c) - np.arctan(p)\n\n if(degrees):\n return(np.array([np.rad2deg(theta_1), np.rad2deg(theta_2), np.rad2deg(theta_3)]))\n else:\n return(np.array([theta_1, theta_2, theta_3]))", "def differential_rotation(lat, A, B, C):\n \n lat_deg = lat * np.pi/180.\n return A + B * np.sin(lat_deg)**2 + C * np.sin(lat_deg)**4", "def Cartesian(self, BAT):\n # Arrange BAT coordinates in convenient arrays\n offset = 6 if len(BAT) == (3 * self.natoms) else 0\n bonds = BAT[offset + 3::3]\n angles = BAT[offset + 4::3]\n phase_torsions = BAT[offset + 5::3]\n torsions = [(phase_torsions[n] + phase_torsions[self._firstTorsionTInd[n]]) \\\n if self._firstTorsionTInd[n]!=n else phase_torsions[n] \\\n for n in range(self.ntorsions)]\n\n p1 = np.array([0., 0., 0.])\n p2 = np.array([0., 0., BAT[offset]])\n p3 = np.array([BAT[offset+1]*np.sin(BAT[offset+2]), 0., \\\n BAT[offset]-BAT[offset+1]*np.cos(BAT[offset+2])])\n\n # If appropriate, rotate and translate the first three atoms\n if offset == 6:\n # Rotate the third atom by the appropriate value\n (phi, theta, omega) = BAT[3:6]\n co = np.cos(omega)\n so = np.sin(omega)\n Romega = np.array([[co, -so, 0], [so, co, 0], [0, 0, 1]])\n p3 = Romega.dot(p3)\n # Rotate the second two atoms to point in the right direction\n cp = np.cos(phi)\n sp = np.sin(phi)\n ct = np.cos(theta)\n st = np.sin(theta)\n Re = np.array([[cp * ct, -sp, cp * st], [ct * sp, cp, sp * 
st],\n [-st, 0, ct]])\n p2 = Re.dot(p2)\n p3 = Re.dot(p3)\n # Translate the first three atoms by the origin\n origin = np.array(BAT[:3])\n p1 += origin\n p2 += origin\n p3 += origin\n\n XYZ = np.zeros((self.natoms, 3))\n\n XYZ[self.rootInd[0]] = p1\n XYZ[self.rootInd[1]] = p2\n XYZ[self.rootInd[2]] = p3\n\n for ((a1,a2,a3,a4), bond, angle, torsion) in \\\n zip(self._torsionIndL,bonds,angles,torsions):\n sphere = Sphere(Vector(XYZ[a2]), bond)\n cone = Cone(Vector(XYZ[a2]), Vector(XYZ[a3] - XYZ[a2]), angle)\n plane123 = Plane(Vector(XYZ[a4]), Vector(XYZ[a3]), Vector(XYZ[a2]))\n points = sphere.intersectWith(cone).intersectWith(plane123)\n p = points[0] if (Plane(Vector(XYZ[a3]), Vector(\n XYZ[a2]), points[0]).normal * plane123.normal) > 0 else points[1]\n p = rotatePoint(Vector(p),\n Line(Vector(XYZ[a2]), Vector(XYZ[a2] - XYZ[a3])),\n torsion)\n XYZ[a1] = p.array\n\n return XYZ\n\n for ((a1,a2,a3,a4), bond, angle, torsion) in \\\n zip(self._torsionIndL,bonds,angles,torsions):\n\n p2 = XYZ[a2]\n p3 = XYZ[a3]\n p4 = XYZ[a4]\n\n # circle = sphere.intersectWith(cone)\n n23 = normalize(p3 - p2)\n\n # points = circle.intersectWith(plane123)\n # plane.intersectWith(Plane(circle.center, circle.normal)) is a line\n # line_direction = cross(normalize(cross(p4-p3,n23)),n23)\n\n # Rotate the point about the p2-p3 axis by the torsion angle\n v21 = (bond * np.cos(angle)) * n23 - (bond * np.sin(angle)) * cross(\n normalize(cross(p4 - p3, n23)), n23)\n s = np.sin(torsion)\n c = np.cos(torsion)\n XYZ[a1] = p2 - cross(n23, v21) * s + np.sum(\n n23 * v21) * n23 * (1.0 - c) + v21 * c", "def phi(cylindrical_x: sc.Variable, cylindrical_y: sc.Variable) -> sc.Variable:\n return sc.atan2(y=cylindrical_y, x=cylindrical_x)", "def euler_to_rot3d(psi, theta, phi):\n rphi = np.array([[np.cos(phi), -np.sin(phi), 0],\n [np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]])\n rtheta = np.array([[np.cos(theta), 0, np.sin(theta)],\n [0, 1, 0],\n [-np.sin(theta), 0, np.cos(theta)]])\n rpsi = np.array([[np.cos(psi), -np.sin(psi), 0],\n [np.sin(psi), np.cos(psi), 0],\n [0, 0, 1]])\n return np.dot(rpsi, np.dot(rtheta, rphi))", "def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror", "def find_plane_angles(self, roof_motor_position):\n\n # Calcolo il punto mediano tra i vertici 2 e 3\n pc_x = (self.roof_vertex_x[1] + self.roof_vertex_x[2]) / 2\n pc_y = (self.roof_vertex_y[1] + self.roof_vertex_y[2]) / 2\n pc_z = (self.roof_vertex_z[1] + self.roof_vertex_z[2]) / 2\n\n # Questa non so cosa sia\n base_r = [[self.roof_vertex_x[0] - pc_x, self.roof_vertex_y[0] - pc_y, self.roof_vertex_z[0] - pc_z],\n [self.roof_vertex_x[1] - pc_x, self.roof_vertex_y[1] - pc_y, self.roof_vertex_z[1] - pc_z],\n [0.0, 0.0, 0.0]]\n\n # Questa e' la costruzione di una matrice\n mat_rot = [[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0]]\n\n # Non so quale operazione è implementata, ma a me servono solo tre elementi, j=2, i=0,1, j=1, i=0\n # Primo elemento, j=1, i=0\n mr = math.sqrt((base_r[0][0] ** 2) + (base_r[0][1] ** 2) + (base_r[0][2] ** 2))\n mat_rot[1][0] = base_r[0][1] / mr\n # Secondo elemento, j=2, i=0\n mat_rot[2][0] = base_r[0][2] / mr\n # Terzo elemento, j=2, i=1\n mr = math.sqrt((base_r[1][0] ** 2) + (base_r[1][1] ** 2) + (base_r[1][2] ** 2))\n mat_rot[2][1] = base_r[1][2] / mr\n\n # In 
alternativa posso calcolare tutti gli elementi della matrice\n # for i in range(2):\n # mr = math.sqrt((base_r[i][0] ** 2) + (base_r[i][1] ** 2) + (base_r[i][2] ** 2))\n # for j in range(3):\n # base_r[i][j] /= mr\n # mat_rot[j][i] = base_r[i][j]\n\n # Sono elementi della matrice non utilizzati\n # base_r[2][0] = +base_r[1][1] * base_r[0][2] - base_r[0][1] * base_r[1][2]\n # base_r[2][1] = -base_r[1][0] * base_r[0][2] + base_r[0][0] * base_r[1][2]\n # base_r[2][2] = +base_r[1][0] * base_r[0][1] - base_r[0][0] * base_r[1][1]\n # for i in range(3):\n # mat_rot[i][2] = base_r[2][i]\n\n # Qui estraggo la terna di Tait-Bryan angles usata internamente, la Z1Y2X3\n k17 = mat_rot[2][0]\n k16 = mat_rot[1][0]\n l17 = mat_rot[2][1]\n m20 = math.asin(k17)\n i23 = math.cos(m20)\n i24 = k16 / i23\n i25 = l17 / i23\n m19 = math.asin(i24)\n self.zyx1_r = m19 + roof_motor_position\n self.zyx2_r = math.asin(k17)\n self.zyx3_r = math.asin(i25)\n self.zyx3 = self.zyx3_r / Kinematic.M_TO_RAD\n self.zyx2 = self.zyx2_r / Kinematic.M_TO_RAD\n self.zyx1 = self.zyx1_r / Kinematic.M_TO_RAD\n angles = self.zyx_r_to_xyz(self.zyx3_r, self.zyx2_r, self.zyx1_r)\n self.xyz1 = angles[2]\n self.xyz2 = angles[0]\n self.xyz3 = angles[1]\n self.xyz1_r = angles[5]\n self.xyz2_r = angles[3]\n self.xyz3_r = angles[4]", "def Misorien2FZ3(m1,m2,symtype='Cubic'):\n if symtype!='Cubic':\n print \"only calculate axis for cubic symmetry\"\n return\n m2=np.matrix(m2)\n dm=(m2.T).dot(m1)\n ops=GetSymRotMat(symtype)\n angle=6.3\n for op1 in ops:\n for op2 in ops:\n tmp=op2.dot(dm.dot(op1))\n cosangle=0.5*(tmp.trace()-1)\n cosangle=min(0.9999999,cosangle)\n cosangle=max(-0.9999999,cosangle)\n newangle=np.arccos(cosangle)\n if newangle<angle:\n w,W=np.linalg.eig(tmp)\n i=np.where(abs(np.real(w)-1)<1e-8)[0]\n direction=np.asarray(np.real(W[:,i[-1]])).squeeze()\n if abs(direction[0])>1e-8:\n sina=(tmp[2,1]-tmp[1,2])/2.0/direction[0]\n if sina<0:\n direction=-direction\n if direction[0]>direction[1] and direction[1]>direction[2] and direction[2]>0:\n angle=newangle\n axis=direction\n tmp=tmp.T\n w,W=np.linalg.eig(tmp)\n i=np.where(abs(np.real(w)-1)<1e-8)[0]\n direction=np.asarray(np.real(W[:,i[-1]])).squeeze()\n if abs(direction[0])>1e-8:\n sina=(tmp[2,1]-tmp[1,2])/2.0/direction[0]\n if sina<0:\n direction=-direction\n if direction[0]>direction[1] and direction[1]>direction[2] and direction[2]>0:\n angle=newangle\n axis=direction\n\n return axis,angle", "def spherical_function(j, x, y, z):\n theta = np.arccos(z)\n phi = np.arctan2(y, x)\n return angular_function(j, theta, phi)", "def intersection_ring(self, q_total):\n \n # WARNING: This ignores the effect of the incident angle\n \n \n\n # This is a point that intersects the Ewald sphere\n # (if incident_angle = 0)\n theta = np.arcsin(q_total/(2*self.get_k()))\n qx, qy, qz = 0, -q_total*np.sin(theta), q_total*np.cos(theta)\n \n #qx, qy, qz = 0, 0, q_total\n \n qxs = []\n qys = []\n qzs = []\n \n for rot_angle in np.linspace(0, 2*np.pi, num=200):\n qx_rot = qx*np.cos(rot_angle) + qz*np.sin(rot_angle)\n qy_rot = qy\n qz_rot = -qx*np.sin(rot_angle) + qz*np.cos(rot_angle)\n qxy_rot = np.sqrt(np.square(qx_rot)+np.square(qy_rot))\n if qx_rot<0:\n qxy_rot *= -1\n \n qxs.append( qx_rot )\n qys.append( qy_rot )\n qzs.append( qz_rot )\n \n return qxs, qys, qzs", "def cart2pol(x,y):\r\n th = np.angle(x+1j*y)\r\n rho = np.abs(x+1j*y)\r\n \r\n return th, rho", "def cal_phi(self):\n\n if not self.check_def(['E','px','py','pz']):\n sys.exit('Particle error: Quadri impulsion not define (error for phi 
routine)')\n\n if(self.px>0):\n self.phi=math.atan(self.py/self.px)\n elif(self.px<0):\n self.phi=math.atan(self.py/self.px)+math.pi\n elif(self.py>0): #remind that p(1)=0\n self.phi=math.pi/2.0\n elif(self.py<0): # remind that p(1)=0\n self.phi=-math.pi/2.0\n else:\n print \"Warning self.phi not properly defined put value to 0\"\n self.phi=0\n \n if(self.phi<0):\n self.phi=self.phi+2*math.pi\n\n return self.phi", "def vsh3(m, l, theta, phi):\n if l==0: return np.array([0, 0, 0])\n\n r = 1\n R = np.array([r, 0, 0])\n gradY = sph_harm_gradient(m, l, r, theta, phi)\n return -1j * np.cross(R, gradY) / np.sqrt( l * (l + 1))", "def rotation3Dz(theta):\n rmat = np.zeros((3,3))\n rmat[0,0] = rmat[1,1] = np.cos(theta)\n rmat[0,1] = np.sin(theta)\n rmat[1,0] = -rmat[0,1]\n rmat[2,2] = 1\n return rmat", "def T(self):\n\n # Calculate the direction cosines for the local x-axis\n # The local x-axis will run from the i-node to the j-node\n xi = self.i_node.X\n xj = self.j_node.X\n yi = self.i_node.Y\n yj = self.j_node.Y\n zi = self.i_node.Z\n zj = self.j_node.Z\n x = [(xj - xi), (yj - yi), (zj - zi)]\n x = x/norm(x)\n \n # The local y-axis will be in the plane of the plate\n # Find a vector in the plate's local xy plane\n xn = self.n_node.X\n yn = self.n_node.Y\n zn = self.n_node.Z\n xy = [xn - xi, yn - yi, zn - zi]\n\n # Find a vector perpendicular to the plate surface to get the orientation of the local z-axis\n z = cross(x, xy)\n \n # Divide the vector by its magnitude to produce a unit z-vector of direction cosines\n z = z/norm(z)\n\n # Calculate the local y-axis as a vector perpendicular to the local z and x-axes\n y = cross(z, x)\n \n # Divide the z-vector by its magnitude to produce a unit vector of direction cosines\n y = y/norm(y)\n\n # Create the direction cosines matrix\n dirCos = array([x, y, z])\n \n # Build the transformation matrix\n transMatrix = zeros((24, 24))\n transMatrix[0:3, 0:3] = dirCos\n transMatrix[3:6, 3:6] = dirCos\n transMatrix[6:9, 6:9] = dirCos\n transMatrix[9:12, 9:12] = dirCos\n transMatrix[12:15, 12:15] = dirCos\n transMatrix[15:18, 15:18] = dirCos\n transMatrix[18:21, 18:21] = dirCos\n transMatrix[21:24, 21:24] = dirCos\n \n return transMatrix", "def theta(point_a, point_b):\r\n dx = point_b[0] - point_a[0]\r\n dy = point_b[1] - point_a[1]\r\n\r\n if abs(dx) < 1.e-6 and abs(dy) < 1.e-6:\r\n return 360\r\n else:\r\n t = dy/(abs(dx) + abs(dy))\r\n\r\n if dx < 0:\r\n t = 2 - t\r\n elif dy < 0:\r\n t += 4\r\n\r\n if t == 0:\r\n return 360\r\n\r\n return t*90", "def rotacija_pravouglog_trougla_oko_hipotenuze(s2, s1):\r\n c = math.sqrt(s2 * s2 + s1 * s1)\r\n povrsina_trougla= (s2 * s1) / 2\r\n hc = (2 * povrsina_trougla) / c\r\n H1 = math.sqrt(s1 * s1 - hc * hc)\r\n H2 = math.sqrt(s2 * s2 - hc * hc)\r\n pi= 3.14\r\n povrsina = hc * pi * (s1 + s2)\r\n zapremina = (hc * hc * pi * (H1 + H2)) / 3\r\n return povrsina, zapremina", "def log_so3(R):\n theta = np.arccos((np.trace(R) - 1) / 2)\n return (R - R.T) * theta / (2*np.sin(theta))", "def SPHarmCoefficients(self, l, m,\n l2, m2, diracsign_2,\n l3, m3, diracsign_3):\n topleftjms = (l2, m2, 1/2)\n toprightjms = (l3, m3, 1/2)\n top = self.SPHarmComponents(l, m, *topleftjms, *toprightjms) * (diracsign_2 * diracsign_3/2) # sign from top part (gb 9.57) and normalization\n bottomleftjms = (l2, m2, -1/2)\n bottomrightjms = (l3, m3, -1/2)\n bottom = self.SPHarmComponents(l, m, *bottomleftjms, *bottomrightjms) * (1/2) # sign from bottom part (1j*-1j) and normalization\n return top + bottom", "def force ( r ):\n \n assert r.shape == 
(n,3), 'Incorrect shape of r'\n\n d = np.zeros_like(r) # Create d vectors (bonds)\n d[1:n,:] = r[1:n,:] - r[0:n-1,:] # Compute d vectors (zero index not used)\n\n # Store C coefficients in a matrix\n # In the general case we would not need to calculate every pair\n # and also we would make use of the symmetry cc[a,b]=cc[b,a]\n cc = np.zeros((n,n),dtype=np.float_) # Create C array (scalar products)\n for a in range(1,n):\n for b in range(1,n):\n cc[a,b]=np.dot(d[a,:],d[b,:]) # Compute C array (zero indices not used)\n\n a = n-1 # For this test there is just one angle\n\n # Here is the potential as a function of cos(theta)\n # For testing we use the simplest form: v= -cos(theta)\n # The notation matches that used in the appendix\n\n prefac = 1.0 / np.sqrt(cc[a,a]*cc[a-1,a-1])\n fac = cc[a,a-1]\n pot = -prefac*fac # This is -cos(theta)\n\n # Here we include the derivative of the potential with respect to cos(theta) in the prefactor\n # For this simple case it is -1, so the forces are simply gradients of cos(theta) as in the text\n f = np.empty_like(r) # Create force array\n fac1 = fac / cc[a,a]\n fac2 = fac / cc[a-1,a-1]\n f[a,:] = -prefac * ( fac1*d[a,:] - d[a-1,:] )\n f[a-1,:] = prefac * ( fac1*d[a,:] - fac2*d[a-1,:] + d[a,:] - d[a-1,:] )\n f[a-2,:] = prefac * ( fac2*d[a-1,:] - d[a,:] )\n\n return pot, f", "def correccion(self, Z):\n K = self.P @ self.H.T @ inv(self.H @ self.P @ self.H.T + self.R)\n self.X += K @ (Z - self.H @ self.X)\n self.P = self.P - K @ self.H @ self.P\n\n return self.X", "def phi(self, i):\n if i == 1 or not self.connected_to_previous(i):\n return 0.0\n\n res = self.all_residues[i]\n\n try:\n n = res['N'].get_vector()\n ca = res['CA'].get_vector()\n c = res['C'].get_vector()\n\n res_minus_one = self.all_residues[i -1]\n cp = res_minus_one['C'].get_vector()\n phi = calc_dihedral(cp, n, ca, c)\n return phi\n except Exception:\n print \"Could not get phi for \"+repr(i)\n raise LookupError", "def pol2cart(th,rho):\r\n x = rho * np.cos(th)\r\n y = rho * np.sin(th)\r\n\r\n return x, y", "def theta_ft(h,ft_intercept,gamma):\n theta_top = ft_intercept + h*gamma\n return theta_top", "def theta_of_complex(z):\n return 2*np.arcsin(np.sqrt(np.real((z * np.conj(z)))))", "def thirdO(latt_vec_array: tuple, charge: int, n: int) -> float:\n\n cell_scale = 1.0 # SKW: In notebook workflow cell parameters are converted to Cartesians and units of Angstroms \n cell = (latt_vec_array*cell_scale) * angstrom.rescale(a0) \n\n #Anuj_05/22/18:modified to \"third_order\"\n thirdO = third_order(cell, n) * (4e0*pi/3e0) * Ry.rescale(eV) * charge * charge\n\n return thirdO", "def get_phi_chi_omega(self, angles):\n (phi, chi, omega) = angles[0:3]\n return (phi, chi, omega)", "def axis_angle_matrix3(unit, theta):\n x, y, z = unit\n c = math.cos(theta)\n s = math.sin(theta)\n C = 1 - c\n return np.matrix([\n [x * x * C + c, x * y * C - z * s, x * z * C + y * s],\n [y * x * C + z * s, y * y * C + c, y * z * C - x * s],\n [z * x * C - y * s, z * y * C + x * s, z * z * C + c],\n ])", "def joint_cov_prox(Y, nu, theta, t):\n if Y is None:\n return nu\n\n n, nk = Y[0].shape\n Yemp = Y[0]@Y[0].T\n \n s, Q = np.linalg.eigh(nu/(t*nk)-Yemp/nk)\n w = ((t*nk)*s + np.sqrt(((t*nk)*s)**2 + 4*(t*nk)))/2\n return Q @ np.diag(w) @ Q.T", "def compute_heading_from_quaternion(r):\n # quaternion - np.quaternion unit quaternion\n # Real world rotation\n direction_vector = np.array([0, 0, -1]) # Forward vector\n heading_vector = quaternion_rotate_vector(r.inverse(), direction_vector)\n\n phi = -np.arctan2(heading_vector[0], 
-heading_vector[2]).item()\n return phi", "def problem3():\n t = np.array([-27.1, -2.9, -3.2])\n principal_point = np.array([8, -10])\n focal_length = 8\n\n # model transformations\n T = gettranslation(t)\n Ry = getyrotation(135)\n Rx = getxrotation(-30)\n Rz = getzrotation(90)\n print(T)\n print(Ry)\n print(Rx)\n print(Rz)\n\n K = getcentralprojection(principal_point, focal_length)\n\n P,M = getfullprojection(T, Rx, Ry, Rz, K)\n print(P)\n print(M)\n\n points = loadpoints()\n displaypoints2d(points)\n\n z = loadz()\n Xt = invertprojection(K, points, z)\n\n Xh = inverttransformation(M, Xt)\n\n worldpoints = hom2cart(Xh)\n displaypoints3d(worldpoints)\n\n points2 = projectpoints(P, worldpoints)\n displaypoints2d(points2)\n\n plt.show()", "def orbit_cross():\n \n # potential\n ham = gp.Hamiltonian(gp.MilkyWayPotential(nucleus=dict(m=0), halo=dict(c=0.95, m=7E11), bulge=dict(m=4E9), disk=dict(m=5.5e10)))\n gc_frame = coord.Galactocentric(galcen_distance=8*u.kpc, z_sun=0*u.pc)\n \n # orbital solution\n pos = np.load('/home/ana/projects/GD1-DR2/data/gd1_orbit.npy')\n phi1, phi2, d, pm1, pm2, vr = pos\n\n c = gc.GD1(phi1=phi1*u.deg, phi2=phi2*u.deg, distance=d*u.kpc, \n pm_phi1_cosphi2=pm1*u.mas/u.yr,\n pm_phi2=pm2*u.mas/u.yr,\n radial_velocity=vr*u.km/u.s)\n w0 = gd.PhaseSpacePosition(c.transform_to(gc_frame).cartesian)\n \n dt = 0.5 * u.Myr\n n_steps = 250\n fit_orbit = ham.integrate_orbit(w0, dt=dt, n_steps=120)\n\n # find gap 6D location at present\n gap_phi0 = -40*u.deg\n model_gd1 = fit_orbit.to_coord_frame(gc.GD1, galactocentric_frame=gc_frame)\n gap_i = np.abs(model_gd1.phi1.wrap_at(180*u.deg) - gap_phi0).argmin()\n gap_w0 = fit_orbit[gap_i]\n \n # gap orbit\n t1 = 0*u.Myr\n t2 = -1*u.Gyr\n dt = -0.5\n t = np.arange(t1.to(u.Myr).value, t2.to(u.Myr).value+dt, dt)\n gap_orbit = ham.integrate_orbit(gap_w0, dt=dt, t1=t1, t2=t2)\n \n \n # plot relative distances as a function of time\n plt.close()\n plt.figure(figsize=(9,5))\n \n lw = 3\n\n # show classicals\n tcls = Table.read('../data/positions_classical.fits')\n ra, dec, d, pmra, pmdec, vr = tcls['ra'], tcls['dec'], tcls['distance'], tcls['pmra'], tcls['pmdec'], tcls['vr']\n cs = coord.ICRS(ra=ra, dec=dec, distance=d, pm_ra_cosdec=pmra, pm_dec=pmdec, radial_velocity=vr)\n ws = gd.PhaseSpacePosition(cs.transform_to(gc_frame).cartesian)\n satellite_orbit = ham.integrate_orbit(ws, dt=dt, t1=t1, t2=t2)\n for e in range(len(tcls)):\n if e==0:\n label = 'Classical\\ndwarfs'\n else:\n label = ''\n rel_distance = np.linalg.norm(gap_orbit.xyz - satellite_orbit.xyz[:,:,e], axis=0)*gap_orbit.xyz[0].unit\n plt.plot(t, rel_distance, '-', color=mpl.cm.Reds(0.9), alpha=0.5, label=label, lw=lw)\n \n # show ultrafaints\n tufd = Table.read('../data/positions_ufd.fits')\n ra, dec, d, pmra, pmdec, vr = tufd['ra'], tufd['dec'], tufd['distance'], tufd['pmra'], tufd['pmdec'], tufd['vr']\n cs = coord.ICRS(ra=ra, dec=dec, distance=d, pm_ra_cosdec=pmra, pm_dec=pmdec, radial_velocity=vr)\n ws = gd.PhaseSpacePosition(cs.transform_to(gc_frame).cartesian)\n satellite_orbit = ham.integrate_orbit(ws, dt=dt, t1=t1, t2=t2)\n for e in range(len(tufd)):\n if e==0:\n label = 'Ultra-faint\\ndwarfs'\n else:\n label = ''\n rel_distance = np.linalg.norm(gap_orbit.xyz - satellite_orbit.xyz[:,:,e], axis=0)*gap_orbit.xyz[0].unit\n plt.plot(t, rel_distance, '-', color=mpl.cm.Reds(0.7), alpha=0.5, label=label, lw=lw)\n \n # show globulars\n tgc = Table.read('../data/positions_globular.fits')\n ra, dec, d, pmra, pmdec, vr = tgc['ra'], tgc['dec'], tgc['distance'], tgc['pmra'], 
tgc['pmdec'], tgc['vr']\n cs = coord.ICRS(ra=ra, dec=dec, distance=d, pm_ra_cosdec=pmra, pm_dec=pmdec, radial_velocity=vr)\n ws = gd.PhaseSpacePosition(cs.transform_to(gc_frame).cartesian)\n satellite_orbit = ham.integrate_orbit(ws, dt=dt, t1=t1, t2=t2)\n for e in range(len(tgc)):\n if e==0:\n label = 'Globular\\nclusters'\n else:\n label = ''\n rel_distance = np.linalg.norm(gap_orbit.xyz - satellite_orbit.xyz[:,:,e], axis=0)*gap_orbit.xyz[0].unit\n plt.plot(t, rel_distance, '-', color=mpl.cm.Reds(0.5), alpha=0.5, label=label, lw=lw)\n\n plt.plot(t, np.abs(gap_orbit.xyz[2]), '-', color=mpl.cm.Reds(0.3), alpha=0.5, label='Disk', lw=lw, zorder=0)\n #plt.plot(t, np.sqrt(gap_orbit.xyz[0]**2 + gap_orbit.xyz[1]**2), 'r-', alpha=0.2)\n\n plt.ylim(0.1,200)\n plt.gca().set_yscale('log')\n \n plt.legend(loc=2, fontsize='small', markerscale=2)\n plt.xlabel('Time [Myr]')\n plt.ylabel('Relative distance [kpc]')\n \n plt.tight_layout()\n plt.savefig('../plots/satellite_distances.png', dpi=200)\n plt.savefig('../paper/satellite_distances.pdf')", "def project_onto_plane(self,z):\n U=self.U\n Q=self.Q_p\n #print(((z-Q[-2,:,[2]])/P[-2,:,[2]]).T)\n #print(P[-2])\n return ((z-Q[-2,:,[2]])/U[-2,:,[2]]).T*U[-2]+Q[-2]", "def find_centre_of_rotation(x1, x2, y1, y2):\n\n # chords of rotation of x, y\n\n cx = x2 - x1\n cy = y2 - y1\n\n # know axis is perpendicular to both of these -> is cross product\n\n axis = cx.cross(cy).normalize()\n\n # normal vector to y chord\n\n ny = component(cy, axis).normalize().cross(axis)\n\n # origin of normal vectors, centre of x, y chords\n\n ox = component(x1 + 0.5 * cx, axis)\n oy = component(y1 + 0.5 * cy, axis)\n\n # determine true origin of rotation - normal vector of x chord, construct\n # right-angle-triangle with hypotenuse from unknown origin of rotation\n # to central point of y chord oy, and adjacent the vector parallel to\n # reversed x chord => opposite is on vector from unknown origin of rotation\n # to ox\n\n ncx = cx.normalize()\n h = (oy - ox).dot(ncx)\n d = h / (ny).dot(-ncx)\n return oy + d * ny, axis", "def random_three_vector():\n phi = config.random.uniform(0, np.pi * 2)\n costheta = config.random.uniform(-1, 1)\n\n theta = np.arccos(costheta)\n x = np.sin(theta) * np.cos(phi)\n y = np.sin(theta) * np.sin(phi)\n z = np.cos(theta)\n\n return x, y, z", "def proj_gnomonic_sphere(lamb0, phi, x, y):\n rho = (x**2+y**2)**0.5\n c = np.arctan(rho)\n # print('c', rho, c, np.rad2deg(c))\n lamb = x*np.sin(c)\n lamb /= (rho*np.cos(phi)*np.cos(c)-y*np.sin(phi)*np.sin(c))\n lamb = lamb0+np.arctan(lamb)\n\n phi1 = np.cos(c)*np.sin(phi)\n phi1 += (y*np.sin(c)*np.cos(phi))/rho\n phi1 = np.arcsin(phi1)\n\n return lamb, phi1", "def theta(a, b):\n \n \n def norm_vec(x):\n norm_out = sqrt(dot(x, x))\n return norm_out\n \n theta = acos(dot(a, b) / (norm_vec(a) * norm_vec(b))) * 180 / pi\n \n print theta", "def getplatepos(self, phi=0, chi=0, omega=0):\n\n #Save the specified angles in the structure\n angles = np.array([phi, chi, omega]);\n\n #We divvy up the phi rotation between the plate and the sample motor.\n #We round to the nearest multiple of the sample motor step size.\n self.sample_motor_phi = round(phi / self.sample_motor_step_size) * self.sample_motor_step_size\n #And the remainder is handled by the sample plate position.\n sample_plate_phi = phi - self.sample_motor_phi\n\n #This calculates the rotation matrix for the sample PLATE only.\n rot_M_plate = rotation_matrix(sample_plate_phi, chi, omega)\n\n #And this is the rotation matrix for the sample motor only\n rot_M_motor 
= rotation_matrix(self.sample_motor_phi, 0, 0)\n\n\n #X,Y,Z translation vector (in mm) to perform BEFORE moving the sample plate.\n #To calculate these, we use the relative_sample_position vector.\n translate_v = -self.relative_sample_position\n #But we have to correct for the sample motor phi rotation by rotating the translation\n #vector as well.\n translate_v = np.dot(rot_M_motor, translate_v)\n \n\n #------------------ SAMPLE PLATE ----------------------\n #3 vectors representing the position of the mounting points on the plate,\n #when it is horizontal and with the sample at 0\n #Remember, the plate is in the X-Z plane.\n\n #distance between center of plate and each mounting point.\n d = self.mounting_side_length / (2 * np.cos(pi / 6))\n #Distance to the edge on the other side\n d2 = np.sin(pi / 6) * d\n\n #Vectors representing the sample plate at the \"zero\" position.\n sample_plate_zero = np.column_stack(([self.mounting_side_length / 2, self.sample_plate_height, d2],\n [-self.mounting_side_length / 2, self.sample_plate_height, d2],\n [0, self.sample_plate_height, -d]))\n\n #------------------ OTHER USEFUL POINTS ----------------------\n #Vector representing the position of the middle of the sample plate.\n sample_middle = column([0, self.sample_plate_height, 0])\n\n #Make a vector representing the position of the sample at the end of the\n #pin.\n pin = self.relative_sample_position\n\n #Make vector to represent the sample motor orientation (at zero)\n self.motor_vector_length = 20\n motor = column([0, self.sample_plate_height, self.motor_vector_length])\n\n\n #------------------ APPLY TRANSFORMS ----------------------\n #For the sample plate: we do not apply the motor_phi rotation.\n \n #Do a translation of the position - we are moving the entire sample plate\n # This places the sample in the 0,0,0 position.\n sample_plate = get_translated_vectors(sample_plate_zero, translate_v)\n\n #Now do a rotation (phi,chi,omega)\n sample_plate = dot(rot_M_plate, sample_plate)\n\n #The pin rotates with the motor, then translates, then then rotates with the\n #sample plate.\n pin = dot(rot_M_motor, pin)\n pin = get_translated_vectors(pin, translate_v)\n pin = dot(rot_M_plate, pin)\n\n #Motor vector = same as pin.\n motor = dot(rot_M_motor, motor)\n motor = get_translated_vectors(motor, translate_v)\n motor = dot(rot_M_plate, motor)\n\n #Same for the sample_middle vector\n sample_middle = dot(rot_M_motor, sample_middle)\n sample_middle = get_translated_vectors(sample_middle, translate_v)\n sample_middle = dot(rot_M_plate, sample_middle)\n\n #Sample plate coordinates are:\n #i.e. x_A2, y_A2, x_B2, etc. 
(as written in Janik's notebook)\n\n #We want to find the positions of the other ends of the legs on the fixed\n #plate, x_A1, etc.\n fixed_plate = np.copy(sample_plate)\n\n #Legs A and B are fixed in their orientation along Z, and C along X, so we\n #know the Z_A1, Z_B1 and X_C1 positions on the FIXED plate are the same as\n #on the SAMPLE plate.\n\n #We also know the height of all these points, y = fixed_plate_height.\n fixed_plate[COORD_Y, :] = self.fixed_plate_height\n \n #This leaves x_A1, x_B1, and z_C1 to find.\n\n #Angle between the x direction and the (A1 to A2) vector formed by leg A\n theta_A = np.arcsin((sample_plate[COORD_Y, MOUNT_A] - self.fixed_plate_height) / self.leg_length)\n if theta_A > -pi / 2:\n #Force theta_A to be ~-120 degrees\n theta_A = -pi - theta_A\n \n\n #Angle between the x direction and the B1 to B2) vector formed by leg B\n theta_B = np.arcsin((sample_plate[COORD_Y, MOUNT_B] - self.fixed_plate_height) / self.leg_length)\n\n #We can easily calculate the x position from these\n x_A1 = sample_plate[COORD_X, MOUNT_A] - self.leg_length * cos(theta_A)\n x_B1 = sample_plate[COORD_X, MOUNT_B] - self.leg_length * cos(theta_B)\n\n fixed_plate[COORD_X, MOUNT_A] = x_A1\n fixed_plate[COORD_X, MOUNT_B] = x_B1\n\n\n #Finally we find the position of Leg C\n phi_C = np.arcsin((sample_plate[COORD_Y, MOUNT_C] - self.fixed_plate_height) / self.leg_length)\n if phi_C < -pi / 2:\n #Force phi_C to be ~-60 degrees\n phi_C = 2*pi + phi_C\n\n #Now we calc. the Z position of leg C on the fixed plate.\n z_C1 = sample_plate[COORD_Z, MOUNT_C] - self.leg_length * cos(phi_C)\n fixed_plate[COORD_Z, MOUNT_C] = z_C1\n\n\n #Assign these plate position in the goniometer object, which is returned\n self.sample_plate = sample_plate\n self.fixed_plate = fixed_plate\n self.sample_plate_zero = sample_plate_zero\n\n #Also return the pin and motor vectors\n self.pin = pin\n self.motor = motor\n self.sample_middle = sample_middle" ]
[ "0.6163923", "0.6056872", "0.6046275", "0.60347825", "0.59408677", "0.5916736", "0.58936423", "0.5829221", "0.58063835", "0.57062423", "0.57015944", "0.56956786", "0.5686689", "0.56826967", "0.56771237", "0.5673097", "0.56674886", "0.56646484", "0.56586415", "0.56585634", "0.5654848", "0.56488967", "0.5643867", "0.5610945", "0.5607379", "0.5601729", "0.55632824", "0.55601156", "0.5534972", "0.55169934", "0.5504914", "0.55023545", "0.549042", "0.5476408", "0.5473025", "0.54653233", "0.5456034", "0.5450831", "0.5447856", "0.5446829", "0.5442757", "0.54267824", "0.5421733", "0.5390052", "0.5377511", "0.5370181", "0.53685695", "0.5367466", "0.5364621", "0.5361985", "0.5359783", "0.5358959", "0.5347096", "0.5346717", "0.5345464", "0.53312266", "0.5330938", "0.53276795", "0.5325671", "0.5312443", "0.5309791", "0.53074026", "0.5305952", "0.53000987", "0.52997994", "0.5297053", "0.52943534", "0.5290533", "0.5289328", "0.52812004", "0.5273404", "0.5261687", "0.52611357", "0.5247691", "0.5247211", "0.5237121", "0.5233168", "0.522347", "0.52231365", "0.52218574", "0.52151567", "0.521511", "0.5213431", "0.5212605", "0.520642", "0.5205274", "0.5198132", "0.5195292", "0.5193601", "0.51930994", "0.51920164", "0.5190731", "0.51896113", "0.5186476", "0.5175341", "0.5173835", "0.51704276", "0.51611143", "0.51463765", "0.5137348" ]
0.68433595
0
Print the composition of two transformations T2T1
def compose_transform2(alpha1, sx1, sy1, scale1, alpha2, sx2, sy2, scale2):
    t1 = Transform({"type":"2D","alpha":alpha1,"tx":sx1,"ty":sy1,"mirror":0,"scale":scale1})
    t2 = Transform({"type":"2D","alpha":alpha2,"tx":sx2,"ty":sy2,"mirror":0,"scale":scale2})
    tt = t2*t1
    d = tt.get_params("2D")
    return d[ "alpha" ], d[ "tx" ], d[ "ty" ], d[ "scale" ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _print_transforms(self):\n self._print_frozen_transforms()\n self._print_nonfrozen_transforms()", "def print_output_task2(model1,model2):\n print(\"######################################################################\")\n print(\"Task 2 : IBM model 1 and 2 Analysis(using NLTK)\")\n print(\"######################################################################\")\n for (a,b) in zip(model1,model2):\n print(\"English Sentence : \",a.mots)\n print(\"Foreign Sentence : \",a.words)\n print(\"Alignment(Model 1): \",a.alignment)\n print(\"Alignment(Model 2): \",b.alignment)\n print(\"----------------------------------------------------------------------\")", "def compose_transform(T1, T2):\n aux_vec = np.array([0, 0, 1]).reshape(1, 3)\n\n T1 = np.concatenate((T1, aux_vec), axis=0)\n T2 = np.concatenate((T2, aux_vec), axis=0)\n\n T1_inv = np.linalg.inv(T1)\n T = T1_inv@T2\n\n return T[0:2]", "def print_m(seq1, seq2, m):\n seq1 = '-' + seq1; seq2 = '-' + seq2\n print()\n print(' '.join(['%3s' % i for i in ' '+seq2]))\n for i, p in enumerate(seq1):\n line = [p] + [m[i][j] for j in range(len(seq2))]\n print(' '.join(['%3s' % i for i in line]))\n print()\n return", "def _print_nonfrozen_transforms(self):\n print(\"TRANSFORMS:\\n\" + str(self._transform_groups))\n for dd in self._datasets:\n if isinstance(dd, AvalancheDataset):\n print(\"PARENT TRANSFORMS:\\n\")\n _print_nonfrozen_transforms(dd)", "def __repr__(self):\n format_string = self.__class__.__name__ + '('\n for t in self.transforms:\n format_string += f'\\n {t}'\n format_string += '\\n)'\n return format_string", "def __str__(self):\n return str(self.t1)+\"<-->t1, \\t\"+str(self.t2)+\"<-->t2, \\t\"+str(self.phi)+\"<-->phi, \\t\"+str(self.m)+\"<-->m, \\t\"+str(self.t31)+\"<-->t31, \\t\"+str(self.t32)+\"<-->t32, \\n\"", "def test_x2y2_print(self):\n from io import StringIO\n import io\n import contextlib\n r1 = Square(2, 3, 2, 2)\n temp_stdout = io.StringIO()\n with contextlib.redirect_stdout(temp_stdout):\n r1.display()\n output = temp_stdout.getvalue()\n self.assertEqual(output, '\\n\\n ##\\n ##\\n')", "def test_transform_2d(transform, alpha = 1):\r\n points = 20*[None]\r\n for i in range(20):\r\n x = random.randrange(-40, 41)\r\n y = random.randrange(-40, 41)\r\n points[i] = vec2(x, y)\r\n tr_x = random.randrange(-40, 41)\r\n tr_y = random.randrange(-40, 41)\r\n mapping = [(p, vec2(p.x + tr_x, p.y + tr_y)) for p in points]\r\n print(\"Translation\")\r\n print(\"Input\".ljust(20), \"Translation\".ljust(20), \"Transformation\".ljust(20))\r\n for i in range(20):\r\n x = random.randrange(-40, 41)\r\n y = random.randrange(-40, 41)\r\n v_in = vec2(x, y)\r\n v_translate = vec2(x + tr_x, y + tr_y)\r\n v_transform = transform(v_in, mapping, alpha)\r\n print(str(v_in).ljust(20), str(v_translate.str_repr(4)).ljust(20), str(v_transform.str_repr(4)).ljust(20))\r\n print()\r\n th = 2*math.pi*random.random()\r\n mapping = [(p, vec2(p.x*math.cos(th) - p.y*math.sin(th), p.x*math.sin(th) + p.y*math.cos(th))) for p in points]\r\n print(\"Rotation\")\r\n print(\"Input\".ljust(20), \"Rotation\".ljust(20), \"Transformation\".ljust(20))\r\n for i in range(20):\r\n x = random.randrange(-40, 41)\r\n y = random.randrange(-40, 41)\r\n v_in = vec2(x, y)\r\n v_rotate = vec2(x*math.cos(th) - y*math.sin(th), x*math.sin(th) + y*math.cos(th))\r\n v_transform = transform(v_in, mapping, alpha)\r\n print(str(v_in).ljust(20), str(v_rotate.str_repr(4)).ljust(20), str(v_transform.str_repr(4)).ljust(20))\r\n print()\r\n k = math.exp(2*random.random() - 1)\r\n mapping = [(p, 
vec2(k*p.x, k*p.y)) for p in points]\r\n print(\"Uniform scaling\")\r\n print(\"Input\".ljust(20), \"Scaling\".ljust(20), \"Transformation\".ljust(20))\r\n for i in range(20):\r\n x = random.randrange(-40, 41)\r\n y = random.randrange(-40, 41)\r\n v_in = vec2(x, y)\r\n v_scale = vec2(k*x, k*y)\r\n v_transform = transform(v_in, mapping, alpha)\r\n print(str(v_in).ljust(20), str(v_scale.str_repr(4)).ljust(20), str(v_transform.str_repr(4)).ljust(20))\r\n print()\r\n k_x = math.exp(2*random.random() - 1)\r\n k_y = 3*random.random() + 1\r\n if (k_x >= k_y + math.exp(-1)): k_y = k_x - k_y\r\n else: k_y = k_x + k_y\r\n mapping = [(p, vec2(k_x*p.x, k_y*p.y)) for p in points]\r\n print(\"Non-uniform scaling\")\r\n print(\"Input\".ljust(20), \"Scaling\".ljust(20), \"Transformation\".ljust(20))\r\n for i in range(20):\r\n x = random.randrange(-40, 41)\r\n y = random.randrange(-40, 41)\r\n v_in = vec2(x, y)\r\n v_scale = vec2(k_x*x, k_y*y)\r\n v_transform = transform(v_in, mapping, alpha)\r\n print(str(v_in).ljust(20), str(v_scale.str_repr(4)).ljust(20), str(v_transform.str_repr(4)).ljust(20))\r\n print()", "def __str__(self):\n names = [self.name]\n names += [_callable_name(transform) for transform in self.transforms]\n return ' | '.join(names) + f' -> {self.shape} {self.dtype}'", "def func_tf_print(self, arg_frame_1, arg_frame_2):\n try:\n trans = self._tfBuffer.lookup_transform(arg_frame_1, arg_frame_2, rospy.Time())\n self._pospkg_x = trans.transform.translation.x\n self._pospkg_y = trans.transform.translation.y\n self._pospkg_z = trans.transform.translation.z\n\n\n except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):\n rospy.logerr(\"TF error\")", "def __str__(self):\n shape, dtype = self._initial_shape, self._initial_dtype\n descr = [self._name_shape_dtype(self.name, shape, dtype)]\n for transform in self.transforms:\n shape, dtype = transform.new_shape(shape), transform.dtype if transform.dtype is not None else dtype\n descr += ['-> ' + self._name_shape_dtype(transform.name, shape, dtype)]\n return '\\n'.join(descr)", "def oneconnect_transformations(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"oneconnect_transformations\")", "def compose(transforms):\n trans_objs = [TRANSFORMS.build(t) for t in transforms]\n return tv_transforms.Compose(trans_objs)", "def print_nice(self):\n print(\"- \" + str(self.__node_a.name) + \" (\" + self.__node_a.get_value_string() +\n \") -> \" + str(self.__node_b.name) + \" (\" + self.__node_b.get_value_string() + \")\")", "def print_diff_summary(self, other, n_apply_to_print=15,\r\n n_ops_to_print=20):\r\n\r\n def diff_dict(a_time, b_time_):\r\n r = {}\r\n b_time = copy.copy(b_time_)\r\n for a, ta in a_time.items():\r\n r.setdefault(a, 0)\r\n tb = b_time.pop(a, 0)\r\n r[a] += ta - tb\r\n\r\n #they are missing in a\r\n for a, t in b_time.items():\r\n r.setdefault(a, 0)\r\n r[a] += t\r\n return r\r\n\r\n compile_time = self.compile_time - other.compile_time\r\n fct_call_time = diff_dict(self.fct_call_time, other.fct_call_time)\r\n fct_call = diff_dict(self.fct_call, other.fct_call)\r\n apply_time = diff_dict(self.apply_time, other.apply_time)\r\n op_cimpl = self.op_cimpl and other.op_cimpl\r\n message = self.message\r\n outputs_size = diff_dict(self.outputs_size, other.outputs_size)\r\n\r\n self.print_summary_(\r\n \"print_diff_summary\", compile_time, fct_call_time, fct_call,\r\n apply_time, op_cimpl, message, outputs_size,\r\n n_apply_to_print=n_apply_to_print,\r\n n_ops_to_print=n_ops_to_print, 
print_apply=False)", "def show_transform_matrices(self):\n\n print(f'Transform Matrices are: {self.tf_matrices_list}')", "def __repr__(cls:Pipeline):\n return str(cls.tfms)", "def transpose_dot(self, other):\n from divisi2 import operators\n return operators.transpose_dot(self, other)", "def dotplot(seq1,seq2,k = 1,t = 1):\n M = __makeMatrix(str(seq1),str(seq2),k)\n __plotMatrix(M, t, str(seq1),str(seq2)) #experiment with character choice", "def ConcatTransform(*args, **kwargs):\n return _gdi_.GraphicsContext_ConcatTransform(*args, **kwargs)", "def __mul__(self, other):\r\n if isinstance(other, tuple):\r\n return self.transform_point(other)\r\n if isinstance(other, LinearTransformation):\r\n return self.left_composition(other)\r\n else:\r\n print(other, type(other))\r\n raise NotImplementedError", "def test_transform_compose(self):\n\n # test for all kinds of curvatures K\n for k in (0, 1, -1, 1/11, -1/11, 11, -2):\n \n s = space(curvature=k)\n\n # use a small enough magnitude to not break math for very negative K\n magic = 0.33377777373737737777\n\n p = s.make_point((2/11, 6/11, 9/11), magic)\n q = s.make_point((3/7, 6/7, 2/7), magic)\n r = s.make_point((9/17, 8/17, 12/17), magic)\n\n f, g, h = map(space_point_transform, (p, q, r))\n\n # check the core principle: (f g) x = f (g x)\n self.assertTrue(point_isclose(\n (f(g))(r),\n f(g(r))\n ))\n\n # just for good measure, let's do it again with different vars\n self.assertTrue(point_isclose(\n (g(h))(p),\n g(h(p))\n ))\n\n def check_transform_eq(t1, t2, invert=False):\n for ref in (p, q, r):\n self.assertTrue(invert ^ point_isclose(\n t1(ref),\n t2(ref)\n ))\n\n # api says f(g) == f + g\n # this is just a convenience to let you write things with a sum instead of a product\n check_transform_eq(f(g), f + g)\n\n # non-commutative property\n check_transform_eq(f+g, g+f, invert=(k!=0))\n\n # associative property\n check_transform_eq(f+g+h, f+(g+h))\n\n # self commutative property\n f2 = f+f\n check_transform_eq(f2+f, f+f2)\n check_transform_eq(f2+f2, f+f2+f)", "def __str__(self):\n A, b = self.A, self.b\n A_rows = str(A).split('\\n')\n n_rows = len(A_rows)\n # column vector from `b`, if not already one\n b_col = b.reshape(b.shape[0], 1) if len(b.shape) == 1 else b\n b_rows = str(b_col).split('\\n')\n # place an \"x\" somewhere near the middle\n x_row = int((n_rows - 1) / 2) # where \"x\" is shown\n above = x_row\n below = (n_rows - x_row - 2)\n spacer = ' | '\n last_middle = [spacer[1:]] if n_rows > 1 else []\n middle = (\n above * [spacer]\n + [' x <= ']\n + below * [spacer]\n + last_middle)\n assert len(middle) == n_rows, (middle, n_rows)\n # format lines\n lines = [A_rows[k] + middle[k] + b_rows[k]\n for k in range(n_rows)]\n output = 'Single polytope \\n {lines}\\n'.format(\n lines='\\n '.join(lines))\n return output", "def main(num1, num2, text):\n return print(\"%30i\"%num1), print(\"%030i\"%num1), print(\"%.2f\"%num2), print(\"%.12f\"%num2), \\\n print(\"%40s\"%text)", "def compose(disp_1, disp_2, indexing='ij'):\n\n assert indexing == 'ij', \"currently only ij indexing is implemented in compose\"\n\n return disp_2 + transform(disp_1, disp_2, interp_method='linear', indexing=indexing)", "def print_diff_summary(self, other, **kwargs):\r\n\r\n def diff_dict(a_time, b_time_):\r\n r = {}\r\n b_time = copy.copy(b_time_)\r\n for a, ta in a_time.items():\r\n r.setdefault(a, 0)\r\n tb = b_time.pop(a, 0)\r\n r[a] += ta - tb\r\n\r\n #they are missing in a\r\n for a, t in b_time.items():\r\n r.setdefault(a, 0)\r\n r[a] += t\r\n return r\r\n\r\n 
compile_time = self.compile_time - other.compile_time\r\n fct_call_time = diff_dict(self.fct_call_time, other.fct_call_time)\r\n fct_call = diff_dict(self.fct_call, other.fct_call)\r\n apply_time = diff_dict(self.apply_time, other.apply_time)\r\n apply_cimpl = self.apply_cimpl and other.apply_cimpl\r\n message = self.message\r\n variable_shape = diff_dict(self.variable_shape, other.variable_shape)\r\n self_linker_time = sum([ps.linker_time for ps\r\n in self.profile_stats.values()])\r\n other_linker_time = sum([ps.linker_time for ps\r\n in other.profile_stats.values()])\r\n self_optimizer_time = sum([ps.optimizer_time for ps\r\n in self.profile_stats.values()])\r\n other_optimizer_time = sum([ps.optimizer_time for ps\r\n in other.profile_stats.values()])\r\n\r\n other_time = {'linker_time': self_linker_time - other_linker_time,\r\n 'optimizer_time': self_optimizer_time -\r\n other_optimizer_time}\r\n self.print_summary_(\"print_diff_summary\", compile_time,\r\n fct_call_time, fct_call,\r\n apply_time, apply_cimpl, message, variable_shape,\r\n print_apply=False, other_time=other_time,\r\n **kwargs)", "def main(a, b):\n edit_distance = make_edit(set(a+b))\n dist, align = edit_distance(a, b)\n print('Distance: {0}'.format(dist))\n x, y = zip(*align)\n print(''.join(x))\n print(''.join(y))", "def simple_chained_rep_rnn_phi(t1, t2):\n return t1.leaves() + t2.leaves()", "def print_lines(f1, f2, row):\n column = col_match(f1, f2)\n print('{}c{}'.format(row, column))\n print(f1)\n print('---')\n print(f2)", "def display_analysis_result(column1: pd.Series, column2: pd.Series, name1: str, name2: str):\n\n print(\"Correlation between '%s' and '%s':\" % (name1, name2))\n print(\"Covariance: \" + str(calculate_covariance(column1, column2)))\n print(\"Correlation coefficient: \" + str(calculate_correlation_coefficient(column1, column2)))\n print(\"Significance of coefficient: \" + str(calculate_significance_of_coefficient(column1, column2)))\n print()\n\n draw_scatter_plot(column1, column2, name1, name2)\n draw_residual_plot(column1, column2, name1, name2)", "def print_layer2(eth):\n print('Ethernet: Destination MAC: %s Source MAC: %s Protocol: %s' %\n (eth_addr(eth.dst_mac), eth_addr(eth.src_mac),\n red(libs.tcpiplib.tcpip.get_ethertype(eth.protocol))))", "def cat(ts1: Tensor, ts2: Tensor, axes: int) -> Tensor:\n cat_op = CatOp(axes)\n return cat_op(ts1, ts2)", "def display(self):\n for x, p in zip(self.xs, self.ps):\n print(x, p)", "def _print_frozen_transforms(self):\n print(\"FROZEN TRANSFORMS:\\n\" + str(self._frozen_transform_groups))\n for dd in self._datasets:\n if isinstance(dd, AvalancheDataset):\n print(\"PARENT FROZEN:\\n\")\n _print_frozen_transforms(dd)", "def transforms_multiply(t0s, t1s):\r\n \r\n return ut.matrix_multiply(t0s, t1s)", "def __update_transformation_view(self):\n text = \"Computed transformation.\\n\\n\"\n text += \"Rotation:\\n\"\n for i in range(3):\n text += str(self.rotation[i, 0]) + \" \" + str(self.rotation[i, 1]) + \" \" + str(self.rotation[i, 2]) + \"\\n\"\n text += \"\\nTranslation:\\n\"\n text += str(self.translation[0]) + \" \" + str(self.translation[1]) + \" \" + str(self.translation[2])\n text += \"\\n\\nScale:\\n\"\n text += str(self.scale)\n text += \"\\n\\n4x4 Matrix:\\n\"\n mat = self.__compose_transformation()\n for i in range(4):\n text += f\"{str(mat[i, 0])}, {str(mat[i, 1])}, {str(mat[i, 2])}, {str(mat[i, 3])}\\n\"\n self.qt_transformation_textbox.setText(text)", "def punto2_1():\r\n\tprint(\"2.1R/\")\r\n\tn = 15\r\n\tp = 0.2\r\n\tseed = 12\r\n\tvis = 
[False for i in range(n)]\r\n\tG,graph=erdos_renyi_seed(n,p,seed)\r\n\tnx.draw(graph,with_labels = True)\r\n\tplt.show()\t\r\n\tm1 = tarjan(G)\r\n\tnumCC = 0\r\n\tfor u in range(n):\r\n\t\tif vis[u] == False:\r\n\t\t\tnumCC+=1\r\n\t\t\tdfsCC(G,vis,u)\r\n\r\n\tavNumCC = n/numCC\r\n\tnumAp,numB = m1[0],m1[1]\r\n\tavnDeg = deg(G)\r\n\tt = tripTrian(G)\r\n\tnumTrian,numTrip = t[0],t[1]\r\n\tprint(\"numero de componentes conectados: \" + str(numCC))\r\n\tprint(\"tamaño promedio de componentes conectados: \" + str(avNumCC))\r\n\tprint(\"numero de puntos de articulacion: \" + str(numAp))\r\n\tprint(\"numero de puentes: \" + str(numB))\r\n\tprint(\"Grado promedio por nodo: \" + str(avnDeg))\r\n\tprint(\"numero de triangulos: \" + str(numTrian))\r\n\tprint(\"numero de tripletas: \" + str(numTrip))\r\n\treturn", "def draw_next_transform(self) -> List[Callable]:\n # Sample parameters for each transformation\n angle = random.randint(-self.max_angle, self.max_angle)\n x_shift = random.uniform(-self.max_x_shift, self.max_x_shift)\n y_shift = random.uniform(-self.max_y_shift, self.max_y_shift)\n contrast = random.uniform(self.min_constrast, self.max_constrast)\n brightness = random.uniform(self.min_brightness, self.max_brightness)\n horizontal_flip = ImageTransformationBase._toss_fair_coin()\n # Returns the corresponding operations\n if random.random() < self.probability_transformation:\n ops = [self.rotate(angle),\n self.translateX(x_shift),\n self.translateY(y_shift)]\n if horizontal_flip:\n ops.append(self.horizontal_flip())\n if self.for_segmentation_input_maps:\n return ops\n ops.extend([self.adjust_contrast(contrast),\n self.adjust_brightness(brightness)])\n else:\n ops = []\n return ops", "def Compose(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD2_Compose(self, *args)", "def print_diff_summary(self, other, **kwargs):\n\n def diff_dict(a_time, b_time_):\n r = {}\n b_time = copy.copy(b_time_)\n for a, ta in iteritems(a_time):\n r.setdefault(a, 0)\n tb = b_time.pop(a, 0)\n r[a] += ta - tb\n\n # they are missing in a\n for a, t in iteritems(b_time):\n r.setdefault(a, 0)\n r[a] += t\n return r\n\n compile_time = self.compile_time - other.compile_time\n fct_call_time = diff_dict(self.fct_call_time, other.fct_call_time)\n fct_call = diff_dict(self.fct_call, other.fct_call)\n apply_time = diff_dict(self.apply_time, other.apply_time)\n apply_cimpl = self.apply_cimpl and other.apply_cimpl\n message = self.message\n variable_shape = diff_dict(self.variable_shape, other.variable_shape)\n self_linker_time = sum([ps.linker_time for ps\n in self.profile_stats.values()])\n other_linker_time = sum([ps.linker_time for ps\n in other.profile_stats.values()])\n self_optimizer_time = sum([ps.optimizer_time for ps\n in self.profile_stats.values()])\n other_optimizer_time = sum([ps.optimizer_time for ps\n in other.profile_stats.values()])\n\n other_time = {'linker_time': self_linker_time - other_linker_time,\n 'optimizer_time': self_optimizer_time -\n other_optimizer_time}\n self.print_summary_(\"print_diff_summary\", compile_time,\n fct_call_time, fct_call,\n apply_time, apply_cimpl, message, variable_shape,\n print_apply=False, other_time=other_time,\n **kwargs)", "def printLine(str1, str2, d1=None, d2=None):\n print str1,\n text = str1\n for name in P.methods:\n if d1 == 'plain':\n print str2,\n text += ' ' + str2\n if d1 == 'name':\n print str2 % (name, P.units),\n text += ' ' + str2 % (name, P.units)\n if d1 and d2:\n print str2 % (d1[name]/P.beta_report, d2[name]/P.beta_report),\n 
text += ' ' + str2 % (d1[name]/P.beta_report, d2[name]/P.beta_report)\n print ''\n outtext.append(text + '\\n')\n return", "def print_decomposition(self):\n if self.my_rank != 0:\n return\n\n print()\n for i in range(self.box_space.i-1, 0, -1):\n for j in range(self.box_space.k-1, 0, -1):\n print(\" \"*j, end=\"\")\n for k in range(self.box_space.k):\n print(\"{:4d}\".format(self.rank_of_box[(i, j, k)]), end=\"\")\n print()\n print()\n print()", "def calcT1(g2, g1):\n idop = FermiOp(g2.orbs, 3, 3)\n idop.data = np.eye(int(binom(g2.orbs, 3)))\n\n return p2N(g2, 3) - p2N(g1, 3) + idop", "def display(T, p, radical, P, I, J):\n print('=' * 20)\n print(T, p, radical)\n for Pi in P:\n print(f' ({Pi!r})')\n print(\"I: \", I)\n print(\"J: \", J)\n print(f'Equal: {I == J}')", "def tikzcode(self):\n tex = \"\"\n tex += r\"\\draw\"\n if len(self.options):\n options = ', '.join(self.options)\n tex += \"[{options}] \".format(options=options)\n tex += \"({a.xpos:.4f},{a.ypos:.4f}) \".format(a=self.node_a)\n tex += \"to\"\n # if the nodes are arranged, then they have angle in/out\n inout = []\n inout.append('out={angle!s}'.format(angle=self.node_a.angle_inout))\n inout.append('in={angle!s}'.format(angle=self.node_b.angle_inout))\n if inout:\n tex += \"[\" + \", \".join(inout) + \"] \"\n tex += \"({b.xpos:.4f},{b.ypos:.4f})\".format(b=self.node_b)\n tex += \";\\n\"\n return tex", "def __str__(self):\n return self.__id__() + \" || \" + str(self.__node_a.name) + \" -> \" + str(self.__node_b.name)", "def combine_params2(alpha1, sx1, sy1, mirror1, alpha2, sx2, sy2, mirror2):\n\n\tt1 = Transform({\"type\":\"2D\",\"alpha\":alpha1,\"tx\":sx1,\"ty\":sy1,\"mirror\":mirror1,\"scale\":1.0})\n\tt2 = Transform({\"type\":\"2D\",\"alpha\":alpha2,\"tx\":sx2,\"ty\":sy2,\"mirror\":mirror2,\"scale\":1.0})\n\ttt = t2*t1\n\td = tt.get_params(\"2D\")\n\treturn d[ \"alpha\" ], d[ \"tx\" ], d[ \"ty\" ], d[ \"mirror\" ]", "def compose(\n # Left side positive filters\n chainLeftIn,resiNumLeftIn,resiNameLeftIn,atomSerialLeftIn,\n atomNameLeftIn,\n # Left side negative filters\n chainLeftOut,resiNumLeftOut,resiNameLeftOut, atomSerialLeftOut,\n atomNameLeftOut,\n # Right side positive filters\n chainRightIn,resiNumRightIn,resiNameRightIn,atomSerialRightIn,\n atomNameRightIn,\n # Right side negative filters\n chainRightOut,resiNumRightOut,resiNameRightOut,atomSerialRightOut,\n atomNameRightOut,\n # Contact Area\n contactAreaMin,contactAreaMax,\n # Minimal distance\n minimalDistanceMin,minimalDistanceMax,\n # Sequence separation\n seqSeparationMin,seqSeparationMax\n ):\n\n output=''\n\n match_first=''\n match_first=append_to_local_output(match_first, 'c', Generic(chainLeftIn))\n match_first=append_to_local_output(match_first, 'r', Generic(resiNumLeftIn))\n match_first=append_to_local_output(match_first, 'a', Generic(atomSerialLeftIn))\n match_first=append_to_local_output(match_first, 'R', Generic(resiNameLeftIn))\n match_first=append_to_local_output(match_first, 'A', Generic(atomNameLeftIn))\n output=append_to_global_output(output, '--match-first', match_first)\n\n match_first_not=''\n match_first_not=append_to_local_output(match_first_not, 'c', Generic(chainLeftOut))\n match_first_not=append_to_local_output(match_first_not, 'r', Generic(resiNumLeftOut))\n match_first_not=append_to_local_output(match_first_not, 'a', Generic(atomSerialLeftOut))\n match_first_not=append_to_local_output(match_first_not, 'R', Generic(resiNameLeftOut))\n match_first_not=append_to_local_output(match_first_not, 'A', Generic(atomNameLeftOut))\n 
output=append_to_global_output(output, '--match-first-not', match_first_not)\n\n match_second=''\n match_second=append_to_local_output(match_second, 'c', Generic(chainRightIn))\n match_second=append_to_local_output(match_second, 'r', Generic(resiNumRightIn))\n match_second=append_to_local_output(match_second, 'a', Generic(atomSerialRightIn))\n match_second=append_to_local_output(match_second, 'R', Generic(resiNameRightIn))\n match_second=append_to_local_output(match_second, 'A', Generic(atomNameRightIn))\n output=append_to_global_output(output, '--match-second', match_second)\n\n match_second_not=''\n match_second_not=append_to_local_output(match_second_not, 'c', Generic(chainRightOut))\n match_second_not=append_to_local_output(match_second_not, 'r', Generic(resiNumRightOut))\n match_second_not=append_to_local_output(match_second_not, 'a', Generic(atomSerialRightOut))\n match_second_not=append_to_local_output(match_second_not, 'R', Generic(resiNameRightOut))\n match_second_not=append_to_local_output(match_second_not, 'A', Generic(atomNameRightOut))\n output=append_to_global_output(output, '--match-second-not', match_second_not)\n\n output=append_to_global_output(output, '--match-min-area', Float(contactAreaMin))\n output=append_to_global_output(output, '--match-max-area', Float(contactAreaMax))\n\n output=append_to_global_output(output, '--match-min-dist', Float(minimalDistanceMin))\n output=append_to_global_output(output, '--match-max-dist', Float(minimalDistanceMax))\n\n output=append_to_global_output(output, '--match-min-seq-sep', Int(seqSeparationMin))\n output=append_to_global_output(output, '--match-max-seq-sep', Int(seqSeparationMax))\n\n return output", "def print_report(self):\n\n if not self._translation:\n print('Failed to translate ciphertext.')\n return\n\n plaintext = self.ciphertext.translate(\n SubSolver._make_trans_from_dict(self._translation))\n print('Ciphertext:')\n print(self.ciphertext, '\\n')\n print('Plaintext:')\n print(plaintext, '\\n')\n\n print('Substitutions:')\n items = [key + ' -> ' + word for key, word\n in self._translation.items()]\n items.sort()\n i = 0\n for item in items:\n print(item + ' ', end='')\n if i % 5 == 4:\n print('')\n i += 1", "def print_matrices(self):\n\n \"\"\"\n Print Optimal Matrix\n \"\"\"\n print(\"\\n\", \"_\"*7, \"Optimal Matrix\", \"_\"*7)\n print(\"\\t\\t\" + \"\\t\".join(list(self.sequenceB)))\n for i in range(0, len(self.sequenceA)+1):\n\n if i >= 1:\n print(self.sequenceA[i-1] + '\\t', end=\"\")\n else:\n print('\\t', end=\"\")\n for j in range(0, len(self.sequenceB)+1):\n print(str(self.optimal[i][j]) + '\\t', end=\"\"),\n print(\"\")\n\n \"\"\"\n Print Direction Matrix\n \"\"\"\n print(\"\\n\", \"_\"*7, \"Direction Matrix\", \"_\"*7)\n print(\"\\t\\t\" + \"\\t\".join(list(self.sequenceB)))\n for i in range(0, len(self.sequenceA)+1):\n if i >= 1:\n print(self.sequenceA[i-1] + '\\t', end=\"\"),\n else:\n print('\\t', end=\"\"),\n for j in range(0, len(self.sequenceB)+1):\n print(str(self.direction[i][j]) + '\\t', end=\"\"),\n print(\"\")", "def test_convTranpose2d(self, _, module, inputs, filters, bias=None):\n\n utils.compare_tracing_methods(\n module, inputs, filters, fusible_ops={\"aten::_convolution\"}\n )", "def word_cross_product_phi(t1, t2):\n return Counter([(w1, w2) for w1, w2 in product(t1.leaves(), t2.leaves())])", "def task_two_test():\n # First test\n # Create points list for task two\n points = np.random.rand(2, 4)\n # Translate and rotate it somehow\n tetta = np.random.uniform(low=0, high=2 * np.pi, 
size=(1,))[0]\n R = np.array([[np.cos(tetta), -np.sin(tetta)],\n [np.sin(tetta), np.cos(tetta)]])\n T = np.random.uniform(low=0, high=3, size=(2, 1))\n H = np.append(R, T, axis=1)\n points_translated = np.dot(H, np.append(points, np.ones((1, 4)), axis=0))\n print(\"Points 2d translation + rotation:\\n\", H)\n points_list = np.array(list(zip(points.T, points_translated.T)))\n task_two(points_list)\n # Second test\n H = np.random.rand(3, 3)\n points_translated = np.dot(H, np.append(points, np.ones((1, 4)), axis=0))\n # Normalize it\n points = np.random.rand(3, 4)\n tetta = np.random.uniform(low=0, high=2 * np.pi, size=(1,))[0]\n R = np.array([[np.cos(tetta), -np.sin(tetta), 0],\n [np.sin(tetta), np.cos(tetta), 0],\n [0, 0, 1]])\n T = np.random.uniform(low=0, high=3, size=(3, 1))\n H = np.append(R, T, axis=1)\n print(\"Points 3d translation + rotation:\\n\", H)\n points_translated = np.dot(H, np.append(points, np.ones((1, 4)), axis=0))\n # Convert to p2\n norm = lambda x: [x[0] / x[2], x[1] / x[2]]\n points = np.array([norm(x) for x in points.T]).T\n points_translated = np.array([norm(x) for x in points_translated.T]).T\n points_list = np.array(list(zip(points.T, points_translated.T)))\n task_two(points_list)", "def transform():", "def tt_tt_flat_inner(tt_a, tt_b):\n\n if tt_a.is_tt_matrix() != tt_b.is_tt_matrix():\n raise ValueError('One of the arguments is a TT-tensor, the other is '\n 'a TT-matrix, disallowed')\n are_both_matrices = tt_a.is_tt_matrix() and tt_b.is_tt_matrix()\n\n # TODO: compare shapes and raise if not consistent.\n\n ndims = tt_a.ndims\n if tt_b.ndims != ndims:\n raise ValueError('Arguments should have the same number of dimensions, '\n 'got %d and %d instead.' % (ndims, tt_b.ndims()))\n\n axes_str = 'ij' if are_both_matrices else 'i'\n # Convert BatchSize 1 batch into TT object to simplify broadcasting.\n # tt_a = shapes.squeeze_batch_dim(tt_a)\n # tt_b = shapes.squeeze_batch_dim(tt_b)\n is_a_batch = isinstance(tt_a, TensorTrainBatch)\n is_b_batch = isinstance(tt_b, TensorTrainBatch)\n is_res_batch = is_a_batch or is_b_batch\n a_batch_str = 'o' if is_a_batch else ''\n b_batch_str = 'o' if is_b_batch else ''\n res_batch_str = 'o' if is_res_batch else ''\n init_einsum_str = '{1}a{0}b,{2}c{0}d->{3}bd'.format(axes_str, a_batch_str,\n b_batch_str,\n res_batch_str)\n a_core = tt_a.tt_cores[0]\n b_core = tt_b.tt_cores[0]\n # Simplest example of this operation:\n # if both arguments are TT-tensors, then it is\n # res = tf.einsum('aib,cid->bd', a_core, b_core)\n res = torch.einsum(init_einsum_str, a_core, b_core)\n\n einsum_str = '{3}ac,{1}a{0}b,{2}c{0}d->{3}bd'.format(axes_str, a_batch_str,\n b_batch_str,\n res_batch_str)\n for core_idx in range(1, ndims):\n a_core = tt_a.tt_cores[core_idx]\n b_core = tt_b.tt_cores[core_idx]\n # Simplest example of this operation:\n # if both arguments are TT-tensors, then it is\n # res = tf.einsum('ac,aib,cid->bd', res, a_core, b_core)\n res = torch.einsum(einsum_str, res, a_core, b_core)\n return torch.squeeze(res)", "def __str__(self) -> str:\n st = \"\\tmat = \" + self.mat\n st += \"\\n\\trotation = \" + str(self.ham_rot) + '\\n'\n pl_str = ['(' + p.join(' ') + ')' for p in self.planes]\n st += '\\tplane: ' + \", \".join(pl_str) + '\\n'\n return st", "def test_tranform_chain() -> None:\n transform_chain = TransformChain(\n input_variables=[\"first_name\", \"last_name\"],\n output_variables=[\"greeting\"],\n transform=dummy_transform,\n )\n input_dict = {\"first_name\": \"Leroy\", \"last_name\": \"Jenkins\"}\n response = 
transform_chain(input_dict)\n expected_response = {\"greeting\": \"Leroy Jenkins says hello\"}\n assert response == expected_response", "def __str__(self):\n left = ''\n right = ''\n for i in range(len(self.ant)):\n left += Prop.__str__(self.ant[i]) + \", \"\n \n for i in range(len(self.con)):\n right += Prop.__str__(self.con[i]) + \", \"\n return left[:-2] + '|-- ' + right[:-2]", "def __str__(self):\n return 'Tensor product {}: {} params, wires {}'.format([i.name for i in self.obs], len(self.params), self.wires)", "def test_print_op():\r\n b = tensor.fmatrix()\r\n f = theano.function([b],theano.printing.Print()(b)*2, mode=mode_with_gpu)\r\n #theano.printing.debugprint(f)\r\n #print f.maker.fgraph.toposort()\r\n#[GpuFromHost(<TensorType(float32, matrix)>), <theano.printing.Print object at 0x3581210>(GpuFromHost.0), GpuElemwise{mul}(CudaNdarray{[[ 2.]]}, <theano.printing.Print object at 0x3581210>.0), HostFromGpu(GpuElemwise{mul}.0)]\r\n topo = f.maker.fgraph.toposort()\r\n assert topo[0].op == cuda.gpu_from_host\r\n assert isinstance(topo[1].op, theano.printing.Print)\r\n assert isinstance(topo[2].op, cuda.GpuElemwise)\r\n assert topo[3].op == cuda.host_from_gpu\r\n f(numpy.random.random((5,5)).astype('float32'))", "def transform(self, X, Y):\n\n X_star = self.X_tranform.dot(X.T).T\n Y_star = self.Y_tranform.dot(Y.T).T\n\n return X_star, Y_star", "def compose(self, other, qargs=None, front=False):\n pass", "def output(context, targets):\n # context: [[('p', ['a', 'b'])], ...]\n # targets: [(('p', ['a', 'b']), 1, [0,1,2]), ...]\n print('\\n'.join([write_r(c) for c in context]))\n for t, v, s in targets:\n print(TARGET_T.format(write_r([t]), v, ','.join(map(str, s))))", "def TwoStage(Ref,Q,Te,Tc,DTsh,DTsc,eta_oi,f_p,Tsat_ic,DTsh_ic,Ts_Ph='Ph',prints=False,skipPlot=False,axis=None,**kwargs):\n\n warnings.warn(\"This function has been deprecated. 
PLease consider converting it to an object inheriting from \\\"BaseCycle\\\".\",DeprecationWarning)\n\n T=np.zeros((8))\n h=np.zeros_like(T)\n p=np.zeros_like(T)\n s=np.zeros_like(T)\n rho=np.zeros_like(T)\n T[0]=np.NAN\n s[0]=np.NAN\n T[1]=Te+DTsh\n pe=PropsSI('P','T',Te,'Q',1.0,Ref)\n pc=PropsSI('P','T',Tc,'Q',1.0,Ref)\n pic=PropsSI('P','T',Tsat_ic,'Q',1.0,Ref)\n Tbubble_c=PropsSI('T','P',pc,'Q',0,Ref)\n Tbubble_e=PropsSI('T','P',pe,'Q',0,Ref)\n\n h[1]=PropsSI('H','T',T[1],'P',pe,Ref)\n s[1]=PropsSI('S','T',T[1],'P',pe,Ref)\n rho[1]=PropsSI('D','T',T[1],'P',pe,Ref)\n T[5]=Tbubble_c-DTsc\n h[5]=PropsSI('H','T',T[5],'P',pc,Ref)\n s[5]=PropsSI('S','T',T[5],'P',pc,Ref)\n rho[5]=PropsSI('D','T',T[5],'P',pc,Ref)\n mdot=Q/(h[1]-h[5])\n\n rho1=PropsSI('D','T',T[1],'P',pe,Ref)\n h2s=PropsSI('H','S',s[1],'P',pic,Ref)\n Wdot1=mdot*(h2s-h[1])/eta_oi\n h[2]=h[1]+(1-f_p)*Wdot1/mdot\n T[2]=PropsSI('T','H',h[2],'P',pic,Ref)\n s[2]=PropsSI('S','T',T[2],'P',pic,Ref)\n rho[2]=PropsSI('D','T',T[2],'P',pic,Ref)\n T[3]=288\n p[3]=pic\n h[3]=PropsSI('H','T',T[3],'P',pic,Ref)\n s[3]=PropsSI('S','T',T[3],'P',pic,Ref)\n rho[3]=PropsSI('D','T',T[3],'P',pic,Ref)\n rho3=PropsSI('D','T',T[3],'P',pic,Ref)\n h4s=PropsSI('H','T',s[3],'P',pc,Ref)\n Wdot2=mdot*(h4s-h[3])/eta_oi\n h[4]=h[3]+(1-f_p)*Wdot2/mdot\n T[4]=PropsSI('T','H',h[4],'P',pc,Ref)\n s[4]=PropsSI('S','T',T[4],'P',pc,Ref)\n rho[4]=PropsSI('D','T',T[4],'P',pc,Ref)\n\n sbubble_e=PropsSI('S','T',Tbubble_e,'Q',0,Ref)\n sbubble_c=PropsSI('S','T',Tbubble_c,'Q',0,Ref)\n sdew_e=PropsSI('S','T',Te,'Q',1,Ref)\n sdew_c=PropsSI('S','T',Tc,'Q',1,Ref)\n\n hsatL=PropsSI('H','T',Tbubble_e,'Q',0,Ref)\n hsatV=PropsSI('H','T',Te,'Q',1,Ref)\n ssatL=PropsSI('S','T',Tbubble_e,'Q',0,Ref)\n ssatV=PropsSI('S','T',Te,'Q',1,Ref)\n vsatL=1/PropsSI('D','T',Tbubble_e,'Q',0,Ref)\n vsatV=1/PropsSI('D','T',Te,'Q',1,Ref)\n x=(h[5]-hsatL)/(hsatV-hsatL)\n s[6]=x*ssatV+(1-x)*ssatL\n T[6]=x*Te+(1-x)*Tbubble_e\n rho[6]=1.0/(x*vsatV+(1-x)*vsatL)\n\n h[6]=h[5]\n h[7]=h[1]\n s[7]=s[1]\n T[7]=T[1]\n p=[np.nan,pe,pic,pic,pc,pc,pe,pe]\n COP=Q/(Wdot1+Wdot2)\n RE=h[1]-h[6]\n\n if prints==True:\n print('x5:',x)\n print('COP:', COP)\n print('COPH', (Q+Wdot1+Wdot2)/(Wdot1+Wdot2))\n print(T[2]-273.15,T[4]-273.15,p[2]/p[1],p[4]/p[3])\n print(mdot,mdot*(h[4]-h[5]),pic)\n print('Vdot1',mdot/rho1,'Vdisp',mdot/rho1/(3500/60.)*1e6/0.7)\n print('Vdot2',mdot/rho3,'Vdisp',mdot/rho3/(3500/60.)*1e6/0.7)\n print(mdot*(h[4]-h[5]),Tc-273.15)\n for i in range(1,len(T)-1):\n print('%d & %g & %g & %g & %g & %g \\\\\\\\' %(i,T[i]-273.15,p[i],h[i],s[i],rho[i]))\n else:\n print(Tsat_ic,COP)\n\n if skipPlot==False:\n if axis==None:\n ax=matplotlib.pyplot.gca()\n else:\n ax=axis\n if Ts_Ph in ['ph','Ph']:\n ax.plot(h,p)\n elif Ts_Ph in ['Ts','ts']:\n s_copy=s.copy()\n T_copy=T.copy()\n for i in range(1,len(s)-1):\n ax.plot(s[i],T[i],'bo',mfc='b',mec='b')\n dT=[0,-5,5,-20,5,5,5]\n ds=[0,0.05,0,0,0,0,0]\n ax.text(s[i]+ds[i],T[i]+dT[i],str(i))\n\n s=list(s)\n T=list(T)\n s.insert(7,sdew_e)\n T.insert(7,Te)\n s.insert(5,sbubble_c)\n T.insert(5,Tbubble_c)\n s.insert(5,sdew_c)\n T.insert(5,Tc)\n\n ax.plot(s,T)\n s=s_copy\n T=T_copy\n else:\n raise TypeError('Type of Ts_Ph invalid')\n return COP", "def __mul__(self, other):\n # combined rotation is the product of the two rotations (Rself*Rother):\n v1 = self.pure\n v2 = other.pure\n real = self.real * other.real - \\\n numpy.inner(v1, v2)\n cofactor1 = v1[1] * v2[2] - v1[2] * v2[1]\n cofactor2 = v1[2] * v2[0] - v1[0] * v2[2]\n cofactor3 = v1[0] * v2[1] - v1[1] * v2[0]\n pure = 
numpy.array([cofactor1, cofactor2, cofactor3]) \\\n + self.real * other.pure \\\n + other.real * self.pure\n # combined translation\n trans = self.getQuaternion().apply(other.trans) + self.trans\n trans[3] = 1.\n return Transformation(trans=trans, quaternion=(real, pure))", "def trotter(P=\"{P:d}\", B=\"{B:d}\"):\n return _trotter.format(P, B)", "def show(self,verbose=0):\n print 'inferenceArgs',self.ws.inferenceArgs\n print 'inferenceExpr',theano.pp(self.ws.inferenceExpr)\n if verbose>=1:\n print 'debugprint inferenceExpr:'\n theano.printing.debugprint(self.ws.inferenceExpr)\n if self.ws.dataLossExpr:\n print 'dataLossArgs',self.ws.dataLossArgs\n print 'dataLossExpr',theano.pp(self.ws.dataLossExpr)\n print 'debugprint dataLossExpr:'\n theano.printing.debugprint(self.ws.dataLossExpr)", "def concat(self):\n nfa2 = self.aut_stack.pop()\n nfa1 = self.aut_stack.pop()\n\n nfa1_star = nfa1.transform('X')\n nfa2_star = nfa2.transform('Y')\n\n nfa_concat = Automaton()\n nfa_concat.final = nfa2_star.final\n nfa_concat.q_0 = nfa1_star.q_0\n nfa_concat.states = list(set(nfa1_star.states).union(nfa2_star.states))\n nfa_concat.alphabet = list(set(nfa1_star.alphabet).union(nfa2_star.alphabet))\n nfa_concat.transition = dict(nfa1_star.transition, **nfa2_star.transition)\n for a in nfa1_star.final:\n key = a + ', .'\n if nfa_concat.transition.get(key, 0) == 0:\n nfa_concat.transition[key] = [nfa2_star.q_0]\n else:\n nfa_concat.transition[key].append(nfa2_star.q_0)\n\n self.aut_stack.append(nfa_concat)", "def view(self):\r\n\t\t\r\n\t\t# add zero term\r\n\t\tt = self\r\n\t\tif t == []:\r\n\t\t\tt = [Te(0)]\r\n\t\t\r\n\t\t# display\r\n\t\tfor i in t:\r\n\t\t\ti.view()\r\n\t\t\t\r\n\t\t# spacer\r\n\t\tprint(' ')\r\n\t\t\t\r\n\t\treturn None", "def __compose_transformation(self):\n s = self.scale\n rotR = self.rotation\n t = self.translation\n T = np.eye(4)\n T[0:3, 3] = t\n R = np.eye(4)\n R[0:3, 0:3] = rotR\n M = T.dot(R)\n if s == 1:\n M = T.dot(R)\n else:\n S = np.eye(4)\n S[0:3, 0:3] = np.diag([s, s, s])\n M = T.dot(R).dot(S)\n return M", "def draw_registration_result(source, target, transformation):\n source_temp = copy.deepcopy(source)\n target_temp = copy.deepcopy(target)\n source_temp.paint_uniform_color([1, 0.706, 0])\n target_temp.paint_uniform_color([0, 0.651, 0.929])\n source_temp.transform(transformation)\n open3d.visualization.draw_geometries([source_temp, target_temp])", "def test_display__method2(self):\n Rectangle.reset_objects()\n s2 = Square(2, 2)\n f = io.StringIO()\n with contextlib.redirect_stdout(f):\n s2.display()\n self.assertEqual(f.getvalue(), \" ##\\n ##\\n\")", "def AffineTransform( from_pts, to_pts ):\n \n # check that there are match points\n if len(from_pts) != len(to_pts) or len(to_pts)<1:\n print \"from_pts and to_pts must be of same size.\"\n return False\n\n # check the dimensions\n dim = len(from_pts[0]) # num of dimensions\n if len(from_pts) < dim:\n print \"Too few points => under-determined system.\"\n return False\n elif len(from_pts) > dim + 1:\n print \"Too many points => over-determined system.\"\n return False\n\n \n #segregate the x and y coordinages\n from_pts_x, from_pts_y = zip(*from_pts)\n to_pts_x, to_pts_y = zip(*to_pts)\n \n #create the Matricies for processing\n I = np.matrix([from_pts_x, from_pts_y, [1,1,1]])\n P = np.matrix([to_pts_x, to_pts_y])\n \n #Calculate the 2D affine transform matrix (A)\n A = P * linalg.pinv(I) \n\n # Make a result object\n class Transformation:\n \"\"\"Result object that represents the transformation\n from affine 
fitter.\"\"\"\n\n def To_Str(self):\n res = \"\"\n for j in range(dim):\n str1 = \"x%d' = \" % j\n for i in range(dim):\n str1 +=\"x%d * %f + \" % (i, A[i][j+dim+1])\n str1 += \"%f\" % A[dim][j+dim+1]\n res += str1 + \"\\n\"\n return res\n\n def Transform(self, pt_x, pt_y):\n pt_vector = np.matrix([[pt_x], [pt_y], [1]])\n transformed_pt = A * pt_vector\n return map(itemgetter(0), transformed_pt.tolist())\n return Transformation()", "def compose_transforms(*transforms):\n from functools import reduce\n\n for transform in transforms:\n vg.shape.check(locals(), \"transform\", (4, 4))\n\n if len(transforms) == 0:\n return np.eye(4)\n\n return reduce(np.dot, reversed(transforms))", "def print_results(list_object1, list_object2):\n STUDENT_COLUMN = 16\n GENERAL_COLUMN = 14\n\n print()\n print(\"{:>{}}\".format(\"Student ID\",STUDENT_COLUMN),end=\"\")\n\n for i in range(len(list_object1)):\n print(\"{:>{}}\".format(list_object1[i][0],GENERAL_COLUMN),end=\"\")\n \n print(\"{:>{}}\".format(\"Course grade\",GENERAL_COLUMN))\n\n for tuple_element in list_object2:\n\n print(\"{:>{}}\".format(tuple_element[0],STUDENT_COLUMN),end=\"\")\n\n for i, value in enumerate(tuple_element[1]):\n print(\"{:>{}}\".format(value,GENERAL_COLUMN),end=\"\")\n \n print(\"{:>{}}\".format(round(tuple_element[-1],2),GENERAL_COLUMN))", "def dot_prod(t1: torch.Tensor, t2: torch.Tensor, verbose: bool = False):\n assert t1.size() == t2.size(), \"Sizes for dot-product must match\"\n return mo.dot_prod(t1, t2, verbose)", "def test_repr(Group: Type[jaxlie.MatrixLieGroup]):\n transform = sample_transform(Group)\n print(transform)", "def __repr__(self):\n return \"(%.2f, %.2f) <-> %s(%.2f, %.2f)%s <-> (%.2f, %.2f) %s\" % (\n self.prev.x, self.prev.y,\n 'i' if self.intersect else ' ',\n self.x, self.y,\n ('e' if self.entry else 'x') if self.intersect else ' ',\n self.next.x, self.next.y,\n ' !' 
if self.intersect and not self.checked else ''\n )", "def mirrorTransformations_Custom(self):\n\n pass", "def sentence_encoding_rnn_phi(t1, t2):\n return (t1.leaves(), t2.leaves())", "def printGraph(self):\n print \"-----\"\n for feature in self.features:\n feature.printFeature()\n for constraint in self.constraints:\n constraint.printConstraint()\n print \"-----\"", "def compose(transforms):\n assert isinstance(transforms, list)\n for transform in transforms:\n assert callable(transform), \"list of functions expected\"\n\n def composition(obj):\n \"Composite function\"\n for transform in transforms:\n obj = transform(obj)\n return obj\n return composition", "def __str__(self):\n s = \"\"\n for i in range(13,25):\n if (self.p1vec[i] > 0):\n s += \"|W{0:02}|\".format(self.p1vec[i])\n elif (self.p2vec[25 - i] > 0):\n s += \"|B{0:02}|\".format(self.p2vec[25 - i])\n else:\n s += \"| |\"\n s += '\\n'\n for i in range(12, 0,-1):\n if (self.p1vec[i] > 0):\n s += \"|W{0:02}|\".format(self.p1vec[i])\n elif (self.p2vec[25 - i] > 0):\n s += \"|B{0:02}|\".format(self.p2vec[25 - i])\n else:\n s += \"| |\"\n return s", "def __repr__(self):\n # first check for identity quaternion to avoid nans\n if self.real != 1:\n theta = numpy.arccos(self.real)\n angle = 360 * theta / numpy.pi\n xyz = self.pure / numpy.sin(theta)\n else:\n angle = 0.\n xyz = self.pure\n result = \"Transformation: tx ty tz rx ry rz angle\\n %g %g %g %g %g %g %g\" \\\n % (self.trans[0], self.trans[1], self.trans[2],\n xyz[0], xyz[1], xyz[2], angle)\n return result", "def test_transform_album_with_two_transforms(self):\n album = Album(artist='Artist', album='Album')\n tflist = TransformList()\n tflist.add_transform(Transform(1,\n cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2',\n ))\n tflist.add_transform(Transform(2,\n cond_album=True, pattern_album='Album',\n change_album=True, to_album='Album 2',\n ))\n\n self.assertEqual(album.last_transform, 0)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.transformed, False)\n\n tflist.apply_album(album)\n\n self.assertEqual(album.last_transform, 2)\n self.assertEqual(album.artist, 'Artist 2')\n self.assertEqual(album.album, 'Album 2')\n self.assertEqual(album.transformed, True)", "def _calculate_composite_transforms(self):\n\n self._calculate_similarity()\n # Finally, calculate composite transforms\n commands = []\n for moving_slice_index in self.options.slice_range:\n commands.append(self._calculate_composite(moving_slice_index))\n self.execute(commands)\n\n self._logger.info(\"Done with calculating the transformations.\")", "def test_print_op():\r\n b = tensor.fmatrix()\r\n f = theano.function([b], theano.printing.Print()(b) * 2,\r\n mode=mode_with_gpu)\r\n theano.printing.debugprint(f)\r\n #print f.maker.fgraph.toposort()\r\n#[GpuFromHost(<TensorType(float32, matrix)>), <theano.printing.Print object at 0x3581210>(GpuFromHost.0), GpuElemwise{mul}(CudaNdarray{[[ 2.]]}, <theano.printing.Print object at 0x3581210>.0), HostFromGpu(GpuElemwise{mul}.0)]\r\n topo = f.maker.fgraph.toposort()\r\n assert topo[0].op == gpu_from_host\r\n assert isinstance(topo[1].op, theano.printing.Print)\r\n assert isinstance(topo[2].op, GpuElemwise)\r\n assert topo[3].op == host_from_gpu\r\n f(numpy.random.random((5, 5)).astype('float32'))", "def pprint(self, composites):\n\n lines = []\n\n # if only a single composite is used then cast to list\n if not isinstance(composites, list):\n composites = [composites]\n\n # allow 
for multiple root composites\n for composite in composites:\n lines += self._format(composite)\n\n return self.newlinechar.join(lines)", "def print_transition(start, symbol, end):\n template_string = \"{{{}}} --{}--> {{{}}}\"\n print template_string.format(\",\".join(map(str, start)),\n symbol,\n \",\".join(map(str, end)))", "def print_operation(operations):\n for operation in operations:\n print ' ',\n change_color_by_tag(operation)\n if operation['ExtAttributes']:\n print_extattributes_of_member(operation['ExtAttributes'])\n print operation['Type'],\n if operation['Arguments']:\n print operation['Name'],\n print_argument(operation['Arguments'])\n else:\n print operation['Name']", "def __str__(self):\n return \"s(\" + str(self.p1) + \",\" + str(self.p2) + \")\"", "def print_models(G_XtoY, G_YtoX, D_X, D_Y):\n print(\" G_XtoY \")\n print(\"-----------------------------------------------\")\n print(G_XtoY)\n print()\n\n print(\" G_YtoX \")\n print(\"-----------------------------------------------\")\n print(G_YtoX)\n print()\n\n print(\" D_X \")\n print(\"-----------------------------------------------\")\n print(D_X)\n print()\n\n print(\" D_Y \")\n print(\"-----------------------------------------------\")\n print(D_Y)\n print()", "def main():\n conf_matrix1 = one_vs_all()\n conf_matrix2 = all_vs_all()\n results = my_info() + '\\t\\t'\n results += np.array_str(np.diagonal(conf_matrix1)) + '\\t\\t'\n results += np.array_str(np.diagonal(conf_matrix2))\n print results + '\\t\\t'\n\n # sum = 0\n #\n # for i in range(len(conf_matrix1)):\n # sum += conf_matrix1[i][i]\n #\n # print \"One-vs-All corecct classifications: \", sum\n #\n # sum = 0\n #\n # for i in range(len(conf_matrix2)):\n # sum += conf_matrix2[i][i]\n #\n # print \"All-vs-All correct classificatinos: \", sum\n\n #print(\"onevsall\")\n #print_latex_table(conf_matrix1)\n #print(\"allvsall\")\n #print_latex_table(conf_matrix2)", "def print(self):\n tiles = list(map(list, zip(*self.tiles))) # transposed\n print('tiles = [')\n for row in tiles:\n print('\\t' + repr(row))\n print(']')\n print('props = [')\n for prop in self.props:\n print('\\t' + repr(prop))\n print(']')", "def printXY(self):\n print zip(self.x, self.y)", "def format_transfer_case_two(self, tik_instance):\n ub_ori_data = self.ub_memory\n ub_trans_data = ub_ori_data\n loop_memory = ub_ori_data - ub_ori_data % \\\n (CUBE_SIZE * CUBE_SIZE * self.dst_shape[-3] + CUBE_SIZE)\n num_data_one_loop = self.dst_shape[-4] * self.dst_shape[-3] * \\\n self.dst_shape[-2] * self.dst_shape[-1]\n num_data_one_loop_padding = num_data_one_loop + self.dst_shape[-4] * \\\n self.dst_shape[-1]\n loop_times = (num_data_one_loop_padding + loop_memory - 1) // \\\n loop_memory\n if len(self.dst_shape) == 4:\n total_core_loop_num = loop_times\n else:\n total_core_loop_num = functools_reduce(lambda x1, x2: x1 * x2,\n self.dst_shape[:-4]) * \\\n loop_times\n core_number = _set_core_num(total_core_loop_num)\n\n with tik_instance.for_range(0, core_number, block_num=core_number) \\\n as num_core:\n ub_ori = tik_instance.Tensor(self.dtype,\n (ub_ori_data,),\n name=\"ub_ori\",\n scope=tik.scope_ubuf)\n ub_trans = tik_instance.Tensor(self.dtype,\n (ub_trans_data,),\n name=\"ub_trans\",\n scope=tik.scope_ubuf)\n core_loop, sum_core = _cal_core(tik_instance, total_core_loop_num,\n num_core, core_number)\n\n src_ub_index = 0\n\n with tik_instance.for_range(0, core_loop) as num_core_loop:\n total_core_loop = sum_core + num_core_loop\n num_loop_time = total_core_loop % loop_times\n num_outer_axis = 
(total_core_loop - num_loop_time) // \\\n loop_times\n\n handling_times = tik_instance.Scalar(\"uint64\")\n is_last = tik_instance.Scalar(\"uint64\")\n is_last.set_as(0)\n handling_times.set_as(loop_memory //\n (CUBE_SIZE * CUBE_SIZE *\n self.dst_shape[-3] + CUBE_SIZE))\n with tik_instance.if_scope(num_loop_time == loop_times - 1):\n if num_data_one_loop_padding % loop_memory == 0:\n remainder = loop_memory\n else:\n remainder = num_data_one_loop_padding % loop_memory\n handling_times.set_as((remainder + CUBE_SIZE * CUBE_SIZE *\n self.dst_shape[-3] +\n CUBE_SIZE - 1) //\n (CUBE_SIZE * CUBE_SIZE *\n self.dst_shape[-3] + CUBE_SIZE))\n is_last.set_as(1)\n src_gm_index = num_outer_axis * self.src_shape[-1] * \\\n self.src_shape[-2] + loop_memory // \\\n (CUBE_SIZE * self.dst_shape[-3] + 1) * \\\n num_loop_time\n with tik_instance.for_range(0, self.src_shape[-2] //\n MAX_BURST_NUMBER) as num_repeat:\n tik_instance.data_move(ub_ori[src_ub_index +\n MAX_BURST_NUMBER *\n num_repeat *\n (loop_memory //\n (CUBE_SIZE *\n self.dst_shape[-3] + 1))],\n self.src_gm[src_gm_index +\n MAX_BURST_NUMBER *\n num_repeat *\n self.src_shape[-1]],\n 0, MAX_BURST_NUMBER,\n handling_times * self.num_byte // 2,\n (self.src_shape[-1] -\n handling_times * CUBE_SIZE +\n self.num_data - 1) //\n self.num_data, 0)\n with tik_instance.if_scope(self.src_shape[-2] %\n MAX_BURST_NUMBER != 0):\n tik_instance.data_move(ub_ori[src_ub_index +\n (self.src_shape[-2] //\n MAX_BURST_NUMBER) *\n MAX_BURST_NUMBER *\n (loop_memory //\n (CUBE_SIZE *\n self.dst_shape[-3] + 1))],\n self.src_gm[src_gm_index +\n (self.src_shape[-2] //\n MAX_BURST_NUMBER) *\n MAX_BURST_NUMBER *\n self.src_shape[-1]], 0,\n self.src_shape[-2] %\n MAX_BURST_NUMBER,\n handling_times * self.num_byte // 2,\n (self.src_shape[-1] -\n handling_times * CUBE_SIZE +\n self.num_data - 1) //\n self.num_data, 0)\n self.data_rearrange_case_one(tik_instance, ub_ori, ub_trans,\n handling_times, is_last)\n dst_gm_index = num_outer_axis * num_data_one_loop + \\\n loop_memory // (CUBE_SIZE * CUBE_SIZE *\n self.dst_shape[-3] +\n CUBE_SIZE) * \\\n (CUBE_SIZE * CUBE_SIZE *\n self.dst_shape[-3]) * num_loop_time\n tik_instance.data_move(self.dst_gm[dst_gm_index], ub_trans[0],\n 0, handling_times,\n CUBE_SIZE * self.dst_shape[-3] *\n CUBE_SIZE // self.num_data,\n self.num_byte // 2, 0)\n\n return tik_instance", "def __init__(self, transforms):\n super().__init__(transforms)\n self.S1 = transforms[0]\n self.T1 = transforms[1]\n self.S2 = transforms[2]\n self.T2 = transforms[3]", "def transform():\n pass", "def to_transfac(self):\n m = \"%s\\t%s\\t%s\\n\" % (\"DE\", self.id, \"unknown\")\n for i, (row, cons) in enumerate(zip(self.pfm, self.to_consensus())):\n m += \"%i\\t%s\\t%s\\n\" % (i, \"\\t\".join([str(int(x)) for x in row]), cons)\n m += \"XX\"\n return m" ]
[ "0.6846836", "0.593155", "0.58877105", "0.5660713", "0.5565579", "0.5429697", "0.5382136", "0.5374937", "0.5349274", "0.5291017", "0.5267222", "0.5266499", "0.52166545", "0.5211318", "0.5192603", "0.51735705", "0.5163085", "0.5161964", "0.51475674", "0.5144708", "0.513714", "0.5115896", "0.5102741", "0.5101016", "0.5072932", "0.5072235", "0.50717944", "0.50700873", "0.50699806", "0.50620157", "0.5061686", "0.50529015", "0.50525564", "0.5032917", "0.5027349", "0.5016399", "0.5016391", "0.50003225", "0.4997889", "0.4997301", "0.49633837", "0.4959434", "0.4956094", "0.49473572", "0.49421507", "0.4928063", "0.49067932", "0.49044043", "0.49022365", "0.48906925", "0.4884883", "0.4863719", "0.48602065", "0.48514533", "0.4851371", "0.48472422", "0.48404026", "0.48394448", "0.48379388", "0.48373678", "0.4833436", "0.4822371", "0.48223448", "0.4820648", "0.48178667", "0.4816837", "0.4804004", "0.48036394", "0.4796703", "0.4796038", "0.47940812", "0.4793377", "0.47852796", "0.47729027", "0.47717988", "0.47704896", "0.47662687", "0.47502336", "0.47496367", "0.47491646", "0.47450876", "0.47429234", "0.47414285", "0.47385138", "0.47364736", "0.47279817", "0.47258937", "0.47139633", "0.47136605", "0.47123498", "0.47116238", "0.47072026", "0.47027206", "0.46994227", "0.46922225", "0.4689057", "0.46884847", "0.46817943", "0.46764114", "0.4676037" ]
0.54261017
6
Compute the composition of two transformations T2*T1
def compose_transform3(phi1,theta1,psi1,sx1,sy1,sz1,scale1,phi2,theta2,psi2,sx2,sy2,sz2,scale2): R1 = Transform({"type":"spider","phi":float(phi1),"theta":float(theta1),"psi":float(psi1),"tx":float(sx1),"ty":float(sy1),"tz":float(sz1),"mirror":0,"scale":float(scale1)}) R2 = Transform({"type":"spider","phi":float(phi2),"theta":float(theta2),"psi":float(psi2),"tx":float(sx2),"ty":float(sy2),"tz":float(sz2),"mirror":0,"scale":float(scale2)}) Rcomp=R2*R1 d = Rcomp.get_params("spider") return d["phi"],d["theta"],d["psi"],d["tx"],d["ty"],d["tz"],d["scale"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compose_transform(T1, T2):\n aux_vec = np.array([0, 0, 1]).reshape(1, 3)\n\n T1 = np.concatenate((T1, aux_vec), axis=0)\n T2 = np.concatenate((T2, aux_vec), axis=0)\n\n T1_inv = np.linalg.inv(T1)\n T = T1_inv@T2\n\n return T[0:2]", "def compose_transform2(alpha1, sx1, sy1, scale1, alpha2, sx2, sy2, scale2):\n\n\tt1 = Transform({\"type\":\"2D\",\"alpha\":alpha1,\"tx\":sx1,\"ty\":sy1,\"mirror\":0,\"scale\":scale1})\n\tt2 = Transform({\"type\":\"2D\",\"alpha\":alpha2,\"tx\":sx2,\"ty\":sy2,\"mirror\":0,\"scale\":scale2})\n\ttt = t2*t1\n\td = tt.get_params(\"2D\")\n\treturn d[ \"alpha\" ], d[ \"tx\" ], d[ \"ty\" ], d[ \"scale\" ]", "def compose(disp_1, disp_2, indexing='ij'):\n\n assert indexing == 'ij', \"currently only ij indexing is implemented in compose\"\n\n return disp_2 + transform(disp_1, disp_2, interp_method='linear', indexing=indexing)", "def compose(transforms):\n trans_objs = [TRANSFORMS.build(t) for t in transforms]\n return tv_transforms.Compose(trans_objs)", "def compose(RK1,RK2,h1=1,h2=1):\n f1=h1/(h1+h2)\n f2=h2/(h1+h2)\n A=np.vstack([\n np.hstack([RK2.A*f2,np.zeros([np.size(RK2.A,0),np.size(RK1.A,1)])]),\n np.hstack([np.tile(RK2.b*f2,(len(RK1),1)),RK1.A*f1])]).squeeze()\n b=np.hstack([RK2.b*f2,RK1.b*f1]).squeeze()\n if RK1.is_explicit() and RK2.is_explicit():\n return ExplicitRungeKuttaMethod(A,b)\n else:\n return RungeKuttaMethod(A,b)", "def __mul__(self, other):\n # combined rotation is the product of the two rotations (Rself*Rother):\n v1 = self.pure\n v2 = other.pure\n real = self.real * other.real - \\\n numpy.inner(v1, v2)\n cofactor1 = v1[1] * v2[2] - v1[2] * v2[1]\n cofactor2 = v1[2] * v2[0] - v1[0] * v2[2]\n cofactor3 = v1[0] * v2[1] - v1[1] * v2[0]\n pure = numpy.array([cofactor1, cofactor2, cofactor3]) \\\n + self.real * other.pure \\\n + other.real * self.pure\n # combined translation\n trans = self.getQuaternion().apply(other.trans) + self.trans\n trans[3] = 1.\n return Transformation(trans=trans, quaternion=(real, pure))", "def transforms_multiply(t0s, t1s):\r\n \r\n return ut.matrix_multiply(t0s, t1s)", "def __mul__(self, other : TransformType):\n return Transform(\n self.rotation * other.rotation,\n self.rotation * other.translation + self.translation)", "def _calculate_composite_transforms(self):\n\n self._calculate_similarity()\n # Finally, calculate composite transforms\n commands = []\n for moving_slice_index in self.options.slice_range:\n commands.append(self._calculate_composite(moving_slice_index))\n self.execute(commands)\n\n self._logger.info(\"Done with calculating the transformations.\")", "def compose(transforms):\n assert isinstance(transforms, list)\n for transform in transforms:\n assert callable(transform), \"list of functions expected\"\n\n def composition(obj):\n \"Composite function\"\n for transform in transforms:\n obj = transform(obj)\n return obj\n return composition", "def _compose_transforms(basis_transforms, source_basis, source_dag):\n example_gates = _get_example_gates(source_dag)\n mapped_instrs = {}\n\n for gate_name, gate_num_qubits in source_basis:\n # Need to grab a gate instance to find num_qubits and num_params.\n # Can be removed following https://github.com/Qiskit/qiskit-terra/pull/3947 .\n example_gate = example_gates[gate_name, gate_num_qubits]\n num_params = len(example_gate.params)\n\n placeholder_params = ParameterVector(gate_name, num_params)\n placeholder_gate = Gate(gate_name, gate_num_qubits, list(placeholder_params))\n placeholder_gate.params = list(placeholder_params)\n\n dag = DAGCircuit()\n qr = 
QuantumRegister(gate_num_qubits)\n dag.add_qreg(qr)\n dag.apply_operation_back(placeholder_gate, qr[:], [])\n mapped_instrs[gate_name, gate_num_qubits] = placeholder_params, dag\n\n for gate_name, gate_num_qubits, equiv_params, equiv in basis_transforms:\n logger.debug(\n \"Composing transform step: %s/%s %s =>\\n%s\",\n gate_name,\n gate_num_qubits,\n equiv_params,\n equiv,\n )\n\n for mapped_instr_name, (dag_params, dag) in mapped_instrs.items():\n doomed_nodes = [\n node\n for node in dag.op_nodes()\n if (node.op.name, node.op.num_qubits) == (gate_name, gate_num_qubits)\n ]\n\n if doomed_nodes and logger.isEnabledFor(logging.DEBUG):\n\n logger.debug(\n \"Updating transform for mapped instr %s %s from \\n%s\",\n mapped_instr_name,\n dag_params,\n dag_to_circuit(dag, copy_operations=False),\n )\n\n for node in doomed_nodes:\n\n replacement = equiv.assign_parameters(\n dict(zip_longest(equiv_params, node.op.params))\n )\n\n replacement_dag = circuit_to_dag(replacement)\n\n dag.substitute_node_with_dag(node, replacement_dag)\n\n if doomed_nodes and logger.isEnabledFor(logging.DEBUG):\n\n logger.debug(\n \"Updated transform for mapped instr %s %s to\\n%s\",\n mapped_instr_name,\n dag_params,\n dag_to_circuit(dag, copy_operations=False),\n )\n\n return mapped_instrs", "def __mul__(self, other):\r\n if isinstance(other, tuple):\r\n return self.transform_point(other)\r\n if isinstance(other, LinearTransformation):\r\n return self.left_composition(other)\r\n else:\r\n print(other, type(other))\r\n raise NotImplementedError", "def test_transform_compose(self):\n\n # test for all kinds of curvatures K\n for k in (0, 1, -1, 1/11, -1/11, 11, -2):\n \n s = space(curvature=k)\n\n # use a small enough magnitude to not break math for very negative K\n magic = 0.33377777373737737777\n\n p = s.make_point((2/11, 6/11, 9/11), magic)\n q = s.make_point((3/7, 6/7, 2/7), magic)\n r = s.make_point((9/17, 8/17, 12/17), magic)\n\n f, g, h = map(space_point_transform, (p, q, r))\n\n # check the core principle: (f g) x = f (g x)\n self.assertTrue(point_isclose(\n (f(g))(r),\n f(g(r))\n ))\n\n # just for good measure, let's do it again with different vars\n self.assertTrue(point_isclose(\n (g(h))(p),\n g(h(p))\n ))\n\n def check_transform_eq(t1, t2, invert=False):\n for ref in (p, q, r):\n self.assertTrue(invert ^ point_isclose(\n t1(ref),\n t2(ref)\n ))\n\n # api says f(g) == f + g\n # this is just a convenience to let you write things with a sum instead of a product\n check_transform_eq(f(g), f + g)\n\n # non-commutative property\n check_transform_eq(f+g, g+f, invert=(k!=0))\n\n # associative property\n check_transform_eq(f+g+h, f+(g+h))\n\n # self commutative property\n f2 = f+f\n check_transform_eq(f2+f, f+f2)\n check_transform_eq(f2+f2, f+f2+f)", "def Compose(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD2_Compose(self, *args)", "def __compose_transformation(self):\n s = self.scale\n rotR = self.rotation\n t = self.translation\n T = np.eye(4)\n T[0:3, 3] = t\n R = np.eye(4)\n R[0:3, 0:3] = rotR\n M = T.dot(R)\n if s == 1:\n M = T.dot(R)\n else:\n S = np.eye(4)\n S[0:3, 0:3] = np.diag([s, s, s])\n M = T.dot(R).dot(S)\n return M", "def estimate_stage_affine(t0, t1):\n src = np.array([t.tforms[0].translation for t in t0])\n dst = np.array([t.tforms[1].translation for t in t1])\n aff = renderapi.transform.AffineModel()\n aff.estimate(src, dst)\n return aff", "def compose_transforms(*transforms):\n from functools import reduce\n\n for transform in transforms:\n 
vg.shape.check(locals(), \"transform\", (4, 4))\n\n if len(transforms) == 0:\n return np.eye(4)\n\n return reduce(np.dot, reversed(transforms))", "def combine_params2(alpha1, sx1, sy1, mirror1, alpha2, sx2, sy2, mirror2):\n\n\tt1 = Transform({\"type\":\"2D\",\"alpha\":alpha1,\"tx\":sx1,\"ty\":sy1,\"mirror\":mirror1,\"scale\":1.0})\n\tt2 = Transform({\"type\":\"2D\",\"alpha\":alpha2,\"tx\":sx2,\"ty\":sy2,\"mirror\":mirror2,\"scale\":1.0})\n\ttt = t2*t1\n\td = tt.get_params(\"2D\")\n\treturn d[ \"alpha\" ], d[ \"tx\" ], d[ \"ty\" ], d[ \"mirror\" ]", "def ConcatTransform(*args, **kwargs):\n return _gdi_.GraphicsContext_ConcatTransform(*args, **kwargs)", "def associate_comp(x, y):\n return torch.cat([x[:1] * y[:1] - x[1:] * y[1:], x[:1] * y[1:] + x[1:] * y[:1]])", "def construct(self, x1, x2):\n x1 = self.up(x1)\n x = self.concat((x1, x2))\n return self.conv(x)", "def apply_and_compare(self, image1_data, image2_data):\n\n return self.transformations_map[self.name](image1_data, image2_data)", "def calcT1(g2, g1):\n idop = FermiOp(g2.orbs, 3, 3)\n idop.data = np.eye(int(binom(g2.orbs, 3)))\n\n return p2N(g2, 3) - p2N(g1, 3) + idop", "def transform(self, X, Y):\n\n X_star = self.X_tranform.dot(X.T).T\n Y_star = self.Y_tranform.dot(Y.T).T\n\n return X_star, Y_star", "def _compose(self, left, right):\n pdb.set_trace()\n h = tf.concat(axis=0, values=[left, right])\n\n with tf.variable_scope(\"RNTN_main\", reuse=True):\n W = tf.get_variable(\"W\")\n b = tf.get_variable(\"b\")\n V = tf.get_variable(\"V\")\n\n # main neural tensor action\n # or tf.tensordot(V, h, axes=1), https://www.tensorflow.org/api_docs/python/tf/tensordot\n main_rntn_tmp = tf.matmul(tf.transpose(h), tf.reshape(V, [100, 100*50]))\n main_rntn_ret = tf.matmul(tf.reshape(main_rntn_tmp, [50,100]), h)\n\n composed = main_rntn_ret + tf.matmul(W, h) + b\n return tf.nn.relu(composed)", "def _calculate_composite(self, moving_slice_index):\n\n # The transformation chain is a sequence of pairs of (fixed, moving)\n # slices. 
This sequence links the reference slices with given moving\n # slice.\n transformation_chain = \\\n self._get_transformation_chain(moving_slice_index)\n\n # Initialize the partial transforms array and then collect all partial\n # transformations constituting given composite transformation.\n partial_transformations = []\n for (m_slice, r_slice) in transformation_chain:\n partial_transformations.append(\n self.f['part_transf'](mIdx=m_slice, fIdx=r_slice))\n\n # Define the output transformation filename\n composite_transform_filename = \\\n self.f['comp_transf'](mIdx=moving_slice_index, fIdx=r_slice)\n\n # Initialize and define the composite transformation wrapper\n command = pos_wrappers.ants_compose_multi_transform(\n dimension=self.__IMAGE_DIMENSION,\n output_image=composite_transform_filename,\n deformable_list=[],\n affine_list=partial_transformations)\n\n return copy.deepcopy(command)", "def product_on_basis(self, t1, t2):\n return tensor( (module.monomial(x1)*module.monomial(x2) for (module, x1, x2) in zip(self._sets, t1, t2)) ) #.", "def transform(tvec1, rvec1, tvec2, rvec2):\n op = localToGlobal(np.squeeze(tvec2), np.squeeze(rvec2))\n tvec3 = []\n for tvec in tvec1:\n #tvec = tvec.squeeze()\n tvec3.append(np.matmul(op, tvec))\n tvec3 = np.array(tvec3)\n return tvec3", "def _compose(self, other):\n if self.num_qubits != other.num_qubits:\n raise QiskitError(\"Multiplication on different number of qubits.\")\n result = CNOTDihedral(num_qubits=self.num_qubits)\n result.shift = [\n (x[0] + x[1]) % 2 for x in zip(self._z2matvecmul(other.linear, self.shift), other.shift)\n ]\n result.linear = self._z2matmul(other.linear, self.linear)\n # Compute x' = B1*x + c1 using the p_j identity\n new_vars = []\n for i in range(self.num_qubits):\n support = np.arange(other.num_qubits)[np.nonzero(self.linear[i])]\n poly = SpecialPolynomial(self.num_qubits)\n poly.set_pj(support)\n if self.shift[i] == 1:\n poly = -1 * poly\n poly.weight_0 = (poly.weight_0 + 1) % 8\n new_vars.append(poly)\n # p' = p1 + p2(x')\n result.poly = self.poly + other.poly.evaluate(new_vars)\n return result", "def geom_trans(cls, Y1, Y2):\n return super().geom_trans(Y1, Y2)", "def commutator(A, B):\n return A @ B - B @ A", "def calculate_transformation(self, p: np.ndarray, o: np.ndarray):\n self.set_inputs(p)\n self.set_outputs(o)\n self.reset_transformation_to_rest()\n self.reset_output_transformation_to_rest()\n # activation resets the hidden layer to rest (unless primed)\n self.activation(clamps = ['input', 'output'])\n return np.copy(self.t)[0]", "def create_composite(dim, transformations):\n compos = sitk.Transform(dim, sitk.sitkIdentity)\n for transformation in transformations:\n compos.AddTransform(transformation)\n return compos", "def CombineRotation(a, b):\n # Use matrix multiplication: c = b*a.\n # We put 'b' on the left and 'a' on the right because,\n # just like when you use a matrix M to rotate a vector V,\n # you put the M on the left in the product M*V.\n # We can think of this as 'b' rotating all the 3 column vectors in 'a'.\n\n return RotationMatrix([\n [\n b.rot[0][0]*a.rot[0][0] + b.rot[1][0]*a.rot[0][1] + b.rot[2][0]*a.rot[0][2],\n b.rot[0][1]*a.rot[0][0] + b.rot[1][1]*a.rot[0][1] + b.rot[2][1]*a.rot[0][2],\n b.rot[0][2]*a.rot[0][0] + b.rot[1][2]*a.rot[0][1] + b.rot[2][2]*a.rot[0][2]\n ],\n [\n b.rot[0][0]*a.rot[1][0] + b.rot[1][0]*a.rot[1][1] + b.rot[2][0]*a.rot[1][2],\n b.rot[0][1]*a.rot[1][0] + b.rot[1][1]*a.rot[1][1] + b.rot[2][1]*a.rot[1][2],\n b.rot[0][2]*a.rot[1][0] + b.rot[1][2]*a.rot[1][1] + 
b.rot[2][2]*a.rot[1][2]\n ],\n [\n b.rot[0][0]*a.rot[2][0] + b.rot[1][0]*a.rot[2][1] + b.rot[2][0]*a.rot[2][2],\n b.rot[0][1]*a.rot[2][0] + b.rot[1][1]*a.rot[2][1] + b.rot[2][1]*a.rot[2][2],\n b.rot[0][2]*a.rot[2][0] + b.rot[1][2]*a.rot[2][1] + b.rot[2][2]*a.rot[2][2]\n ]\n ])", "def makeTransformations(epsg1, epsg2):\n sr1 = osr.SpatialReference()\n sr1.ImportFromEPSG(epsg1)\n preventGdal3axisSwap(sr1)\n sr2 = osr.SpatialReference()\n sr2.ImportFromEPSG(epsg2)\n preventGdal3axisSwap(sr2)\n tr1to2 = osr.CoordinateTransformation(sr1, sr2)\n tr2to1 = osr.CoordinateTransformation(sr2, sr1)\n return (tr1to2, tr2to1)", "def compose1(*functions: _ComposeArg[_T]) -> _Transform[_T]:\n def composition(arg, **kwargs):\n for f in reversed(functions):\n if isinstance(f, tuple):\n f, kws = f\n arg = f(arg, **{kw: kwargs[kw] for kw in kws})\n else:\n arg = f(arg)\n return arg\n return composition", "def cat(ts1: Tensor, ts2: Tensor, axes: int) -> Tensor:\n cat_op = CatOp(axes)\n return cat_op(ts1, ts2)", "def simple_chained_rep_rnn_phi(t1, t2):\n return t1.leaves() + t2.leaves()", "def diffeo_compose(a, b):\n c = np.empty_like(a)\n c[:, :, 0] = diffeo_apply(b, a[:, :, 0])\n c[:, :, 1] = diffeo_apply(b, a[:, :, 1])\n return c", "def two_bs2x4_transform_opt(t1, r1, t2, r2, input_state):\n size = len(input_state)\n out = np.zeros((size,) * 4, dtype=complex)\n\n def coef(k1, k2, k3, k4):\n return t1 ** k2 * (1j * r1) ** k1 * t2 ** k4 * (1j * r2) ** k3 / (factorial(k1) * factorial(k2) * factorial(k3) * factorial(k4))\n\n # index 'i' = (m,n,k,l)\n for i in np.ndindex(size, size, size, size):\n if i[2] <= i[0] and i[3] <= i[1] and i[0] + i[1] < size:\n out[i[2], i[0] - i[2], i[3], i[1] - i[3]] = coef(i[2], i[0] - i[2], i[3], i[1] - i[3]) * input_state[i[0], i[1]] * factorial(i[0]) * factorial(i[1])\n\n return out", "def _outer(a, b):\n a_flat = torch.reshape(a, [-1])\n b_flat = torch.reshape(b, [-1])\n a_mul = torch.unsqueeze(a_flat, dim=-1)\n b_mul = torch.unsqueeze(b_flat, dim=0)\n return a_mul * b_mul", "def register_com(vol_a: Volume, vol_b: Volume) -> Tuple[Volume, Volume]:\n from dipy.align.imaffine import transform_centers_of_mass\n\n affine = transform_centers_of_mass(vol_a, vol_a.grid_to_world, vol_b, vol_b.grid_to_world)\n\n vol_b.world_transform[:] = np.array(affine.affine)\n return vol_a, vol_b", "def o2transform(self, x, w):\n\n o2t = lambda x, w: K.dot(w, K.dot(x, K.transpose(w)))\n return tf.map_fn(o2t, [x, w])", "def transform_images(img1,img2):", "def correlation(in0, in1, parallel_iterations=None):\n \n return k.layers.Lambda(lambda inputs: tf.map_fn(\n fn=lambda inps: tf.squeeze(\n tf.nn.conv2d(\n tf.expand_dims(inps[0], 0), \n tf.expand_dims(inps[1], -1), \n strides=1, \n padding='VALID'\n ),\n axis=0,\n ), \n elems=inputs, \n parallel_iterations=parallel_iterations,\n fn_output_signature=in0.dtype,\n )\n )((in0,in1))", "def compose(self, point_a, point_b):\n raise NotImplementedError('The Lie group composition'\n ' is not implemented.')", "def forward_transform(self):\n\n if self._pipeline:\n #return functools.reduce(lambda x, y: x | y, [step[1] for step in self._pipeline[: -1]])\n return functools.reduce(lambda x, y: x | y, [step.transform for step in self._pipeline[:-1]])\n else:\n return None", "def compose(self, other, qargs=None, front=False):\n pass", "def compose_rewire(phi1, phi2):\n # cannot compose when dimensions are wrong\n assert phi1.indim == phi2.outdim\n\n # it does not make sense to compose with phi1 a variable\n assert not (isinstance(phi1, amnet.Variable))\n\n # compute 
the list of descendants of phi1 and phi2\n desc1 = descendants(phi1)\n desc2 = descendants(phi2)\n\n # the trees should have no overlaps\n nodeids1 = set([id(d) for d in desc1])\n nodeids2 = set([id(d) for d in desc2])\n assert len(nodeids1) == len(desc1)\n assert len(nodeids2) == len(desc2)\n assert len(nodeids1 & nodeids2) == 0\n\n # determine the variables x1, x2 associated with phi1, phi2\n vars1 = [d for d in desc1 if isinstance(d, amnet.Variable)]\n vars2 = [d for d in desc2 if isinstance(d, amnet.Variable)]\n assert len(vars1) == 1\n assert len(vars2) == 1\n x1 = vars1[0]\n x2 = vars2[0]\n\n # TODO: rewire here", "def merge2_comp(x, y, weight=0.5):\n z = normalize_comp(weight * x + (1-weight) * y)\n return z", "def tensor_outer_product(tensor1, tensor2):\n shape_1 = tensor1.shape\n shape_2 = tensor2.shape\n s1 = len(shape_1)\n s2 = len(shape_2)\n \n shape_1 = shape_1 + (1, )*s2\n shape_2 = (1, )*s1 + shape_2\n return np.reshape(tensor1, shape_1) * np.reshape(tensor2, shape_2)", "def transformation_from_points(points1, points2):\n points1 = points1.astype(np.float64)\n points2 = points2.astype(np.float64)\n\n c1 = np.mean(points1, axis=0)\n c2 = np.mean(points2, axis=0)\n points1 -= c1\n points2 -= c2\n\n s1 = np.std(points1)\n s2 = np.std(points2)\n points1 /= s1\n points2 /= s2\n\n u, _, vt = np.linalg.svd(np.matmul(points1.T, points2))\n r = np.matmul(u, vt).T\n\n return np.hstack(((s2 / s1) * r, (c2.T - (s2 / s1) * np.matmul(r, c1.T)).reshape(2, -1)))", "def tt_tt_flat_inner(tt_a, tt_b):\n\n if tt_a.is_tt_matrix() != tt_b.is_tt_matrix():\n raise ValueError('One of the arguments is a TT-tensor, the other is '\n 'a TT-matrix, disallowed')\n are_both_matrices = tt_a.is_tt_matrix() and tt_b.is_tt_matrix()\n\n # TODO: compare shapes and raise if not consistent.\n\n ndims = tt_a.ndims\n if tt_b.ndims != ndims:\n raise ValueError('Arguments should have the same number of dimensions, '\n 'got %d and %d instead.' 
% (ndims, tt_b.ndims()))\n\n axes_str = 'ij' if are_both_matrices else 'i'\n # Convert BatchSize 1 batch into TT object to simplify broadcasting.\n # tt_a = shapes.squeeze_batch_dim(tt_a)\n # tt_b = shapes.squeeze_batch_dim(tt_b)\n is_a_batch = isinstance(tt_a, TensorTrainBatch)\n is_b_batch = isinstance(tt_b, TensorTrainBatch)\n is_res_batch = is_a_batch or is_b_batch\n a_batch_str = 'o' if is_a_batch else ''\n b_batch_str = 'o' if is_b_batch else ''\n res_batch_str = 'o' if is_res_batch else ''\n init_einsum_str = '{1}a{0}b,{2}c{0}d->{3}bd'.format(axes_str, a_batch_str,\n b_batch_str,\n res_batch_str)\n a_core = tt_a.tt_cores[0]\n b_core = tt_b.tt_cores[0]\n # Simplest example of this operation:\n # if both arguments are TT-tensors, then it is\n # res = tf.einsum('aib,cid->bd', a_core, b_core)\n res = torch.einsum(init_einsum_str, a_core, b_core)\n\n einsum_str = '{3}ac,{1}a{0}b,{2}c{0}d->{3}bd'.format(axes_str, a_batch_str,\n b_batch_str,\n res_batch_str)\n for core_idx in range(1, ndims):\n a_core = tt_a.tt_cores[core_idx]\n b_core = tt_b.tt_cores[core_idx]\n # Simplest example of this operation:\n # if both arguments are TT-tensors, then it is\n # res = tf.einsum('ac,aib,cid->bd', res, a_core, b_core)\n res = torch.einsum(einsum_str, res, a_core, b_core)\n return torch.squeeze(res)", "def over(input_a, input_b):\n\n comp = input_b.duplicate()\n input_a.premult()\n ImageBufAlgo.over(comp, input_a, input_b)\n\n if comp.has_error:\n print \"Error merging over:\", comp.geterror()\n\n return comp", "def forward_step_layer(t1, t2, activation_f=torchfun.relu):\n return batch_norm_tensor(activation_f(t1.bmm(t2)))", "def compose(*funcs):\n return reduce(lambda f, g: lambda x: f(g(x)), funcs[::-1])", "def two_bs2x4_transform(t1, r1, t2, r2, input_state):\n size = len(input_state)\n output_state = np.zeros((size,) * 4, dtype=complex)\n for m in range(size):\n for n in range(size):\n\n for k in range(m + 1):\n for l in range(n + 1):\n # channels indexes\n ind1 = k\n ind2 = m - k\n ind3 = l\n ind4 = n - l\n coeff = input_state[m, n] * t1**(m - k) * (1j*r1)**k * t2**(n - l) * (1j*r2)**l * factorial(m) * factorial(n) / (factorial(k) * factorial(m - k) * factorial(l) * factorial(n - l))\n output_state[ind1, ind2, ind3, ind4] = output_state[ind1, ind2, ind3, ind4] + coeff\n\n return output_state", "def transform(self, x, y):\n # return self.transform_2D(x, y)\n return self.transform_perspective(x, y)", "def tensormul(t1, t2):\n dim1 = t1.get_shape().as_list()[-1]\n dim2 = t2.get_shape().as_list()[-1]\n result_shape_tensors = tf.unstack(tf.shape(t1))\n result_shape_tensors[-1] = dim2\n result_shape_tensor = tf.stack(result_shape_tensors)\n t1 = tf.reshape(t1, [-1, dim1])\n result = tf.matmul(t1, t2)\n result = tf.reshape(result, result_shape_tensors)\n return result", "def __rmul__(self, other):\r\n if isinstance(other, tuple):\r\n return self.transform_point(other)\r\n if isinstance(other, LinearTransformation):\r\n return self.right_composition(other)\r\n else:\r\n raise NotImplementedError", "def getAffineTransform(self, coord1, coord2):\n # generate coord1 into A\n mat_A = np.zeros((2*coord1.shape[0], 6))\n coord1 = np.hstack([coord1, np.ones((coord1.shape[0], 1))])\n for i in range(coord1.shape[0]):\n row = coord1[i,:]\n row_block = block_diag(row, row)\n assert(row_block.shape == (2,6))\n mat_A[2*i:2*i+2, :] = row_block\n \n # generate coord2 into b\n vec_b = coord2.reshape(-1,1)\n\n # solve the least square\n pseudo_inv = np.linalg.inv(np.matmul(mat_A.T, mat_A))\n pseudo_inv = 
np.matmul(pseudo_inv, mat_A.T)\n affine_mat = np.matmul(pseudo_inv, vec_b)\n assert(affine_mat.shape == (6,1))\n \n return affine_mat.reshape(2,-1)", "def generate_transforms_data(image1_data, image2_data):\n\n transformations = dict(\n unchanged=Transform('unchanged', image1_data, image2_data),\n reflected_vert=Transform(\n 'reflected_vert', image1_data, image2_data),\n reflected_horiz=Transform(\n 'reflected_horiz', image1_data, image2_data),\n rotated_90=Transform('rotated_90', image1_data, image2_data),\n rotated_180=Transform('rotated_180', image1_data, image2_data),\n rotated_270=Transform('rotated_270', image1_data, image2_data)\n )\n\n transformations_names = [\n 'unchanged',\n 'reflected_vert',\n 'reflected_horiz',\n 'rotated_90',\n 'rotated_180',\n 'rotated_270'\n ]\n\n best_match = transformations['unchanged']\n for transformation in transformations_names:\n if transformations[transformation].match > best_match.match:\n best_match = transformations[transformation]\n if best_match.match > .98:\n break\n return {best_match.name: best_match}", "def compose2(a: Callable[[AA], AR], b: Callable[[BA], AA]) -> Callable[[BA], AR]:\n a_name = _name(a)\n b_name = _name(b)\n\n def c(arg: BA) -> AR:\n f\"Function composed as {a_name}({b_name}(_)).\"\n return a(b(arg))\n\n c.__name__ = f\"{a_name}∘{b_name}\"\n return c", "def crossproduct(first, other=FreeCAD.Vector(0,0,1)):\n if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):\n return FreeCAD.Vector(first.y*other.z - first.z*other.y, first.z*other.x - first.x*other.z, first.x*other.y - first.y*other.x)", "def transform_proj(p1, p2, x, y, nocopy=False):\n\n try:\n # This always makes a copy, even if projections are equivalent\n return _transform_internal(p1, p2, x, y, always_xy=True)\n except TypeError:\n if proj_is_same(p1, p2):\n if nocopy:\n return x, y\n else:\n return copy.deepcopy(x), copy.deepcopy(y)\n\n return _transform_internal(p1, p2, x, y)", "def anticommutator(A, B):\n return A @ B + B @ A", "def _mul_(self, other):\n # No need for consistency check since self and other are guaranted\n # to have the same parent. 
In particular, they are defined on the same\n # free module.\n #\n # Special cases:\n if self._is_identity:\n return other\n if other._is_identity:\n return self\n if other is self._inverse or self is other._inverse:\n return self._fmodule.identity_map()\n # General case:\n fmodule = self._fmodule\n resu = self.__class__(fmodule)\n basis = self.common_basis(other)\n if basis is None:\n raise ValueError(\"no common basis for the composition\")\n # The composition is performed as a tensor contraction of the last\n # index of self (position=1) and the first index of other (position=0):\n resu._components[basis] = self._components[basis].contract(1,\n other._components[basis],0)\n return resu", "def register_2d(vol_a: Volume, vol_b: Volume, axis: int = 2, transform_cls: type = TranslationTransform2D) -> Tuple[\n Volume, Volume]:\n from dipy.align.imaffine import (MutualInformationMetric,\n AffineRegistration)\n from dispim.metrics import MUTUAL_INFORMATION_METRIC, MUTUAL_INFORMATION_GRADIENT_METRIC\n\n vol_a_flat = np.mean(vol_a.data, axis=axis)\n vol_b_flat = np.mean(vol_b.data, axis=axis)\n\n if metrack.is_tracked(MUTUAL_INFORMATION_METRIC) or metrack.is_tracked(MUTUAL_INFORMATION_GRADIENT_METRIC):\n def callback(value: float, gradient: float):\n metrack.append_metric(MUTUAL_INFORMATION_METRIC, (None, value))\n metrack.append_metric(MUTUAL_INFORMATION_GRADIENT_METRIC, (None, gradient))\n else:\n callback = None\n\n nbins = 32\n sampling_prop = None\n metric = MutualInformationMetric(nbins, sampling_prop, sampling_type='grid')\n\n level_iters = [5000000, 1000000, 500000, 200000, 70000, 70000]\n sigmas = [15.0, 7.0, 3.0, 2.0, 1.0, 0.0]\n factors = [8, 4, 2, 2, 1, 1]\n\n affreg = AffineRegistration(metric=metric,\n level_iters=level_iters,\n sigmas=sigmas,\n factors=factors)\n\n transform = transform_cls()\n params0 = None\n axes = np.ones((4,), dtype=np.bool)\n axes[axis] = False\n starting_affine = vol_b.world_transform[axes][:, axes]\n\n affine = affreg.optimize(vol_a_flat, vol_b_flat, transform, params0,\n vol_a.grid_to_world_2d(axis), vol_b.grid_to_world_2d(axis),\n starting_affine=starting_affine)\n\n # TODO: Do something more clever\n if axis == 0:\n vol_b.world_transform[1:3, 1:3] = affine.affine[:2, :2]\n vol_b.world_transform[1:3, 3] = affine.affine[:2, 2]\n elif axis == 1:\n vol_b.world_transform[0, 0] = affine.affine[0, 0]\n vol_b.world_transform[2, 0] = affine.affine[1, 0]\n vol_b.world_transform[0, 2] = affine.affine[0, 1]\n vol_b.world_transform[2, 2] = affine.affine[1, 1]\n elif axis == 2:\n vol_b.world_transform[:2, :2] = affine.affine[:2, :2]\n vol_b.world_transform[:2, 3] = affine.affine[:2, 2]\n\n logger.debug('Registration transform: ' + str(vol_b.world_transform))\n\n return vol_a, vol_b", "def merge_analytics(self):\n if not (self.c1.has_analytic_ft and self.c2.has_analytic_ft):\n raise ValueError('both convolvables must have analytic FTs')\n else:\n c_out = Convolvable(None, None, None, has_analytic_ft=True)\n c_out.s1 = self.c1.copy()\n c_out.s2 = self.c2.copy()\n\n def aft(self, x, y):\n part1 = self.s1.analytic_ft(x, y)\n part2 = self.s2.analytic_ft(x, y)\n return part1 * part2\n\n c_out.analytic_ft = types.MethodType(aft, c_out)\n return c_out", "def compose(self,coefficients,t=1,N=None):\n xa,xb,ya,yb=coefficients\n l=max(len(xa),len(xb),len(ya),len(yb))\n return [(self.inverseTransform(xa,xb,t,N),self.inverseTransform(ya,yb,t,N))]", "def traj_conv(traj1, traj2, method = 'fft'):\r\n return Trajectory(conv_array(traj1.modes, traj2.modes, method = method))", "def 
pose_pair_construct(p1,n1,p2,n2):\n v1 = p2-p1; v1 /= np.linalg.norm(v1)\n R1 = tf_construct(n1,v1)\n return RigidTransform.from_Rt(R1, p1)", "def build_transform(self):\n if self.training:\n all_trans = [trans.BEVRandomHorizontalFlip(), trans.BEVToTensor()]\n else:\n all_trans = [trans.BEVToTensor()]\n\n self.transform = trans.Compose(all_trans)\n return self.transform", "def basic_geometric_product(obj1, obj2):\n def mul_table(b1, b2):\n return MV.base_mul_table[(b1, b2)]\n\n obj12 = bilinear_product(obj1 * obj2, mul_table)\n\n return obj12", "def _commuting_products(q_1: Q, q_2: Q) -> Dict:\n\n s_t, s_x, s_y, s_z = q_1.t, q_1.x, q_1.y, q_1.z\n q_2_t, q_2_x, q_2_y, q_2_z = q_2.t, q_2.x, q_2.y, q_2.z\n\n product_dict = {\n \"tt\": s_t * q_2_t,\n \"xx+yy+zz\": s_x * q_2_x + s_y * q_2_y + s_z * q_2_z,\n \"tx+xt\": s_t * q_2_x + s_x * q_2_t,\n \"ty+yt\": s_t * q_2_y + s_y * q_2_t,\n \"tz+zt\": s_t * q_2_z + s_z * q_2_t,\n }\n\n return product_dict", "def _compose_after_inplace(self, transform):\n # naive approach - update self to be equal to transform and\n # compose_before_from_vector_inplace\n self_vector = self.as_vector().copy()\n self.update_from_vector(transform.as_vector())\n return self.compose_before_from_vector_inplace(self_vector)", "def commutator(A, B=None):\n if B:\n return A * B - B * A\n return SPre(A) - SPost(A)", "def build_transform(self):\n cfg = self.cfg\n\n # we are loading images with OpenCV, so we don't need to convert them\n # to BGR, they are already! So all we need to do is to normalize\n # by 255 if we want to convert to BGR255 format, or flip the channels\n # if we want it to be in RGB in [0-1] range.\n if cfg.INPUT.TO_BGR255:\n to_bgr_transform = T.Lambda(lambda x: x * 255)\n else:\n to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])\n\n normalize_transform = T.Normalize(\n mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD\n )\n\n transform = T.Compose(\n [\n T.ToPILImage(),\n T.Resize(self.min_image_size),\n T.ToTensor(),\n to_bgr_transform,\n normalize_transform,\n ]\n )\n return transform", "def build_transform(self):\n cfg = self.cfg\n\n # we are loading images with OpenCV, so we don't need to convert them\n # to BGR, they are already! So all we need to do is to normalize\n # by 255 if we want to convert to BGR255 format, or flip the channels\n # if we want it to be in RGB in [0-1] range.\n if cfg.INPUT.TO_BGR255:\n to_bgr_transform = T.Lambda(lambda x: x * 255)\n else:\n to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])\n\n normalize_transform = T.Normalize(\n mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD\n )\n\n transform = T.Compose(\n [\n T.ToPILImage(),\n T.Resize(self.min_image_size),\n T.ToTensor(),\n to_bgr_transform,\n normalize_transform,\n ]\n )\n return transform", "def build_transform(self):\n cfg = self.cfg\n\n # we are loading images with OpenCV, so we don't need to convert them\n # to BGR, they are already! 
So all we need to do is to normalize\n # by 255 if we want to convert to BGR255 format, or flip the channels\n # if we want it to be in RGB in [0-1] range.\n if cfg.INPUT.TO_BGR255:\n to_bgr_transform = T.Lambda(lambda x: x * 255)\n else:\n to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])\n\n normalize_transform = T.Normalize(\n mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD\n )\n\n transform = T.Compose(\n [\n T.ToPILImage(),\n T.Resize(self.min_image_size),\n T.ToTensor(),\n to_bgr_transform,\n normalize_transform,\n ]\n )\n return transform", "def transpose_dot(self, other):\n from divisi2 import operators\n return operators.transpose_dot(self, other)", "def compose(self, *funcs) -> \"fn\":\n return self._mod.compose(self, *funcs)", "def _compute_carry_and_output(self, x, h_tm1, c_tm1):\n x_i, x_f, x_c, x_o = x\n h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1\n i = self.recurrent_activation(\n x_i + K.dot(h_tm1_i, self.recurrent_kernel[:, :self.units]))\n f = self.recurrent_activation(x_f + K.dot(\n h_tm1_f, self.recurrent_kernel[:, self.units:self.units * 2]))\n c = f * c_tm1 + i * self.activation(x_c + K.dot(\n h_tm1_c, self.recurrent_kernel[:, self.units * 2:self.units * 3]))\n o = self.recurrent_activation(\n x_o + K.dot(h_tm1_o, self.recurrent_kernel[:, self.units * 3:]))\n return c, o", "def _calculate_transforms(self):\n\n self._logger.info(\"Generating transformations.\")\n\n # Calculate partial transforms - get partial transformation chain;\n partial_transformation_pairs = \\\n map(lambda idx: self._get_slice_pair(idx),\n self.options.slice_range)\n\n # Flatten the slices pairs\n partial_transformation_pairs =\\\n list(flatten(partial_transformation_pairs))\n\n # If user decided to prealign the images by their centre of gravity\n # an additional series of transformations has to be carried out.\n if self.options.enableMomentsAlignment:\n commands = map(lambda x: self._get_cog_alignment(*x),\n partial_transformation_pairs)\n commands = filter(None, commands)\n\n self._logger.info(\"Executing the centre of gravity transforms.\")\n self.execute(commands)\n\n # Calculate affine transformation for each slices pair\n commands = map(lambda x: self._get_partial_transform(*x),\n partial_transformation_pairs)\n commands = filter(None, commands)\n self._logger.info(\"Executing the transformation commands.\")\n self.execute(commands)", "def compute_geometric_transform(p1,p2,best_matches):\n # How many good matches are there?\n num_bad_matches = sum([x == None for x in best_matches])\n num_good_matches = p1.shape[0]-num_bad_matches\n\n # Prepare data for fitting\n A = np.ones((3, num_good_matches))\n B = np.ones((3, num_good_matches))\n count = 0\n for i in range(p1.shape[0]):\n if best_matches[i] != None:\n A[0,count] = p1[i,0]\n A[1,count] = p1[i,1]\n A[2,count] = p1[i,2]\n B[0,count] = p2[best_matches[i],0]\n B[1,count] = p2[best_matches[i],1]\n B[2,count] = p2[best_matches[i],2]\n count += 1\n A = A.T\n B = B.T\n\n model = GeometricTransform(bScale=False)\n data = np.hstack((A,B))\n\n # Need at least seven points for a good transform fit...\n if (num_good_matches < 7):\n print 'WARNING: not enough matches to compute a geometric transform.'\n return 1, np.identity(3), np.array([0,0,0])\n elif (num_good_matches < 20):\n print 'WARNING: not enough matches to compute a robust fit.'\n return model.fit(data)\n else:\n import lflib.calibration.ransac as ransac\n try:\n bestdata = ransac.ransac(data,model,\n 10, #rand samp size (num required to fit)\n 30, #num iterations\n 4.0, #transformed 
dist required to be considered inlier,\n 15, #min inliers to be considered \n debug=False,return_all=False)\n return model.fit(bestdata)\n except ValueError:\n return model.fit(data)", "def transform():", "def generate_combination_transforms(self) -> None:\n if len(self._index_to_transform) == 0:\n raise ValueError(\n \"No transforms have been added yet. Nothing to base generated transforms on\")\n if len(self._transform_cycles) == 2:\n cycles = [cycle for cycle in self._transform_cycles if cycle is not None]\n assert len(cycles) == 1\n raise ValueError(\"Only a single transform cycle '{}' exists. Nothing to base \"\n \"combinations on\".format(cycles[0]))\n self._scratch.combinations.clear()\n # self._combination.transforms.clear()\n self._find_combination_targets()\n self._add_unique_combinations()", "def calc_transform(src_, dst_):\n M_ = cv2.getPerspectiveTransform(src_, dst_)\n Minv_ = cv2.getPerspectiveTransform(dst_, src_)\n return M_, Minv_", "def project(self, other):\n n = other.normalized()\n return type(self)(self.dot(n) * n)", "def AffineTransform( from_pts, to_pts ):\n \n # check that there are match points\n if len(from_pts) != len(to_pts) or len(to_pts)<1:\n print \"from_pts and to_pts must be of same size.\"\n return False\n\n # check the dimensions\n dim = len(from_pts[0]) # num of dimensions\n if len(from_pts) < dim:\n print \"Too few points => under-determined system.\"\n return False\n elif len(from_pts) > dim + 1:\n print \"Too many points => over-determined system.\"\n return False\n\n \n #segregate the x and y coordinages\n from_pts_x, from_pts_y = zip(*from_pts)\n to_pts_x, to_pts_y = zip(*to_pts)\n \n #create the Matricies for processing\n I = np.matrix([from_pts_x, from_pts_y, [1,1,1]])\n P = np.matrix([to_pts_x, to_pts_y])\n \n #Calculate the 2D affine transform matrix (A)\n A = P * linalg.pinv(I) \n\n # Make a result object\n class Transformation:\n \"\"\"Result object that represents the transformation\n from affine fitter.\"\"\"\n\n def To_Str(self):\n res = \"\"\n for j in range(dim):\n str1 = \"x%d' = \" % j\n for i in range(dim):\n str1 +=\"x%d * %f + \" % (i, A[i][j+dim+1])\n str1 += \"%f\" % A[dim][j+dim+1]\n res += str1 + \"\\n\"\n return res\n\n def Transform(self, pt_x, pt_y):\n pt_vector = np.matrix([[pt_x], [pt_y], [1]])\n transformed_pt = A * pt_vector\n return map(itemgetter(0), transformed_pt.tolist())\n return Transformation()", "def estimate_rigid_transform(points1, points2, translation_only=False):\n centroid1 = points1.mean(axis=0)\n centroid2 = points2.mean(axis=0)\n\n if translation_only:\n rotation = np.eye(2)\n translation = centroid2 - centroid1\n\n else:\n centered_points1 = points1 - centroid1\n centered_points2 = points2 - centroid2\n\n sigma = centered_points2.T @ centered_points1\n U, _, Vt = np.linalg.svd(sigma)\n\n rotation = U @ Vt\n translation = -rotation @ centroid1 + centroid2\n\n H = np.eye(3)\n H[:2,:2] = rotation\n H[:2, 2] = translation\n return H", "def _compute_carry_and_output_fused(self, z, c_tm1):\n z0, z1, z2, z3 = z\n i = self.recurrent_activation(z0)\n f = self.recurrent_activation(z1)\n c = f * c_tm1 + i * self.activation(z2)\n o = self.recurrent_activation(z3)\n return c, o", "def lcombine( v1, v2, k1, k2 ):\n return [ x*k1 + y*k2 for (x,y) in izip(v1,v2) ]", "def compose1(f, g):\n return lambda x: f(g(x))", "def concat(a, b):\n return torch.cat((a, b), 1)", "def complex_multiplication(c1,c2,cr):\n cr[0] = c1[0]*c2[0] - c1[1]*c2[1]\n cr[1] = c1[0]*c2[1] + c1[1]*c2[0]\n return cr", "def __call__(self, inputs):\n 
with tf.variable_scope('conv_t_{}'.format(self.idx)):\n activation_fn = get_act_fn(self.act_fn)\n\n if self.cfg.VAR_ON_CPU:\n kernels = variable_on_cpu(\n name='kernels',\n shape=[self.kernel_size, self.kernel_size,\n self.n_kernel, inputs.get_shape().as_list()[3]],\n initializer=self.w_init_fn,\n dtype=tf.float32)\n conv_t = tf.nn.conv2d_transpose(\n value=inputs,\n filter=kernels,\n output_shape=self.output_shape,\n strides=[1, self.stride, self.stride, 1],\n padding=self.padding)\n\n if self.use_bias:\n biases = variable_on_cpu(\n name='biases',\n shape=[self.n_kernel],\n initializer=tf.zeros_initializer(),\n dtype=tf.float32)\n conv_t = tf.nn.bias_add(conv_t, biases)\n\n if activation_fn is not None:\n conv_t = activation_fn(conv_t)\n\n else:\n biases_initializer = tf.zeros_initializer() if self.use_bias else None\n conv_t = tf.contrib.layers.conv2d_transpose(\n inputs=inputs,\n num_outputs=self.n_kernel,\n kernel_size=self.kernel_size,\n stride=self.stride,\n padding=self.padding,\n activation_fn=activation_fn,\n weights_initializer=self.w_init_fn,\n biases_initializer=biases_initializer)\n\n return conv_t", "def _transform(\n self, x: \"torch.Tensor\", y: Optional[\"torch.Tensor\"], **kwargs\n ) -> Tuple[\"torch.Tensor\", Optional[\"torch.Tensor\"]]:\n import torch\n import torchvision.transforms.functional as F\n\n img_size = x.shape[:2]\n\n angle = float(\n torch.empty(1)\n .uniform_(float(self.degree_range[0]), float(self.degree_range[1]))\n .item()\n )\n\n max_dx = float(self.translate[0] * img_size[1])\n max_dy = float(self.translate[1] * img_size[0])\n tx = int(round(torch.empty(1).uniform_(-max_dx, max_dx).item()))\n ty = int(round(torch.empty(1).uniform_(-max_dy, max_dy).item()))\n translations = (tx, ty)\n\n scale = float(torch.empty(1).uniform_(self.scale[0], self.scale[1]).item())\n\n # x needs to have channel first\n x = x.permute(2, 0, 1)\n x = F.affine(\n img=x, angle=angle, translate=translations, scale=scale, shear=(0.0, 0.0)\n )\n x = x.permute(1, 2, 0)\n\n return torch.clamp(x, min=self.clip_values[0], max=self.clip_values[1]), y", "def compose(*funcs):\n if not funcs:\n return identity\n else:\n f0 = funcs[0]\n def composed(_):\n # f_1 o f_2 o ... o f_n\n pre_composed = compose(*funcs[1:])\n return f0(pre_composed(_))\n return composed", "def correlate(array1,array2):\r\n arrayout = np.conj(fft2(array1)) * fft2(array2)\r\n return ifft2(arrayout)" ]
[ "0.7939624", "0.6803194", "0.6570444", "0.632991", "0.63013947", "0.62177503", "0.61400926", "0.6106614", "0.61058927", "0.6075287", "0.5950678", "0.59419036", "0.5940465", "0.5888778", "0.585534", "0.58440787", "0.5840003", "0.5824449", "0.578852", "0.5774012", "0.57467675", "0.57031405", "0.5691068", "0.5681797", "0.5657176", "0.56545097", "0.56540674", "0.5646774", "0.5612634", "0.559813", "0.55745566", "0.55543435", "0.553583", "0.55202216", "0.55108273", "0.55051136", "0.5504769", "0.54958", "0.549248", "0.54842114", "0.54699814", "0.54673296", "0.54527813", "0.54386854", "0.543207", "0.5430778", "0.54262376", "0.54161143", "0.5410717", "0.54028666", "0.53966624", "0.53828454", "0.5377761", "0.53668535", "0.53517115", "0.5344785", "0.5330493", "0.5327464", "0.5326328", "0.5325997", "0.5323832", "0.5319919", "0.5311097", "0.53069615", "0.52873576", "0.5282165", "0.5280762", "0.52806973", "0.5279103", "0.52762115", "0.5275763", "0.52735835", "0.5271811", "0.525486", "0.52537346", "0.52390146", "0.5217256", "0.5216004", "0.5216004", "0.5216004", "0.5214955", "0.52107364", "0.5210196", "0.51953", "0.5195071", "0.5190543", "0.51882035", "0.5186745", "0.5183657", "0.5182986", "0.5182585", "0.5173528", "0.5169553", "0.5159371", "0.5159043", "0.5158243", "0.51567787", "0.5156398", "0.51502854", "0.51493084" ]
0.5716005
21
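For orientation between records: the document above (compose_transform3) composes two transformations by multiplying Transform objects, Rcomp = R2*R1, and reading the resulting parameters back out, and the next record's combine_params2 does the same in 2D including mirroring. As a minimal, self-contained 2D analogue — plain NumPy homogeneous matrices, not the EMAN2 Transform API those snippets assume — the composition boils down to a matrix product:

import numpy as np

def affine2d(alpha_deg, tx, ty):
    # 3x3 homogeneous matrix: rotate by alpha_deg, then shift by (tx, ty)
    a = np.radians(alpha_deg)
    c, s = np.cos(a), np.sin(a)
    return np.array([[c, -s, tx],
                     [s,  c, ty],
                     [0.0, 0.0, 1.0]])

T1 = affine2d(30.0, 1.0, 0.0)
T2 = affine2d(60.0, 0.0, 2.0)
T21 = T2 @ T1  # apply T1 first, then T2
net_angle = np.degrees(np.arctan2(T21[1, 0], T21[0, 0]))
print(net_angle, T21[:2, 2])  # 90.0 and the combined shift

Recovering the net angle with arctan2 plays the same role as get_params in the record's document, just without the library dependency.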
Combine 2D alignment parameters including mirror
def combine_params2(alpha1, sx1, sy1, mirror1, alpha2, sx2, sy2, mirror2): t1 = Transform({"type":"2D","alpha":alpha1,"tx":sx1,"ty":sy1,"mirror":mirror1,"scale":1.0}) t2 = Transform({"type":"2D","alpha":alpha2,"tx":sx2,"ty":sy2,"mirror":mirror2,"scale":1.0}) tt = t2*t1 d = tt.get_params("2D") return d[ "alpha" ], d[ "tx" ], d[ "ty" ], d[ "mirror" ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_align_invert(self):\n al = align(self.amp1, self.amp2, inverse=False)\n\n al_inv = align(self.amp2, self.amp1, inverse=True)\n\n print(al.R)\n print(al_inv.R)\n\n print(al.T)\n print(al_inv.T)", "def coord_space(\n a0: numpy.ndarray, a1: numpy.ndarray, a2: numpy.ndarray, rev: bool = False\n) -> Tuple[numpy.ndarray, Optional[numpy.ndarray]]:\n # dbg = False\n # if dbg:\n # print(a0.transpose())\n # print(a1.transpose())\n # print(a2.transpose())\n\n # a0 = acs[0]\n # a1 = acs[1]\n # a2 = acs[2]\n\n global gtm\n global gmry\n global gmrz, gmrz2\n\n tm = gtm\n mry = gmry\n mrz = gmrz\n mrz2 = gmrz2\n\n # tx acs[1] to origin\n # tm = homog_trans_mtx(-a1[0][0], -a1[1][0], -a1[2][0])\n set_homog_trans_mtx(-a1[0], -a1[1], -a1[2], tm)\n\n # directly translate a2 using a1\n p = a2 - a1\n sc = get_spherical_coordinates(p)\n\n # if dbg:\n # print(\"p\", p.transpose())\n # print(\"sc\", sc)\n\n # mrz = homog_rot_mtx(-sc[1], \"z\") # rotate translated a2 -azimuth about Z\n set_Z_homog_rot_mtx(-sc[1], mrz)\n # mry = homog_rot_mtx(-sc[2], \"y\") # rotate translated a2 -polar_angle about Y\n set_Y_homog_rot_mtx(-sc[2], mry)\n\n # mt completes a1-a2 on Z-axis, still need to align a0 with XZ plane\n # mt = mry @ mrz @ tm # python 3.5 and later\n mt = gmry.dot(gmrz.dot(gtm))\n\n # if dbg:\n # print(\"tm:\\n\", tm)\n # print(\"mrz:\\n\", mrz)\n # print(\"mry:\\n\", mry)\n # # print(\"mt \", mt)\n\n p = mt.dot(a0)\n\n # if dbg:\n # print(\"mt:\\n\", mt, \"\\na0:\\n\", a0, \"\\np:\\n\", p)\n\n # need azimuth of translated a0\n # sc2 = get_spherical_coordinates(p)\n # print(sc2)\n azimuth2 = _get_azimuth(p[0], p[1])\n\n # rotate a0 -azimuth2 about Z to align with X\n # mrz2 = homog_rot_mtx(-azimuth2, \"z\")\n set_Z_homog_rot_mtx(-azimuth2, mrz2)\n\n # mt = mrz2 @ mt\n mt = gmrz2.dot(mt)\n\n # if dbg:\n # print(\"mt:\", mt, \"\\na0:\", a0, \"\\np:\", p)\n # # print(p, \"\\n\", azimuth2, \"\\n\", mrz2, \"\\n\", mt)\n\n # if dbg:\n # print(\"mt:\\n\", mt)\n # print(\"<<<<<<==============================\")\n\n if not rev:\n return mt, None\n\n # rev=True, so generate the reverse transformation\n\n # rotate a0 theta about Z, reversing alignment with X\n # mrz2 = homog_rot_mtx(azimuth2, \"z\")\n set_Z_homog_rot_mtx(azimuth2, mrz2)\n # rotate a2 phi about Y\n # mry = homog_rot_mtx(sc[2], \"y\")\n set_Y_homog_rot_mtx(sc[2], mry)\n # rotate a2 theta about Z\n # mrz = homog_rot_mtx(sc[1], \"z\")\n set_Z_homog_rot_mtx(sc[1], mrz)\n # translation matrix origin to a1\n # tm = homog_trans_mtx(a1[0][0], a1[1][0], a1[2][0])\n set_homog_trans_mtx(a1[0], a1[1], a1[2], tm)\n\n # mr = tm @ mrz @ mry @ mrz2\n mr = gtm.dot(gmrz.dot(gmry.dot(gmrz2)))\n # mr = numpy.dot(tm, numpy.dot(mrz, numpy.dot(mry, mrz2)))\n\n return mt, mr", "def rotateAlignXform(self):\n r = np.concatenate(([self.x], [self.y], [self.z]), 0)\n r = np.concatenate((r, np.array([[0,0,0]])), 0)\n r = np.concatenate((r, np.array([0,0,0,1]).reshape(-1,1)), 1)\n return r", "def image_align(first_image, second_image):\r\n\r\n high_diff = (second_image.shape[0] - first_image.shape[0]) // 2\r\n width_diff = (second_image.shape[1] - first_image.shape[1]) // 2\r\n\r\n align_image = second_image[high_diff: high_diff + first_image.shape[0],\r\n width_diff: width_diff + first_image.shape[1],\r\n :]\r\n\r\n\r\n assert align_image.shape == first_image.shape\r\n\r\n return align_image", "def test_align(self):\n al = align(self.amp1, self.amp2).m\n\n # Both objects are already centered, so should be close to origin (allowing for some inaccuracy)\n 
self.assertAlmostEqual(al.vert.mean(axis=0)[0], 0, delta=TestAlign.DELTA)\n self.assertAlmostEqual(al.vert.mean(axis=0)[1], 0, delta=TestAlign.DELTA)\n self.assertAlmostEqual(al.vert.mean(axis=0)[2], 0, delta=TestAlign.DELTA)", "def new_mirror(self,alongx,alongy):\n Knew = K.clone()\n if alongx:\n Knew[0,2] = size[0]-Knew[0,2]\n if alongy:\n Knew[1,2] = size[1]-Knew[1,2]\n return CameraInfo(self.size,Knew,self.dist)", "def get_alignment_params(self, s, w):\n\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def affine_align(x, y, p1, p2, g, s):\n #Create M, Ix, and Iy as Y x X matrices of 0's\n M = [[0]*(len(x)+1) for i in range(len(y)+1)]\n Ix = [[0]*(len(x)+1) for i in range(len(y)+1)]\n Iy = [[0]*(len(x)+1) for i in range(len(y)+1)]\n #Set up initial values for Ix and Iy\n #M infs along both axes\n for i in range(1, len(y)+1):\n M[i][0] = -math.inf\n for j in range(1, len(x)+1):\n M[0][j] = -math.inf\n #Ix: Aligning X with gap, horizontal move, infs along top row\n for i in range(0, len(y)+1):\n Ix[i][0] = -math.inf\n #Gap penalties along left column\n for j in range(1, len(x)+1):\n Ix[0][j] = -g if Ix[0][j-1] == -math.inf else Ix[0][j-1] - s\n #Iy: Aligning Y with gap, vertical move, infs along left column\n for j in range(0, len(x)+1):\n Iy[0][j] = -math.inf\n #Gap penalties along top row\n for i in range(1, len(y)+1):\n Iy[i][0] = -g if Iy[i-1][0] == -math.inf else Iy[i-1][0] - s\n #Populate remaining cells\n for i in range(1, len(y)+1):\n for j in range(1, len(x)+1):\n M[i][j] = max(M[i-1][j-1] + delta(x[j-1], y[i-1], p1, p2),\n Ix[i-1][j-1] + delta(x[j-1], y[i-1], p1, p2),\n Iy[i-1][j-1] + delta(x[j-1], y[i-1], p1, p2))\n Ix[i][j] = max(M[i][j-1] - g,\n Ix[i][j-1] - s)\n Iy[i][j] = max(M[i-1][j] - g,\n Iy[i-1][j] - s)\n #TRACEBACK\n x_ret=\"\"; y_ret=\"\"\n i = len(y); j = len(x)\n #Determine start matrix\n align_scores = (M[i][j], Iy[i][j], Ix[i][j])\n matrix_idx = align_scores.index(max(align_scores))\n #matrix_key will track the current matrix through the traceback\n matrix_key = [\"M\", \"Iy\", \"Ix\"][matrix_idx]\n while i > 0 and j > 0:\n #From M: Check diagonal moves back to all three matrices, align characters\n if matrix_key == \"M\":\n if M[i][j] == M[i-1][j-1] + p1 or M[i][j] == M[i-1][j-1] - p2:\n x_ret = x[j-1] + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1; j -= 1\n matrix_key = \"M\"\n elif M[i][j] == Iy[i-1][j-1] + p1 or M[i][j] == Iy[i-1][j-1] - p2:\n x_ret = x[j-1] + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1; j -= 1\n matrix_key = \"Iy\"\n elif M[i][j] == Ix[i-1][j-1] + p1 or M[i][j] == Ix[i-1][j-1] - p2:\n x_ret = x[j-1] + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1; j -= 1\n matrix_key = \"Ix\"\n #From Iy: Check vertical move to Iy and M, align y character with x gap\n elif matrix_key == \"Iy\":\n if Iy[i][j] == M[i-1][j] - g:\n x_ret = \"_\" + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1\n matrix_key = \"M\"\n elif Iy[i][j] == Iy[i-1][j] - s:\n x_ret = \"_\" + x_ret\n y_ret = y[i-1] + y_ret\n i -= 1\n matrix_key = \"Iy\"\n #From Ix: Check horizontal move to Ix and M, align x character with y gap\n elif matrix_key == \"Ix\":\n if Ix[i][j] == M[i][j-1] - g:\n x_ret = x[j-1] + x_ret\n y_ret = \"_\" + y_ret\n j -= 1\n matrix_key = \"M\"\n elif 
Ix[i][j] == Ix[i][j-1] - s:\n x_ret = x[j-1] + x_ret\n y_ret = \"_\" + y_ret\n j -= 1\n matrix_key = \"Ix\"\n #Finish sequence if edge was reached\n #i>0 means mach remaining characters in y with gaps in x\n if i > 0:\n x_ret = (\"_\"*i) + x_ret\n y_ret = y[0:i] + y_ret\n #j>0 means mach remaining characters in x with gaps in y\n if j > 0:\n x_ret = x[0:j] + x_ret\n y_ret = (\"_\"*j) + y_ret\n #Return alinged strings\n return (x_ret, y_ret)", "def set_params2D(ima, p, xform = \"xform.align2d\"):\n\tt = Transform({\"type\":\"2D\",\"alpha\":p[0],\"tx\":p[1],\"ty\":p[2],\"mirror\":p[3],\"scale\":p[4]})\n\tima.set_attr(xform, t)", "def get_alignment_params(self, s, w):\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)%360.0\n\treturn phi, theta, psi, s2x, s2y", "def alignPairShapes(s1,s2,weights):\n\n\n s1=np.asarray(s1)\n s2=np.asarray(s2)\n \n x1k=s1[:,0]\n y1k=s1[:,1]\n x2k=s2[:,0]\n y2k=s2[:,1]\n\n X1=sum(x1k*weights) \n X2=sum(x2k*weights)\n\n Y1=sum(y1k*weights)\n Y2=sum(y2k*weights)\n\n Z=sum(weights*(pow(x2k,2)+pow(y2k,2)))\n\n W=sum(weights)\n\n C1=sum(weights*(x1k*x2k+y1k*y2k))\n\n C2=sum(weights*(y1k*x2k-x1k*y2k))\n \n a=np.asarray([[X2,-Y2,W,0],[Y2,X2,0,W],[Z,0,X2,Y2],[0,Z,-Y2,X2]])\n b=np.asarray([X1,Y1,C1,C2])\n\n x=np.linalg.solve(a,b)\n\n ax=x[0]\n ay=x[1]\n tx=x[2]\n ty=x[3]\n return ax,ay,tx,ty", "def _align(self):\n\n shape = np.shape(self.x)\n\n # Get angle of direction (cbi: center beam index)\n # NOTE: This implementation seems to be unstable, because the shot with the center beam index can be NaN\n # cbi = np.median(np.arange(len(self.x[0, :]))).astype(int)\n # vec1 = [self.x[0, cbi], self.y[0, cbi], 0.0]\n # vec2 = [self.x[-1, cbi], self.y[-1, cbi], 0.0]\n\n # Alternative implementation with mean over all entries within the line.\n # -> should be a good approximation of the line center\n # NOTE: 2019-05-30: Relaxed the criterion even further (mean of first and last 10 scan lines)\n vec1 = [np.nanmedian(self.x[0:10, :]), np.nanmedian(self.y[0:10, :]), 0.0]\n vec2 = [np.nanmedian(self.x[-11:-1, :]), np.nanmedian(self.y[-11:-1, :]), 0.0]\n angle = -1.0*np.arctan((vec2[1]-vec1[1])/(vec2[0]-vec1[0]))\n\n # validity check -> Do not rotate if angle is nan\n if np.isnan(angle):\n return\n\n # Get center point\n xc = np.nanmedian(self.x)\n yc = np.nanmedian(self.y)\n\n # Reform points\n points = [self.x.flatten()-xc, self.y.flatten()-yc]\n\n # Execute the rotation\n rot_matrix = np.array([[np.cos(angle), -np.sin(angle)],\n [np.sin(angle), np.cos(angle)]])\n points_rotated = rot_matrix.dot(points)\n self.x = np.reshape(points_rotated[0, :], shape)\n self.y = np.reshape(points_rotated[1, :], shape)\n\n # Save conversion parameters for reuse\n self._align_parameters = {'center_point': (xc, yc),\n 'angle': angle,\n 'rotation_matrix': rot_matrix}", "def alignChannels(red, green, blue):\n trans = 30\n h = len(red)\n w = len(red[0])\n alignGreenX, 
alignGreenY = align2_new(red, green, trans)\n alignBlueX, alignBlueY = align2_new(red, blue, trans)\n result = np.zeros((h + trans*2, w + trans*2, 3))\n result[trans:h+trans, trans:w+trans, 0] = red\n result[trans+alignGreenY:h+trans+alignGreenY, trans + alignGreenX:w+trans+alignGreenX, 1] = green\n result[trans+alignBlueY:h+trans+alignBlueY, trans + alignBlueX:w+trans+alignBlueX, 2] = blue\n \n return result", "def align_reconstruction_to_pdr(reconstruction, data):\n if reconstruction.alignment.aligned:\n return reconstruction\n\n if not data.pdr_shots_exist():\n return reconstruction\n\n pdr_shots_dict = data.load_pdr_shots()\n\n X, Xp = [], []\n onplane, verticals = [], []\n for shot_id in reconstruction.shots.keys():\n X.append(reconstruction.shots[shot_id].pose.get_origin())\n Xp.append(pdr_shots_dict[shot_id][0:3])\n R = reconstruction.shots[shot_id].pose.get_rotation_matrix()\n onplane.append(R[0,:])\n onplane.append(R[2,:])\n verticals.append(R[1,:])\n\n X = np.array(X)\n Xp = np.array(Xp)\n\n # Estimate ground plane.\n p = multiview.fit_plane(X - X.mean(axis=0), onplane, verticals)\n Rplane = multiview.plane_horizontalling_rotation(p)\n X = Rplane.dot(X.T).T\n\n # Estimate 2d similarity to align to pdr predictions\n T = tf.affine_matrix_from_points(X.T[:2], Xp.T[:2], shear=False)\n s = np.linalg.det(T[:2, :2]) ** 0.5\n A = np.eye(3)\n A[:2, :2] = T[:2, :2] / s\n A = A.dot(Rplane)\n b = np.array([\n T[0, 2],\n T[1, 2],\n Xp[:, 2].mean() - s * X[:, 2].mean() # vertical alignment\n ])\n\n # Align points.\n for point in reconstruction.points.values():\n p = s * A.dot(point.coordinates) + b\n point.coordinates = p.tolist()\n\n # Align cameras.\n for shot in reconstruction.shots.values():\n R = shot.pose.get_rotation_matrix()\n t = np.array(shot.pose.translation)\n Rp = R.dot(A.T)\n tp = -Rp.dot(b) + s * t\n try:\n shot.pose.set_rotation_matrix(Rp)\n shot.pose.translation = list(tp)\n except:\n logger.debug(\"unable to transform reconstruction!\")\n\n return reconstruction", "def gridalign(self):\n self.position.x = int(round(self.position.x))\n self.position.y = int(round(self.position.y))\n self.position.z = int(round(self.position.z))\n\n if self.fan:\n self.fan = (int(round(self.fan[0])),int(round(self.fan[1])),int(round(self.fan[2])))\n\n bestDist = 2*9\n bestMatrix = makeMatrix(0,0,0)\n\n for compass in [0, 90, 180, 270]:\n for pitch in [0, 90, 180, 270]:\n for roll in [0, 90, 180, 270]:\n m = makeMatrix(compass,pitch,roll)\n dist = matrixDistanceSquared(self.matrix, m)\n if dist < bestDist:\n bestMatrix = m\n bestDist = dist\n\n self.matrix = bestMatrix\n self.positionOut()\n self.directionOut()", "def test_align_points(self):\n mv = [\n [0, 0, 5],\n [5, 0, 5],\n [0, 5, 5]\n ]\n sv = [\n [0, 0, 0],\n [5, 0, 0],\n [0, 5, 0]\n ]\n al = align(self.amp1, self.amp2, mv=mv, sv=sv, method='contPoints').m\n zMax = self.amp1.vert[:, 2].max() - 5\n # Both objects are already centered, so should be close to origin (allowing for some inaccuracy)\n self.assertAlmostEqual(al.vert[:, 2].max(), zMax, delta=TestAlign.DELTA)", "def mirrorPair(N,srcdist=89.61e3+1.5e3,primalign=np.zeros(6),\\\n secalign=np.zeros(6),rrays=False,f=None,\\\n plist=[[0],[0],[0]],hlist=[[0],[0],[0]]):\n #Establish subannulus of rays\n rays = sources.subannulus(220.,221.,100./220.,N,zhat=-1.)\n #Transform to node position\n tran.transform(rays,220,0,0,0,0,0)\n #Set up finite source distance\n raydist = sqrt(srcdist**2+rays[1]**2+rays[2]**2)\n rays[4] = rays[1]/raydist\n rays[5] = rays[2]/raydist\n rays[6] = 
-sqrt(1.-rays[4]**2-rays[5]**2)\n\n #Place mirror pair\n coords = [tran.tr.identity_matrix()]*4\n tran.transform(rays,-220+conic.primrad(8450.,220.,8400.),0,50.,0,0,0,\\\n coords=coords)\n tran.transform(rays,*primalign,coords=coords)\n tran.transform(rays,-conic.primrad(8450.,220.,8400.),0,-8450.,0,0,0,\\\n coords=coords)\n## surf.wolterprimary(rays,220.,8400.)\n surf.primaryLL(rays,220.,8400.,8500.,8400.,100./220,*plist)\n rays = tran.vignette(rays,ind=np.logical_and(rays[3]<8500.,\\\n rays[3]>8400.))\n tran.reflect(rays)\n #Place secondary in primary's reference frame\n tran.transform(rays,conic.secrad(8350.,220.,8400.),0,8350.,0,0,0,\\\n coords=coords)\n tran.transform(rays,*secalign,coords=coords)\n tran.itransform(rays,conic.secrad(8350.,220.,8400.),0,8350.,0,0,0,\\\n coords=coords)\n## surf.woltersecondary(rays,220.,8400.)\n surf.secondaryLL(rays,220.,8400.,1.,8400.,8300.,100./220,*hlist)\n rays = tran.vignette(rays,ind=np.logical_and(rays[3]<8400.,\\\n rays[3]>8300.))\n tran.reflect(rays)\n\n #Go back to nominal node reference frame and down to focus\n rays = tran.applyT(rays,coords,inverse=True)\n\n if f is None:\n f = -surf.focusI(rays)\n print f\n else:\n tran.transform(rays,0,0,-f,0,0,0)\n surf.flat(rays)\n\n if rrays is True:\n return rays\n \n return anal.hpd(rays)/f * 180/np.pi * 60.**2, \\\n airnp.mean(rays[1]), np.mean(rays[2])", "def gonio_axis_align():\n \n # Invert camera image, so dark pin on light image becomes a peak\n cam_7.proc1.scale.put(-1)\n cam_8.proc1.scale.put(-1)\n \n # High threshold, so AD centroid doesn't interpret background\n cam_8ThresholdOld = cam_8.stats4.centroid_threshold.get()\n cam_8.stats4.centroid_threshold.put(150)\n cam_7ThresholdOld = cam_7.stats4.centroid_threshold.get()\n cam_7.stats4.centroid_threshold.put(150)\n \n # HiMag\n # Copy ROI2 geometry (HiMag Mag3) to ROI4 and use ROI4 centroid plugin\n cam_8.roi4.min_xyz.min_x.put(cam_8.roi2.min_xyz.min_x.get())\n cam_8.roi4.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get())\n cam_8.roi4.size.x.put(cam_8.roi2.size.x.get() * 0.20)\n cam_8.roi4.size.y.put(cam_8.roi2.size.y.get())\n cam_8.roi4.min_xyz.min_x.put(cam_8.roi2.min_xyz.min_x.get() + cam_8.roi2.size.x.get()/2 - cam_8.roi4.size.x.get()/2)\n \n # LoMag\n # Copy ROI2 geometry (LoMag Mag1) to ROI4 and use ROI4 centroid plugin\n cam_7.roi4.min_xyz.min_x.put(cam_7.roi2.min_xyz.min_x.get())\n cam_7.roi4.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get())\n cam_7.roi4.size.x.put(cam_7.roi2.size.x.get() * 0.05)\n cam_7.roi4.size.y.put(cam_7.roi2.size.y.get())\n cam_7.roi4.min_xyz.min_x.put(cam_7.roi2.min_xyz.min_x.get() + cam_7.roi2.size.x.get()/2 - cam_7.roi4.size.x.get()/2)\n \n centerPinYHiMag0 = centroid_avg(cam_8.stats4)[1]\n centerPinYLoMag0 = centroid_avg(cam_7.stats4)[1]\n yield from bps.mvr(gonio.o,180)\n time.sleep(2)\n centerPinYHiMag180 = centroid_avg(cam_8.stats4)[1]\n centerPinYLoMag180 = centroid_avg(cam_7.stats4)[1]\n centerPinYHiMag = (centerPinYHiMag0 + centerPinYHiMag180)/2\n centerPinYLoMag = (centerPinYLoMag0 + centerPinYLoMag180)/2\n\n centerPinOffsYHiMag = centerPinYHiMag - cam_8.roi4.size.y.get() / 2\n centerPinOffsYLoMag = centerPinYLoMag - cam_7.roi4.size.y.get() / 2\n \n # Correct Mag 3 (cam_8 ROI2)\n cam_8.roi2.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get() + centerPinOffsYHiMag)\n # Correct Mag 4 (cam_8 ROI1)\n cam_8.roi1.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get() + (cam_8.roi2.size.y.get()-cam_8.roi1.size.y.get())/2)\n \n # Correct Mag 1 (cam_7 ROI2)\n cam_7.roi2.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get() 
+ centerPinOffsYLoMag)\n # Correct Mag 2 (cam_7 ROI3)\n cam_7.roi3.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get() + (cam_7.roi2.size.y.get()-cam_7.roi3.size.y.get())/2)\n\n # De-invert image\n cam_7.proc1.scale.put(-1)\n cam_8.proc1.scale.put(-1)\n \n # Set thresold to previous value\n cam_8.stats4.centroid_threshold.put(cam_8ThresholdOld)\n cam_7.stats4.centroid_threshold.put(cam_7ThresholdOld)\n \n return", "def align(self):\n ...", "def get_params2D(ima, xform = \"xform.align2d\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"2D\")\n\treturn d[\"alpha\"],d[\"tx\"],d[\"ty\"],d[\"mirror\"],d[\"scale\"]", "def to_alignment(self):\n alignment = dict()\n alignment[\"x\"] = self.x\n alignment[\"w\"] = self.w\n alignment[\"y\"] = self.y\n alignment[\"h\"] = self.h\n alignment[\"frame_dims\"] = self.frame_dims\n alignment[\"landmarksXY\"] = self.landmarksXY\n return alignment", "def alignment(gram1, gram2):\n # BUG: this loss function causes abnormal optimization behaviors, see\n # comments in past commits\n\n alignment = frobenius_inner_prod(gram1, gram2) /\\\n m.sqrt(frobenius_inner_prod(gram1, gram1) *\n frobenius_inner_prod(gram2, gram2))\n return alignment", "def align(stroke1, stroke2):\n\n x1 = np.array(stroke1.x)\n x2 = np.array(stroke2.x)\n y1 = np.array(stroke1.y)\n y2 = np.array(stroke2.y)\n\n d = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n m = d - np.min(d)\n\n Ix1 = np.argmax(x1)\n Ix2 = np.argmax(x2)\n Iy1 = np.argmax(y1)\n Iy2 = np.argmax(y2)\n\n ix1 = np.argmin(x1)\n ix2 = np.argmin(x2)\n iy1 = np.argmin(y1)\n iy2 = np.argmin(y2)\n\n # rephasing :\n u = np.array([(Ix1 - Ix2), (Iy1 - Iy2), (ix1 - ix2), (iy1 - iy2)])\n indice_period = np.argmin(np.abs(u))\n period = u[indice_period]\n new_x1 = np.array(x1[period:].tolist() + x1[0:period].tolist())\n new_y1 = np.array(y1[period:].tolist() + y1[0:period].tolist())\n x1 = new_x1\n y1 = new_y1\n\n # resorting : if symetric part, revert it\n mx = np.max((x1, x2), 0)\n my = np.max((y1, y2), 0)\n sym_score = abs(x1 - x2[::-1]) + abs(y1 - y2[::-1])\n if len(x1[sym_score < 50]) > 20:\n x1[sym_score < 40] = x1[sym_score < 40][::-1]\n y1[sym_score < 40] = y1[sym_score < 40][::-1]\n\n new_d = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n new_m = new_d - min(new_d)\n\n return x1, y1, d, new_d, m, new_m", "def align(img, left_eye, right_eye):\n left_eye_x, left_eye_y = left_eye\n right_eye_x, right_eye_y = right_eye\n point_3rd, direction = (left_eye, -1) if left_eye_y > right_eye_y else (right_eye, 1)\n\n # np.linalg.norm is being used for euclidean distance\n a = np.linalg.norm(np.array(left_eye) - np.array(point_3rd))\n b = np.linalg.norm(np.array(right_eye) - np.array(point_3rd))\n c = np.linalg.norm(np.array(right_eye) - np.array(left_eye))\n\n if b != 0 and c != 0:\n angle = np.arccos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c))\n angle = (angle * 180) / math.pi\n if direction == -1:\n angle = 90 - angle\n img = Image.fromarray(img)\n img = np.array(img.rotate(direction * angle))\n\n return img", "def getAffineTransform(self, coord1, coord2):\n num_coords = 2 * len(coord1)\n A = np.zeros((num_coords, 6))\n b = []\n for point2 in coord2:\n b.append(float(point2[0]))\n b.append(float(point2[1]))\n b = np.asarray(b)\n i = 0\n for point1 in coord1:\n A[i, 0:2] = point1[0:2]\n A[i, 2] = 1\n A[i+1, 3:5] = point1[0:2]\n A[i+1, 5] = 1\n i += 2\n A = np.asarray(A)\n b = np.asarray(b)\n x = np.matmul(np.matmul(np.linalg.inv(np.matmul(A.T, A)), A.T), b.T)\n self.depth2rgb_affine = np.reshape(x, (2, 3))\n csv.writer(open(\"depth2rgb_affine.cfg\", 
\"w+\", newline=''), delimiter=',').writerows(self.depth2rgb_affine)\n # else:\n # x = np.vstack([np.reshape(x,(2,3)),[0,0,1]])\n # self.cam_ext_mat = x\n # A = [point[i,j+0:j+3].astype(np.float32) for i,point in enumerate(coord1) if i%2 == 0]\n # pts1 = coord1[0:3].astype(np.float32)\n # pts2 = coord2[0:3].astype(np.float32)\n # print(cv2.getAffineTransform(pts1, pts2))\n # return cv2.getAffineTransform(pts1, pts2)", "def process_align(self):\n\t\tstm_t_dict = self._process_recog()\n\t\ttrans_t_dict = self._process_trans()\n\t\talign_obj = viterbi_align(stm_t_dict, trans_t_dict, self.label, self.pair_file_path)\n\t\tself.trans_t_dict = align_obj.viterbi(0, len(stm_t_dict)-1, 0, len(trans_t_dict)-1)", "def horn_adjust(x, y):\n debug=False\n #debug=True\n meanX = x.mean(axis=0)\n meanY = y.mean(axis=0)\n translation = meanY - meanX\n x_centered = x - meanX\n y_centered = y - meanY\n if debug:\n print(\"x_centered\")\n print(x_centered)\n print(\"y_centered\")\n print(y_centered)\n # Find how much to rescale the x's. Entrywise multiplication.\n x_scale = np.sqrt((x_centered * x_centered).sum())\n y_scale = np.sqrt((y_centered * y_centered).sum())\n scale_factor = y_scale / x_scale\n x_centered_prime = x_centered * scale_factor\n if debug:\n print(\"scale_factor\")\n print(scale_factor)\n print(\"x_centered_prime\")\n print(x_centered_prime)\n # Find angle to rotate the planes\n x_perp = np.cross(x_centered_prime[0], x_centered_prime[1])\n y_perp = np.cross(y_centered[0], y_centered[1])\n # Find rotation matrix to rotate the x plane into the y plane\n # Using https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d\n # https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula\n x_perp_unit = x_perp / np.linalg.norm(x_perp)\n y_perp_unit = y_perp / np.linalg.norm(y_perp)\n v = np.cross(x_perp_unit, y_perp_unit)\n s = np.linalg.norm(v) # sine of angle between the planes\n c = x_perp_unit.dot(y_perp_unit) # cosine of angle between the planes\n v_x = np.array([[0, -v[2], v[1]],\n [v[2], 0, -v[0]],\n [-v[1], v[0], 0]])\n # rotation_p acts on the plane\n rotation_p = np.eye(3) + v_x + v_x.dot(v_x) * (1 - c) / s**2.0\n # Transpose to make each x a column vector, then transpose back for next part\n x_plane = rotation_p.dot(x_centered_prime.T).T\n # Now rotate within the plane, as in Sec. 
5 of Horn\n v_y = np.array([[0, -y_perp_unit[2], y_perp_unit[1]],\n [y_perp_unit[2], 0, -y_perp_unit[0]],\n [-y_perp_unit[1], y_perp_unit[0], 0]])\n s_win_tmp = np.sum([np.cross(x_plane[i], y_centered[i]) for i in range(3)],\n axis=0).dot(y_perp_unit)\n c_win_tmp = np.sum([x_plane[i].dot(y_centered[i]) for i in range(3)],\n axis=0)\n sin_theta = s_win_tmp / np.sqrt(np.linalg.norm(s_win_tmp)**2 +\n np.linalg.norm(c_win_tmp)**2)\n cos_theta = c_win_tmp / np.sqrt(np.linalg.norm(s_win_tmp)**2 +\n np.linalg.norm(c_win_tmp)**2)\n rotation_win = np.eye(3) + sin_theta * v_y + (1 - cos_theta) * v_y.dot(v_y)\n # transpose so each column is an x vector, then transpose back at the end\n # x_final = rotation_win.dot(x_final.T).T\n rotation_full = rotation_win.dot(rotation_p)\n # Ignore scale_factor\n # T(x) = Ax + b\n A = rotation_full\n b = meanY - rotation_full.dot(meanX)\n if debug:\n print(\"A\")\n print(rotation_full)\n print(\"b\")\n print(b)\n return(A, b)", "def align_reconstruction_no_numpy(reconstruction, anchor_points_dict):\n modified_shots_dict = {}\n all_anchor_shot_ids = sorted(anchor_points_dict.keys())\n for i in range(len(all_anchor_shot_ids) - 1):\n anchor_coords = []\n recon_coords = []\n\n for j in range(2):\n shot_id = all_anchor_shot_ids[i+j]\n anchor_coords.append(anchor_points_dict[shot_id])\n o = get_origin_no_numpy_opencv(reconstruction.shots[shot_id].pose.rotation,\n reconstruction.shots[shot_id].pose.translation)\n\n recon_coords.append(o)\n\n s, A, b = get_affine_transform_2d_no_numpy(anchor_coords, recon_coords)\n\n start_shot_id = all_anchor_shot_ids[i]\n end_shot_id = all_anchor_shot_ids[i+1]\n\n # in first iteration, we transform from first shot of recon\n # in last iteration, we transform until last shot of recon\n shot_ids = sorted(reconstruction.shots.keys())\n if i == 0:\n start_shot_id = shot_ids[0]\n\n if i == len(anchor_points_dict)-2:\n end_shot_id = shot_ids[-1]\n\n new_dict = {}\n\n start_index = _shot_id_to_int(start_shot_id)\n end_index = _shot_id_to_int(end_shot_id)\n\n # transform pdr shots\n for i in range(start_index, end_index + 1):\n shot_id = _int_to_shot_id(i)\n\n if shot_id in reconstruction.shots:\n X = get_origin_no_numpy_opencv(reconstruction.shots[shot_id].pose.rotation,\n reconstruction.shots[shot_id].pose.translation)\n A_dot_X = [A[0][0] * X[0] + A[0][1] * X[1] + A[0][2] * X[2],\n A[1][0] * X[0] + A[1][1] * X[1] + A[1][2] * X[2],\n A[2][0] * X[0] + A[2][1] * X[1] + A[2][2] * X[2]]\n Xp = [i * s + j for i, j in zip(A_dot_X, b)]\n new_dict[shot_id] = [Xp[0], Xp[1], Xp[2]]\n\n modified_shots_dict.update(new_dict)\n\n return modified_shots_dict", "def align_one(ptcl,ref,prefilt,align,aligncmp,ralign,raligncmp):\n\n\tif prefilt : ref=ref.process(\"filter.matchto\",{\"to\":ptcl})\n\n\t# initial alignment\n\tif align!=None :\n\t\tali=ptcl.align(align[0],ref,align[1],aligncmp[0],aligncmp[1])\n\n\t# refine alignment if requested\n\tif ralign!=None:\n\t\tralign[1][\"xform.align2d\"] = ali.get_attr(\"xform.align2d\")\n\t\tali=ptcl.align(ralign[0],ref,ralign[1],raligncmp[0],raligncmp[1])\n\n\treturn ali", "def mirror(self, p1=(0, 1), p2=(0, 0)):\n if hasattr(p1, \"center\"):\n p1 = p1.center\n if hasattr(p2, \"center\"):\n p2 = p2.center\n p1 = np.array(p1)\n p2 = np.array(p2)\n # Translate so reflection axis passes through origin\n self.origin = self.origin - p1\n\n # Rotate so reflection axis aligns with x-axis\n angle = np.arctan2((p2[1] - p1[1]), (p2[0] - p1[0])) * 180 / pi\n self.origin = _rotate_points(self.origin, angle=-angle, center=[0, 0])\n 
self.rotation -= angle\n\n # Reflect across x-axis\n self.x_reflection = not self.x_reflection\n self.origin[1] = -self.origin[1]\n self.rotation = -self.rotation\n\n # Un-rotate and un-translate\n self.origin = _rotate_points(self.origin, angle=angle, center=[0, 0])\n self.rotation += angle\n self.origin = self.origin + p1\n\n if self.owner is not None:\n self.owner._bb_valid = False\n return self", "def mirror_augmentation():\n return lambda image: ImageOps.mirror(image)", "def pixel_align(pixel_size: float, length_mm: float) -> float:\n return round(length_mm / pixel_size) * pixel_size", "def prf_align(prf1, prf2):\n\n Δ0,a0,b0 = guess_align_params(prf1, prf2)\n params = lmfit.Parameters()\n params.add('Δ', value=Δ0)\n params.add('a', value=a0)\n params.add('b', value=b0)\n \n prf_diff = prf_diff_fn(prf1, prf2)\n \n res = lmfit.minimize(prf_diff, params)\n \n Δ = res.params['Δ']\n a = res.params['a']\n b = res.params['b']\n \n return prf_shift_scale(prf2, Δ, a, b)", "def _align_homography(self, base_view, view):\n # matches = match_descriptors(view.descriptors,\n # self.views[0].descriptors, cross_check=True)\n matches = self.comparer.get_matches(base_view, view)\n src = view.keypoints[matches[:, 0], ::-1]\n dst = base_view.keypoints[matches[:, 1], ::-1]\n\n warped = np.array([np.nan])\n while np.isnan(warped.sum()):\n h, inliers = ransac([src, dst], skimage.transform.ProjectiveTransform,\n min_samples=8, residual_threshold=2, max_trials=400)\n\n warped = skimage.transform.warp(view.image, h.inverse)\n return WarpedView(view.position, warped, view.original)", "def __mirror(self, x, y):\n return (self.width - x - 1, self.height - y - 1)", "def op_mirror():\n mir = np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, -1, 0],\n [0, 0, 0, -1]])\n return mir", "def Alignsequence(structure1, structure2):\n\n ppb = PPBuilder()\n for pp in ppb.build_peptides(structure1):\n sequence1 = pp.get_sequence()\n for pp in ppb.build_peptides(structure2):\n sequence2 = pp.get_sequence()\n\n alignment = pairwise2.align.globalxx(sequence1, sequence2)\n return alignment", "def amplify_2d_shape(shape, x_amplify, y_amplify):", "def align(): # open EH and fast shutter\n\t#marAuxiliary.closeMarShield()\n\td2in()\n\td3in()\n\tsh('o')", "def warp(img1, img2, M):\n\n # Get width and height of input images \n w1,h1 = img1.shape[:2]\n w2,h2 = img2.shape[:2]\n\n # Get the canvas dimesions\n img2_dims = np.float32([ [0,0], [0,w2], [h2, w2], [h2,0] ]).reshape(-1,1,2)\n img1_dims_temp = np.float32([ [0,0], [0,w1], [h1, w1], [h1,0] ]).reshape(-1,1,2)\n\n # Find out the boundary of img1 after projected onto the coord. 
system of img2\n img1_dims = myPerspectiveTransform(img1_dims_temp, M)\n\n # Resulting dimensions\n result_dims = np.concatenate( (img1_dims, img2_dims), axis = 0)\n \n # Getting images together\n # Calculate dimensions of match points\n x_min, y_min = np.int32(result_dims.min(axis=0).ravel() - 0.5)\n x_max, y_max = np.int32(result_dims.max(axis=0).ravel() + 0.5)\n\n # Create output array after affine transformation \n transform_dist = [-x_min,-y_min]\n transform_array = np.array([[1, 0, transform_dist[0]], \n [0, 1, transform_dist[1]], \n [0,0,1]]) \n \n # Warp images to get the resulting image\n result_img = myWarpPerspective(img1, transform_array.dot(M),\n (x_max-x_min, y_max-y_min))\n alpha = 0.1\n #result_img[transform_dist[1]:w1+transform_dist[1], \n # transform_dist[0]:h1+transform_dist[0]] = img2 \n print(transform_dist)\n #result_img[transform_dist[1]:w1+transform_dist[1], transform_dist[0]:transform_dist[0]+h1] = img1[transform_dist[1]:w1+transform_dist[1], transform_dist[0]:transform_dist[0]+h1] \n #result_img[transform_dist[1]:w1+transform_dist[1], \n # transform_dist[0]:transform_dist[0]+50] = img2[0:w1 , 0 : 50] \n alpha = 0.5\n img1_rest = x_max-x_min - h1\n print(img1_rest)\n #print(h1)\n for j in range(0 , h1):\n for i in range(0 , w1):\n alpha = 0.02 * j\n if alpha > 1:\n alpha = 1\n \n result_img[i + transform_dist[1], j + transform_dist[0]] = img2[i , j] * alpha + result_img[i + transform_dist[1] , j + transform_dist[0]] *(1 - alpha)\n #result_img[i + transform_dist[1], j + transform_dist[0]] = img2[i , j] * alpha \n return result_img", "def update_affine_param( cur_af, last_af): # A2(A1*x+b1) + b2 = A2A1*x + A2*b1+b2\n cur_af = cur_af.view(cur_af.shape[0], 4, 3)\n last_af = last_af.view(last_af.shape[0],4,3)\n updated_af = torch.zeros_like(cur_af.data).to(cur_af.device)\n dim =3\n updated_af[:,:3,:] = torch.matmul(cur_af[:,:3,:],last_af[:,:3,:])\n updated_af[:,3,:] = cur_af[:,3,:] + torch.squeeze(torch.matmul(cur_af[:,:3,:], torch.transpose(last_af[:,3:,:],1,2)),2)\n updated_af = updated_af.contiguous().view(cur_af.shape[0],-1)\n return updated_af", "def combine(proximal_origin, distal_origin):\n return (proximal_origin << _SHIFT) | distal_origin", "def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror", "def origami_H2_2cyl(w1,h1,t1,w2,h2,t2):\n assert((w2 < w1) and (t1 < w1) and (t2 < w2))\n\n # v for volumes and z for z\n v1 = h1*w1\n v2 = h2*w2\n z1 = (h1-1)*w1 + 1\n z2 = v1 + (h2-1)*w2 + 1\n\n # the horizontal permutation\n x = [None] + range(2,v1+v2+1) + [1]\n for i in range(h1):\n x[(i+1)*w1] = i*w1 + 1\n for i in range(h2):\n x[v1 + (i+1)*w2] = v1 + i*w2 + 1\n\n # the vertical permutation\n y = ([None] +\n range(w1+1,v1+1) + [None]*w1 +\n range(v1+w2+1,v1+v2+1) + [None]*w2)\n\n for i in range(w2):\n # up-left of the first cylinder\n # print \"U1L) z1 + (t1+i)%w1 -> 1+v1+i: \", z1+(t1+i)%w1, 1+v1+i\n y[z1+(t1+i)%w1] = 1+v1+i\n for i in range(w2):\n # up of the second cylinder\n # print \"U2) z2+(t2+i)%w2 -> 1 + (t1+i)%w1: \", z2+(t2+i)%w2, 1+(t1+i)%w1\n y[z2+(t2+i)%w2] = 1+i\n for i in range(w1-w2):\n # up-right of the first cylinder\n # print \"U1R) z1+w2+(t1+i) -> 1+i: \", z1+(w2+t1+i)%w1, 1+w2+i\n y[z1+(w2+t1+i)%w1] = 1+w2+i\n\n return Origami(x[1:],y[1:])", "def reflection_pad2d(self, x, pad=1):\n x = 
torch.cat([torch.flip(x[:, :, 1:pad + 1, :], [2]), x, torch.flip(x[:, :, -pad - 1:-1, :], [2])], 2)\n x = torch.cat([torch.flip(x[:, :, :, 1:pad + 1], [3]), x, torch.flip(x[:, :, :, -pad - 1:-1], [3])], 3)\n return x", "def derive(params):\n x, y, dx, dy = params\n r = (x ** 2 + y ** 2) ** 0.5\n return np.array([dx, dy, -G * M * x / (r ** 3), -G * M * y / (r ** 3)])", "def mirror_edges(im, psf_width):\n\n # How much we need to pad\n pad_i = psf_width - (im.shape[0] % psf_width)\n pad_j = psf_width - (im.shape[1] % psf_width)\n\n # Get widths\n pad_top = pad_i // 2\n pad_bottom = pad_i - pad_top\n pad_left = pad_j // 2\n pad_right = pad_j - pad_left\n\n # Do the padding\n return np.pad(im, ((pad_top, pad_bottom), (pad_left, pad_right)), mode=\"reflect\")", "def alignCtx(*args, align: bool=True, anchorFirstObject: bool=False, distribute: bool=True,\n exists: bool=True, history: bool=True, image1: Union[AnyStr, bool]=\"\", image2:\n Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\", name: AnyStr=\"\",\n showAlignTouch: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def orient_2d(p,q,r):\n return (q[0]-p[0])*(r[1]-p[1]) - (r[0]-p[0])*(q[1]-p[1])", "def translate(self, diff: AnyVec) -> None:\n for p in self.planes:\n p += diff\n\n u_axis = Vec(self.uaxis.x, self.uaxis.y, self.uaxis.z)\n v_axis = Vec(self.vaxis.x, self.vaxis.y, self.vaxis.z)\n\n # Fix offset - see 2013 SDK utils/vbsp/map.cpp:2237\n self.uaxis.offset -= Vec.dot(u_axis, diff) / self.uaxis.scale\n self.vaxis.offset -= Vec.dot(v_axis, diff) / self.vaxis.scale", "def rotation_alignment(referent_shape, current_shape):\n numerator = 0.\n denominator = 0.\n\n for i in range(len(referent_shape.points)):\n numerator += current_shape.points[i, 0] * referent_shape.points[i, 1] - current_shape.points[i, 1] * referent_shape.points[i, 0]\n denominator += current_shape.points[i, 0] * referent_shape.points[i, 0] + current_shape.points[i, 1] * referent_shape.points[i, 1]\n\n return math.atan2(numerator, denominator)", "def _mps_AA(self, A1, A2):\n Dl, d1, _ = A1.shape\n _, d2, Dr = A2.shape\n return np.reshape(np.tensordot(A1, A2, axes=(2, 0)), [Dl, d1 * d2, Dr])", "def hybrid_align_pdr(data, target_images=None):\n # load gps points and convert them to topocentric\n gps_points_dict = data.load_gps_points()\n reflla = data.load_reference_lla()\n\n curr_gps_points_dict = {}\n for key, value in gps_points_dict.items():\n x, y, z = geo.topocentric_from_lla(\n value[0], value[1], value[2],\n reflla['latitude'], reflla['longitude'], reflla['altitude'])\n curr_gps_points_dict[key] = [x, y, z]\n\n # now align recons that has 2 or more gps points (and trusted shots if any). we run through the same\n # procedure as in hybrid gps picking process, so that the output will be identical. 
only difference\n # is that align_reconstruction_segments below calculates the full camera pose, as opposed to position\n # only in align_reconstruction_no_numpy.\n scale_factor = data.config['reconstruction_scale_factor']\n pdr_shots_dict = data.load_pdr_shots()\n reconstructions = data.load_reconstruction()\n\n MIN_RECON_SIZE = 100\n\n aligned_recons = []\n aligned_shots_dict = curr_gps_points_dict.copy()\n\n graph = None\n\n # init pdr predictions\n pdr_predictions_dict = update_pdr_global_2d(curr_gps_points_dict, pdr_shots_dict, scale_factor, False)\n\n # align recons to gps points and/or trusted shots\n while True:\n can_align = False\n for recon in reconstructions:\n if recon.alignment.aligned or len(recon.shots) < MIN_RECON_SIZE:\n continue\n\n recon_gps_points = {}\n recon_trusted_shots = {}\n\n # match gps points to this recon\n for shot_id in recon.shots:\n if shot_id in curr_gps_points_dict:\n recon_gps_points[shot_id] = curr_gps_points_dict[shot_id]\n\n # find trusted shots on this recon if not enough gps points\n if len(recon_gps_points) < 2:\n recon_shot_ids = sorted(recon.shots)\n\n if recon_shot_ids[0] not in curr_gps_points_dict and \\\n _prev_shot_id(recon_shot_ids[0]) in aligned_shots_dict:\n recon_trusted_shots[recon_shot_ids[0]] = pdr_predictions_dict[recon_shot_ids[0]][:3]\n\n if recon_shot_ids[-1] not in curr_gps_points_dict and \\\n _next_shot_id(recon_shot_ids[-1]) in aligned_shots_dict:\n recon_trusted_shots[recon_shot_ids[-1]] = pdr_predictions_dict[recon_shot_ids[-1]][:3]\n\n if len(recon_gps_points) + len(recon_trusted_shots) >= 2:\n # combine trusted shots with gps points\n recon_trusted_shots.update(recon_gps_points)\n\n # only need to load graph if it hasn't been loaded before AND there are more than\n # 2 trusted points on this recon (hence the need to break it into segments)\n if graph is None and len(recon_trusted_shots) > 2:\n graph = data.load_tracks_graph()\n\n # below, each 'segment' is a Reconstruction object\n segments = align_reconstruction_segments(data, graph, recon, recon_trusted_shots)\n aligned_recons.extend(segments)\n\n # the 'shot' objects in segments are the same as those in recon\n for shot_id in recon.shots:\n aligned_shots_dict[shot_id] = recon.shots[shot_id].pose.get_origin()\n\n # update pdr predictions based on aligned shots so far\n pdr_predictions_dict = update_pdr_global_2d(aligned_shots_dict, pdr_shots_dict, scale_factor, False)\n\n recon.alignment.aligned = True\n can_align = True\n break\n\n if not can_align:\n break\n\n # for shots that are not in aligned recons at this point, we throw them in a new recon. 
the\n # camera poses are calculated using the same method as direct align\n pdr_predictions_dict = update_pdr_global_2d(aligned_shots_dict, pdr_shots_dict, scale_factor, False)\n\n if not target_images:\n target_images = data.config.get('target_images', [])\n\n cameras = data.load_camera_models()\n\n direct_align_recon = types.Reconstruction()\n direct_align_recon.cameras = cameras\n\n for img in target_images:\n\n if img in aligned_shots_dict:\n continue\n\n camera = cameras[data.load_exif(img)['camera']]\n\n shot = types.Shot()\n shot.id = img\n shot.camera = camera\n shot.pose = types.Pose()\n\n prev_img = _prev_shot_id(img)\n next_img = _next_shot_id(img)\n\n curr_coords = pdr_predictions_dict[img][:3]\n\n prev_heading = next_heading = heading = None\n if prev_img in pdr_predictions_dict:\n prev_coords = pdr_predictions_dict[prev_img][:3]\n prev_heading = np.arctan2(curr_coords[1] - prev_coords[1], curr_coords[0] - prev_coords[0])\n\n if next_img in pdr_predictions_dict:\n next_coords = pdr_predictions_dict[next_img][:3]\n next_heading = np.arctan2(next_coords[1] - curr_coords[1], next_coords[0] - curr_coords[0])\n\n if prev_heading and next_heading:\n heading = phase((rect(1, prev_heading) + rect(1, next_heading)) * 0.5)\n elif prev_heading:\n heading = prev_heading\n elif next_heading:\n heading = next_heading\n\n if not heading:\n continue\n\n R1 = _euler_angles_to_rotation_matrix([np.pi*0.5, 0, np.pi*0.5])\n R2 = _euler_angles_to_rotation_matrix([np.radians(pdr_shots_dict[img][4]), np.radians(pdr_shots_dict[img][3]), heading])\n R = R2.dot(R1)\n\n t_shot = np.array(pdr_predictions_dict[img][:3])\n tp = -R.T.dot(t_shot)\n\n shot.pose.set_rotation_matrix(R.T)\n shot.pose.translation = list(tp)\n\n direct_align_recon.add_shot(shot)\n\n if len(direct_align_recon.shots) > 0:\n direct_align_recon.alignment.aligned = True\n direct_align_recon.alignment.num_correspondences = len(direct_align_recon.shots)\n\n aligned_recons.append(direct_align_recon)\n\n return aligned_recons", "def rigid_transform_2d(v, mapping, alpha = 1):\r\n p_wgt = vec2(0, 0)\r\n q_wgt = vec2(0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n if (x == 0 and y == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n A_fac = mat2([v.x - p_wgt.x, v.y - p_wgt.y, v.y - p_wgt.y, p_wgt.x - v.x])\r\n v_out = vec2(0, 0)\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n A = mat2([p_adj.x, p_adj.y, p_adj.y, -p_adj.x])*A_fac*w[i]\r\n A = A.transpose()\r\n v_out += A*q_adj\r\n r = math.sqrt(v_out.dot(v_out))\r\n v_out /= r\r\n v_sub = v - p_wgt\r\n r = math.sqrt(v_sub.dot(v_sub))\r\n v_out *= r\r\n v_out += q_wgt\r\n return v_out", "def func_c_align_split_n(self, args):\n tik_instance, ub_ori, ub_trans, n_before, n_len = args\n\n n_d, d_d, h_d, w_d, c_d = self.dst_shape\n dhw_d = d_d * h_d * w_d\n hw_d = h_d * w_d\n\n data_offset = n_before * self.c_0\n ub_offset = 0\n ori_nburst = dhw_d * self.c_1\n burst_len = n_len * self.c_0 // self.cp_align_len\n src_stride = (n_d - n_len) * self.c_0 // self.cp_align_len\n dst_stride = 0\n args = tik_instance, self.src_gm, ub_ori, data_offset, ub_offset, \\\n ori_nburst, burst_len, src_stride, dst_stride, self.cp_align_len\n _gm_to_ub_one(args)\n\n hwnoni = hw_d * n_len\n with tik_instance.for_range(0, d_d) as num_d:\n with 
tik_instance.for_range(0, self.c_1) as num_c1:\n ori_cur = num_d * self.c_1 * hwnoni * self.c_0 \\\n + num_c1 * hwnoni * self.c_0\n trans_cur = num_d * self.c_1 * hwnoni * self.c_0 \\\n + num_c1 * self.c_0\n nburst = hwnoni\n burst_len = self.c_0 // self.cp_align_len\n src_stride = 0\n dst_stride = (self.c_1 - 1) * self.c_0 // self.cp_align_len\n tik_instance.data_move(\n ub_trans[trans_cur],\n ub_ori[ori_cur],\n 0, nburst, burst_len, src_stride, dst_stride)\n\n with tik_instance.for_range(0, dhw_d) as num_dhw:\n src_cur = num_dhw * n_len * c_d\n dst_cur = num_dhw * c_d\n nburst = n_len\n burst_len = c_d // self.cp_align_len\n src_stride = 0\n dst_stride = (dhw_d - 1) * c_d // self.cp_align_len\n tik_instance.data_move(\n ub_ori[dst_cur],\n ub_trans[src_cur],\n 0, nburst, burst_len, src_stride, dst_stride)\n\n dst_offset = n_before * dhw_d * c_d\n burst_len = n_len * dhw_d * c_d // self.cp_align_len\n tik_instance.data_move(self.dst_gm[dst_offset],\n ub_ori,\n 0, 1, burst_len, 0, 0)", "def align(model,\n left,\n right,\n max_length = 512):\n inputs = preprocess(left, right, max_length)\n output = model(inputs)\n output = expand(output)\n scores, path, params = postprocess(output, len(left), len(right))\n return Alignment(left, right, scores, path, params)", "def final_homography(pts1, pts2, feats1, feats2):\n\n #\n # Your code here\n #\n\n idxs1, idxs2 = find_matches(feats1, feats2)\n ransac_return = ransac(pts1[idxs1], pts2[idxs2])\n\n return ransac_return, idxs1, idxs2", "def align_attach(*args):\n # check selection, curves, etc\n sel = cmds.ls(sl=True)\n crv1 = \"\"\n crv2 = \"\"\n\n if sel and len(sel)== 2:\n check1 = rig.type_check(sel[0], \"nurbsCurve\")\n check2 = rig.type_check(sel[1], \"nurbsCurve\")\n if not check1 and check2:\n cmds.warning(\"you must select two curves!\")\n return\n else:\n cmds.warning(\"you must select two curves!\")\n return\t\t\n\n crv1, crv2 = sel[0], sel[1]\n newCrv = cmds.alignCurve(crv1, crv2, ch=False, replaceOriginal=False, attach=True, keepMultipleKnots=True, positionalContinuityType=2, tangentContinuity=False, curvatureContinuity=False, name = \"{0}_ATT\".format(crv1))\n cmds.setAttr(\"{0}.v\".format(crv1), 0)\n cmds.setAttr(\"{0}.v\".format(crv2), 0)", "def align(args) :\n from aligner import align_reads\n align_reads(args)", "def align(self):\n flag=0\n input=None\n level=None\n board=None\n ainps={'L0':[],'L1':[],'L2':[],'H0':[]} \n for i in self.inputs:\n if(i.inputnumber.var.get() == 1):\n if i.inpnumall == rareradio:\n input=i.inpnum\n level=i.level\n board=i.board\n print 'Rare chosen:',level,' ',input\n ainps[i.level].append(i.inpnum)\n flag=flag+1\n #print 'ainps:',ainps \n if flag < 2 :\n print \"Align: less then 2 inputs chosen. 
\" \n return\n if input==None:\n cmd=\"setRareFlag(0,0,0)\"\n else:\n mode='0'\n if level == 'H0': mode = '1'\n cmd=\"setRareFlag(\"+board+','+input+','+mode+\")\"\n print \"seting rare: \",cmd\n output=self.vb.io.execute(cmd,log=\"yes\",applout=\"<>\") \n self.align=Corel(self.vb,ainps)\n self.align.croscor()", "def _align(self, sum_heads_encoded, sum_bodies_encoded):\n _tmp_heads_encoded = tf.expand_dims(sum_heads_encoded, 1)\n vector_attn = tf.reduce_sum(\n tf.multiply(tf.nn.l2_normalize(sum_bodies_encoded, 2), tf.nn.l2_normalize(_tmp_heads_encoded, 2)), axis=2,\n keepdims=True)\n return tf.nn.softmax(vector_attn, axis=1)", "def mirror_api(data, height, distance):\n\n return {key: {'vertical': vert_angle(key, data, height, distance),\n 'horizontal': horiz_angle(key, data)} for key in data.keys()}", "def inverse_transform2(alpha, tx = 0.0, ty = 0.0, mirror = 0):\n\n\tt = Transform({\"type\":\"2D\",\"alpha\":alpha,\"tx\":tx,\"ty\":ty,\"mirror\":mirror,\"scale\":1.0})\n\tt = t.inverse()\n\tt = t.get_params(\"2D\")\n\treturn t[ \"alpha\" ], t[ \"tx\" ], t[ \"ty\" ], t[ \"mirror\" ]", "def mirrorTransformations_Custom(self):\n\n pass", "def mirror_densities(densities, mirror):\n nx, ny, nz = densities.shape\n if mirror[0]:\n densities = np.concatenate(\n (densities, densities[range(nx - 1, -1, -1), :, :]), axis=0)\n if mirror[1]:\n densities = np.concatenate(\n (densities, densities[:, range(ny - 1, -1, -1), :]), axis=1)\n if mirror[2]:\n densities = np.concatenate(\n (densities, densities[:, :, range(nz - 1, -1, -1)]), axis=2)\n return densities", "def imalign(src_file, dst_file, face_landmarks, output_size=1024, transform_size=1024, enable_padding=True, x_scale=1, y_scale=1, em_scale=0.1, alpha=False):\n lm = np.array(face_landmarks)\n lm_chin = lm[0 : 17] # left-right\n lm_eyebrow_left = lm[17 : 22] # left-right\n lm_eyebrow_right = lm[22 : 27] # left-right\n lm_nose = lm[27 : 31] # top-down\n lm_nostrils = lm[31 : 36] # top-down\n lm_eye_left = lm[36 : 42] # left-clockwise\n lm_eye_right = lm[42 : 48] # left-clockwise\n lm_mouth_outer = lm[48 : 60] # left-clockwise\n lm_mouth_inner = lm[60 : 68] # left-clockwise\n\n # Calculate auxiliary vectors.\n eye_left = np.mean(lm_eye_left, axis=0)\n eye_right = np.mean(lm_eye_right, axis=0)\n eye_avg = (eye_left + eye_right) * 0.5\n eye_to_eye = eye_right - eye_left\n mouth_left = lm_mouth_outer[0]\n mouth_right = lm_mouth_outer[6]\n mouth_avg = (mouth_left + mouth_right) * 0.5\n eye_to_mouth = mouth_avg - eye_avg\n\n # Choose oriented crop rectangle.\n x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]\n x /= np.hypot(*x)\n x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)\n x *= x_scale\n y = np.flipud(x) * [-y_scale, y_scale]\n c = eye_avg + eye_to_mouth * em_scale\n quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])\n qsize = np.hypot(*x) * 2\n\n # Load in-the-wild image.\n if not os.path.isfile(src_file):\n print('\\nCannot find source image. 
Please run \"--wilds\" before \"--align\".')\n return\n img = Image.open(src_file)\n\n # Shrink.\n shrink = int(np.floor(qsize / output_size * 0.5))\n if shrink > 1:\n rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))\n img = img.resize(rsize, Image.ANTIALIAS)\n quad /= shrink\n qsize /= shrink\n\n # Crop.\n border = max(int(np.rint(qsize * 0.1)), 3)\n crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))\n crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))\n if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:\n img = img.crop(crop)\n quad -= crop[0:2]\n\n # Pad.\n pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))\n pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))\n if enable_padding and max(pad) > border - 4:\n pad = np.maximum(pad, int(np.rint(qsize * 0.3)))\n img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')\n h, w, _ = img.shape\n y, x, _ = np.ogrid[:h, :w, :1]\n mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w-1-x) / pad[2]), 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h-1-y) / pad[3]))\n blur = qsize * 0.02\n img += (ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)\n img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0)\n img = np.uint8(np.clip(np.rint(img), 0, 255))\n if alpha:\n mask = 1-np.clip(3.0 * mask, 0.0, 1.0)\n mask = np.uint8(np.clip(np.rint(mask*255), 0, 255))\n img = np.concatenate((img, mask), axis=2)\n img = Image.fromarray(img, 'RGBA')\n else:\n img = Image.fromarray(img, 'RGB')\n quad += pad[:2]\n\n # Transform.\n img = img.transform((transform_size, transform_size), Image.QUAD, (quad + 0.5).flatten(), Image.BILINEAR)\n print(transform_size)\n if output_size < transform_size:\n img = img.resize((output_size, output_size), Image.ANTIALIAS)\n\n # Save aligned image.\n img.save(dst_file, 'PNG')", "def align(image):\n angle = find_angle(image)\n image = rotate(image, angle)\n return image", "def affine_transform_2d(v, mapping, alpha = 1):\r\n p_wgt = vec2(0, 0)\r\n q_wgt = vec2(0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n if (x == 0 and y == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n M1 = mat2(0)\r\n M2 = mat2(0)\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n M1 += p_adj.transpose_multiply(p_adj)*w[i]\r\n M2 += p_adj.transpose_multiply(q_adj)*w[i]\r\n M1 = M1.inverse()\r\n M = M1*M2\r\n M = M.transpose()\r\n v_out = M*(v - p_wgt) + q_wgt\r\n return v_out", "def test_align_two_alignments(self):\n res = align_two_alignments(self.align1_fp, self.align2_fp, RNA)\n self.assertEqual(res.toFasta(), self.align_two_align)", "def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right 
rotation\n if octantA2 in [2, 3, 5, 8]: \n theta = -theta \n print \"theta =\" , np.rad2deg(theta)\n RotY = np.matrix( [ [ np.cos(theta), 0.0, np.sin(theta) ],\n [ 0.0 , 1.0, 0.0 ],\n [ -np.sin(theta), 0.0, np.cos(theta) ]\n ]) \n \n psi = np.arctan( abs( adir[1] / adir[0] ) )\n #[2, 4, 6, 8 ] => left rotation\n #[1, 3, 5, 7 ] => right rotation\n if octantA2 in [1, 3, 5, 7]:\n psi = -psi\n print \"psi =\" , np.rad2deg(psi)\n RotZ = np.matrix( [ [ np.cos(psi), -np.sin(psi), 0.0 ],\n [ np.sin(psi), np.cos(psi), 0.0 ],\n [ 0.0 , 0.0 , 1.0 ]\n ])\n return np.asarray( RotY * RotZ )", "def test_direct_shape():\n\n n = 21\n x = np.ones((n, n))\n\n recon = abel.direct.direct_transform(x, direction='forward')\n assert recon.shape == (n, n) \n\n recon = abel.direct.direct_transform(x, direction='inverse')\n assert recon.shape == (n, n)", "def align(self) -> np.ndarray:\n vel = self.state[:, :, Boids.Attr.VEL]\n vel_norm = np.linalg.norm(vel, axis=0)\n orientation = vel / (vel_norm + EPSILON)\n mut_influence = self._perceive(self.p_range)\n desired_orientation = np.dot(orientation, mut_influence)\n desired_orientation = np.multiply(desired_orientation, \n vel_norm + EPSILON)\n return desired_orientation - orientation", "def __compose_transformation(self):\n s = self.scale\n rotR = self.rotation\n t = self.translation\n T = np.eye(4)\n T[0:3, 3] = t\n R = np.eye(4)\n R[0:3, 0:3] = rotR\n M = T.dot(R)\n if s == 1:\n M = T.dot(R)\n else:\n S = np.eye(4)\n S[0:3, 0:3] = np.diag([s, s, s])\n M = T.dot(R).dot(S)\n return M", "def c_align_small(self, tik_instance):\n n_d, d_d, h_d, w_d, c_d = self.dst_shape\n hw_d = h_d * w_d\n hwnoni = hw_d * self.n_o * self.n_i\n dhw_d = d_d * h_d * w_d\n\n ub_ori = tik_instance.Tensor(self.dtype,\n (self.ub_ele,),\n name=\"ub_ori\",\n scope=tik.scope_ubuf)\n ub_trans = tik_instance.Tensor(self.dtype,\n (self.ub_ele,),\n name=\"ub_trans\",\n scope=tik.scope_ubuf)\n\n burst_len = d_d * self.c_1 * hwnoni * self.c_0 // self.cp_align_len\n tik_instance.data_move(ub_ori,\n self.src_gm,\n 0, 1, burst_len, 0, 0)\n\n with tik_instance.for_range(0, d_d) as num_d:\n with tik_instance.for_range(0, self.c_1) as num_c1:\n ori_cur = num_d * self.c_1 * hwnoni * self.c_0\\\n + num_c1 * hwnoni * self.c_0\n trans_cur = num_d * self.c_1 * hwnoni * self.c_0\\\n + num_c1 * self.c_0\n nburst = hwnoni\n burst_len = self.c_0 // self.cp_align_len\n src_stride = 0\n dst_stride = (self.c_1 - 1) * self.c_0 // self.cp_align_len\n tik_instance.data_move(\n ub_trans[trans_cur],\n ub_ori[ori_cur],\n 0, nburst, burst_len, src_stride, dst_stride)\n\n with tik_instance.for_range(0, dhw_d) as num_dhw:\n src_cur = num_dhw * self.n_o * self.n_i * c_d\n dst_cur = num_dhw * c_d\n nburst = n_d\n burst_len = c_d // self.cp_align_len\n src_stride = 0\n dst_stride = (dhw_d - 1) * c_d // self.cp_align_len\n tik_instance.data_move(\n ub_ori[dst_cur],\n ub_trans[src_cur],\n 0, nburst, burst_len, src_stride, dst_stride)\n\n burst_len = n_d * dhw_d * c_d // self.cp_align_len\n tik_instance.data_move(self.dst_gm,\n ub_ori,\n 0, 1, burst_len, 0, 0)\n\n return tik_instance", "def test_align_idx(self):\n self.amp4.rotateAng([5, 5, 5], ang='deg')\n al = align(self.amp3, self.amp4, mv=[0, 1, 2, 3], sv=[0, 1, 2, 3], method='idxPoints')\n all(self.assertAlmostEqual(al.m.vert[i, 0], al.s.vert[i, 0], delta=0.1) for i in range(al.s.vert.shape[0]))", "def distMap(frame1, frame2):\r\n frame1_32 = np.float32(frame1)\r\n frame2_32 = np.float32(frame2)\r\n diff32 = frame1_32 - frame2_32\r\n norm32 = np.sqrt(diff32[:,:,0]**2 + 
diff32[:,:,1]**2 + diff32[:,:,2]**2)/np.sqrt(255**2 + 255**2 + 255**2)\r\n dist = np.uint8(norm32*255)\r\n return dist", "def AlignFieldmaps(self):\n for entry in self.entry_map['fmap']:\n info = self.info[entry]\n\n# Register the magnitude image at the shortest TR to the T1-IR\n# structural image.\n target = self.info[self.norm_src]['imgfile'] + \\\n self.info[self.norm_src]['suffix']\n source = info['magfile'] + info['suffix']\n matfile = info['matfile']\n fmt = '3dAllineate -prefix NULL -1Dmatrix_save %s -base %s ' + \\\n '-source %s -cost mi -warp shift_rotate'\n cmd = fmt % (info['matfile'], target, source)\n self.CheckExec(cmd, [info['matfile']])\n\n# Convert to unitary matrix (remove scaling component.)\n cmd = 'cat_matvec -ONELINE %s -P > %s' % \\\n (info['matfile'], info['matfile_unitary'])\n self.CheckExec(cmd, [info['matfile_unitary']])\n\n# Rotate the magnitude image to the new grid.\n fmt = '3dAllineate -prefix %s -interp cubic -1Dmatrix_apply %s %s'\n cmd = fmt % (info['magfile_r']+info['suffix'], \\\n info['matfile_unitary'], info['magfile'] + info['suffix'])\n self.CheckExec(cmd, [info['magfile_r']+info['suffix']])\n\n# Rotate the fieldmap to the new grid.\n fmt = '3dAllineate -prefix %s -interp cubic -1Dmatrix_apply %s %s'\n cmd = fmt % (info['imgfile_r']+info['suffix'], \\\n info['matfile_unitary'], info['imgfile'] + info['suffix'])\n self.CheckExec(cmd, [info['imgfile_r']+info['suffix']])", "def similarity_transform_2d(v, mapping, alpha = 1):\r\n p_wgt = vec2(0, 0)\r\n q_wgt = vec2(0, 0)\r\n w = len(mapping)*[None]\r\n w_sum = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n x = mp[0].x - v.x\r\n y = mp[0].y - v.y\r\n if (x == 0 and y == 0): return mp[1]\r\n w[i] = 1/((x*x + y*y) ** alpha)\r\n p_wgt += mp[0]*w[i]\r\n q_wgt += mp[1]*w[i]\r\n w_sum += w[i]\r\n p_wgt /= w_sum\r\n q_wgt /= w_sum\r\n mu = 0\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n mu += w[i]*(p_adj.dot(p_adj))\r\n A_fac = mat2([v.x - p_wgt.x, v.y - p_wgt.y, v.y - p_wgt.y, p_wgt.x - v.x])\r\n v_out = vec2(0, 0)\r\n for i in range(len(mapping)):\r\n mp = mapping[i]\r\n p_adj = mp[0] - p_wgt\r\n q_adj = mp[1] - q_wgt\r\n A = mat2([p_adj.x, p_adj.y, p_adj.y, -p_adj.x])*A_fac*w[i]\r\n A = A.transpose()\r\n v_out += A*q_adj/mu\r\n v_out += q_wgt\r\n return v_out", "def extforce (u, v):\r\n\r\n for i in range (height):\r\n for j in range (width):\r\n u[i,j], v[i,j] = np.stack((u[i,j], v[i,j])) + dt * extacc\r\n\r\n return u, v", "def is_aligned_2d(self, p_list1, p_list2, p_left_key, p_top_key, p_width_key, p_height_key):\n # check if terms align vertically\n IVA = self.is_align_1D(p_list1, p_list2, p_left_key, p_width_key)\n # check if terms align horizonally\n IHA = self.is_align_1D(p_list1, p_list2, p_top_key, p_height_key)\n if IVA == True:\n # if terms align vertically get direction and distance\n return self.get_vertical_align_direction(p_list1, p_list2, p_top_key, p_height_key)\n elif IHA == True:\n # if terms align vertically get direction and distance\n return self.get_horizontal_align_direction(p_list1, p_list2, p_left_key, p_width_key)\n else: return None", "def ellipsoidPair(N,srcdist=89.61e3+1.5e3,primalign=np.zeros(6),\\\n secalign=np.zeros(6),rrays=False,f=None,\\\n plist=[[0],[0],[0]],hlist=[[0],[0],[0]]):\n #Establish subannulus of rays\n r1 = conic.ellipsoidRad(srcdist,1.,220.,8400.,8500.)\n rays = sources.subannulus(220.,r1,100./220.,N,zhat=-1.)\n tran.pointTo(rays,0,0,srcdist,reverse=1.)\n## #Transform to node position\n## 
tran.transform(rays,220,0,0,0,0,0)\n## #Set up finite source distance\n## raydist = sqrt(srcdist**2+rays[1]**2+rays[2]**2)\n## rays[4] = rays[1]/raydist\n## rays[5] = rays[2]/raydist\n## rays[6] = -sqrt(1.-rays[4]**2-rays[5]**2)\n\n #Place mirror pair\n coords = [tran.tr.identity_matrix()]*4\n prad = conic.ellipsoidRad(srcdist,1.,220.,8400.,8450.)\n tran.transform(rays,prad,0,50.,0,0,0,\\\n coords=coords)\n tran.transform(rays,*primalign,coords=coords)\n tran.transform(rays,-prad,0,-8450.,0,0,0,\\\n coords=coords)\n surf.ellipsoidPrimaryLL(rays,220.,8400.,srcdist,1.,8500.,8400.,100./220,\\\n *plist)\n #Vignette any rays outside of active area\n rays = tran.vignette(rays,ind=np.logical_and(rays[3]<8500.,\\\n rays[3]>8400.))\n## surf.ellipsoidPrimary(rays,220.,8400.,srcdist,1.)\n tran.reflect(rays)\n #Place secondary in primary's reference frame\n srad = conic.ehSecRad(srcdist,1.,220.,8400.,8350.)\n tran.transform(rays,srad,0,8350.,0,0,0,\\\n coords=coords)\n tran.transform(rays,*secalign,coords=coords)\n tran.itransform(rays,srad,0,8350.,0,0,0,\\\n coords=coords)\n## surf.ellipsoidSecondary(rays,220.,8400.,srcdist,1.)\n surf.ellipsoidSecondaryLL(rays,220.,8400.,srcdist,1.,8400.,8300.,100./220,\\\n *hlist)\n rays = tran.vignette(rays,ind=np.logical_and(rays[3]<8400.,\\\n rays[3]>8300.))\n ang = anal.grazeAngle(rays)\n tran.reflect(rays)\n\n #Go back to nominal node reference frame and down to focus\n rays = tran.applyT(rays,coords,inverse=True)\n\n if f is None:\n f = -surf.focusI(rays)\n print f\n else:\n tran.transform(rays,0,0,-f,0,0,0)\n surf.flat(rays)\n\n if rrays is True:\n return rays\n \n return anal.hpd(rays)/f * 180/np.pi * 60.**2", "def _align(cycles, embs, num_steps, num_cycles, cycle_length,\n similarity_type, temperature):\n logits_list = []\n labels_list = []\n for i in range(num_cycles):\n logits, labels = _align_single_cycle(cycles[i],\n embs,\n cycle_length,\n num_steps,\n similarity_type,\n temperature)\n logits_list.append(logits)\n labels_list.append(labels)\n\n logits = torch.cat(logits_list, dim=0)\n labels = torch.cat(labels_list, dim=0)\n\n return logits, labels", "def create_mirror_normal(self):\n # mirror\n self.mirror_line = Line(ORIGIN, ORIGIN + self.mirror_width * RIGHT, color=self.mirror_color).move_to(self.mirror_origin)\n\n # normal\n normal_center = self.mirror_origin + 0.5 * self.normal_length * UP\n self.normal_line = Line (ORIGIN, ORIGIN + self.normal_length * UP, color=self.normal_color).move_to(normal_center)\n self.normal_arrow = Arrow(ORIGIN, ORIGIN + self.normal_length * UP, color=self.normal_color,\n stroke_width = 4, buff=0).move_to(normal_center)\n\n # text normal\n self.text_normal_fig = TextMobject(r\"Normal\").scale(1.4).move_to(-0.2 * RIGHT + 0.0 * UP)\n self.text_mirror_fig = TextMobject(r\"Mirror\").scale(1.4).move_to( 3.0 * RIGHT + -2.0 * UP)\n\n # right side 90 degree angle (right side right angle)\n self.right_right_angle = Elbow(width = self.right_right_angle_len,\n angle = 0, color = YELLOW, about_point = ORIGIN)\n self.right_right_angle.move_to(self.mirror_origin +\n 0.5 * self.right_right_angle_len * RIGHT + 0.5 * self.right_right_angle_len * UP)\n\n # left side 90 degree angle (left side right angle)\n self.left_right_angle = Elbow(width = self.left_right_angle_len,\n angle = PI/2, color = YELLOW, about_point = ORIGIN)\n self.left_right_angle.move_to(self.mirror_origin +\n -0.5 * self.left_right_angle_len * RIGHT + 0.5 * self.left_right_angle_len * UP)", "def equalize(pair, bias_axis, word_to_vec_map):\n\n ### START CODE HERE ###\n 
# Step 1: Select word vector representation of \"word\". Use word_to_vec_map. (≈ 2 lines)\n w1, w2 = pair\n e_w1, e_w2 = (word_to_vec_map[w1], word_to_vec_map[w2])\n\n # Step 2: Compute the mean of e_w1 and e_w2 (≈ 1 line)\n mu = (e_w1 + e_w2) / 2\n\n # Step 3: Compute the projections of mu over the bias axis and the orthogonal axis (≈ 2 lines)\n mu_B = np.dot(mu, bias_axis) / np.sum(np.dot(bias_axis, bias_axis)) * bias_axis\n mu_orth = mu - mu_B\n\n # Step 4: Use equations (7) and (8) to compute e_w1B and e_w2B (≈2 lines)\n e_w1B = np.dot(e_w1, bias_axis) / np.sum(np.dot(bias_axis, bias_axis)) * bias_axis\n e_w2B = np.dot(e_w2, bias_axis) / np.sum(np.dot(bias_axis, bias_axis)) * bias_axis\n\n # Step 5: Adjust the Bias part of e_w1B and e_w2B using the formulas (9) and (10) given above (≈2 lines)\n corrected_e_w1B = np.sqrt(np.abs(1 - np.sum(np.dot(mu_orth, mu_orth)))) * (e_w1B - mu_B) / np.sqrt(\n np.sum(np.dot(e_w1 - mu_orth - mu_B, e_w1 - mu_orth - mu_B)))\n corrected_e_w2B = np.sqrt(np.abs(1 - np.sum(np.dot(mu_orth, mu_orth)))) * (e_w2B - mu_B) / np.sqrt(\n np.sum(np.dot(e_w2 - mu_orth - mu_B, e_w2 - mu_orth - mu_B)))\n\n # Step 6: Debias by equalizing e1 and e2 to the sum of their corrected projections (≈2 lines)\n e1 = corrected_e_w1B + mu_orth\n e2 = corrected_e_w2B + mu_orth\n\n ### END CODE HERE ###\n\n return e1, e2", "def norm_align(norm, d, reference):\n \n if(np.dot(norm, reference) < 0):\n norm = [x * (-1.0) for x in norm]\n d = d * (-1.0)\n return norm, d", "def norm_align(norm, d, reference):\n \n if(np.dot(norm, reference) < 0):\n norm = [x * (-1.0) for x in norm]\n d = d * (-1.0)\n return norm, d", "def get_mapping(self, type = 'orthogonal', anchor_method = 'mutual_nn', max_anchors = None):\n # Method 1: Orthogonal projection that best macthes NN\n self.compute_scores(score_type='coupling') # TO refresh\n if anchor_method == 'mutual_nn':\n pseudo = self.find_mutual_nn()#[:100]\n elif anchor_method == 'all':\n translations, oov = self.generate_translations()\n pseudo = [(k,v[0]) for k,v in translations.items()]\n if max_anchors:\n pseudo = pseudo[:max_anchors]\n print('Finding orthogonal mapping with {} anchor points via {}'.format(len(pseudo), anchor_method))\n if anchor_method in ['mutual_nn', 'all']:\n idx_src = [self.src_word2ind[ws] for ws,_ in pseudo]\n idx_trg = [self.trg_word2ind[wt] for _,wt in pseudo]\n xs_nn = self.xs[idx_src]\n xt_nn = self.xt[idx_trg]\n P = orth_procrustes(xs_nn, xt_nn)\n elif anchor_method == 'barycenter':\n ot_emd = ot.da.EMDTransport()\n ot_emd.xs_ = self.xs\n ot_emd.xt_ = self.xt\n ot_emd.coupling_= self.coupling\n xt_hat = ot_emd.inverse_transform(Xt=self.xt) # Maps target to source space\n P = orth_procrustes(xt_hat, self.xt)\n return P", "def _align_segment(mask, prototype):\n K = prototype.shape[0]\n c_matrix = np.dot(prototype, mask.T)\n\n reverse_permutation = np.zeros((K,), dtype=np.int)\n estimated_permutation = np.zeros((K,), dtype=np.int)\n\n for _ in range(K):\n c_max = np.max(c_matrix, axis=0)\n index_0 = np.argmax(c_matrix, axis=0)\n index_1 = np.argmax(c_max)\n c_matrix[index_0[index_1], :] = -1\n c_matrix[:, index_1] = -1\n reverse_permutation[index_0[index_1]] = index_1\n estimated_permutation[index_1] = index_0[index_1]\n\n return reverse_permutation", "def align(shape, target):\n translated = translate_to_origin(shape)\n scaled = normalize(translated)\n aligned = rotate_to_target(scaled, target)\n return aligned", "def augment():\n print(\"augmenting......\")\n path1 = '../trainp1/'\n path2 = '../trainp2/'\n # path of 
pair1 and pair2 similar to img & mask task for segmentation\n p = Augmentor.Pipeline(path1) # pair1\n p.ground_truth(path2) # pair2\n p.rotate(probability=0.3, max_left_rotation=3, max_right_rotation=3) \n p.flip_left_right(probability=0.2) \n p.random_distortion(0.5, 2, 2, 2)\n p.zoom(probability=0.5, min_factor=0.95, max_factor=1.05)\n p.process()", "def combine_trans_projection(tx: float, ty: float, rot: float, mag: float, x: float, y: float, z: float, phi: float, the: float, psi: float, tiltangle: float, dim: float, binning: int, particle_dim: int = 200):\n from numpy import cos, sin, pi\n\n # Calculates the inverse transformation matrix of the projection alignment transformations\n alpha = -rot * pi/180\n c = cos(alpha)\n s = sin(alpha)\n\n rotate = np.matrix([[c, s, 0], [-s, c, 0], [0, 0, 1]])\n magnify = np.matrix([[mag, 0, 0], [0, mag, 0], [0, 0, 1]])\n translate = np.matrix([[1, 0, tx], [0, 1, ty], [0, 0, 1]])\n\n align_transformations = np.linalg.inv(rotate * magnify * translate)\n\n # Map the 3D position to a 2D position on the projection of the tiltangle\n x = x * binning\n y = y * binning\n z = z * binning\n\n aligned_y = y # assume the rotation axis is around y\n aligned_x = (cos(tiltangle * pi / 180) * (x - dim / 2) -\n sin(tiltangle * pi / 180) * (z - dim / 2)) + dim / 2\n\n # Use the projection alignment transformations to map this 2D position to a 2D position on the raw projections\n aligned_pos = np.matrix([[aligned_x - dim/2], [aligned_y - dim/2], [1]])\n raw_pos = align_transformations * aligned_pos\n\n # Calculate the rotation matrix for the template, a combination of the particle rotation and the tilt angle\n template_3d_rotation = generate_rotation_matrix(0, tiltangle, 0) * generate_rotation_matrix(\n phi, the, psi) * matrix_rotate_3d_z(rot) * matrix_magnify_3d(mag)\n\n # Merge this matrix with the projection transformations\n merged_matrix = template_3d_rotation\n\n return (align_transformations, (raw_pos.item(0, 0) + dim/2, raw_pos.item(1, 0) + dim/2), (aligned_x, aligned_y), merged_matrix)", "def augment(x: np.ndarray, y: np.ndarray):\n scipy.random.seed()\n\n # scale = np.random.normal(1, 0.1, size=3)\n alpha, theta = np.random.normal(0, 9, size=2)\n alpha = 0\n\n for i in range(1, len(x.shape) - 1):\n if np.random.binomial(1, .5):\n x = np.flip(x, -i)\n y = np.flip(y, -i)\n\n # mscan = np.array([_scale_crop(i) for i in mscan])\n # segm = _scale_crop(segm[0])[np.newaxis]\n\n x = _rotate(x, 3, theta, alpha)\n y = _rotate(y, 0, theta, alpha)\n\n # if np.random.binomial(1, .5):\n # t = np.random.choice([-90, 0, 90])\n # a = np.random.choice([-90, 0, 90])\n # mscan = _rotate(mscan, 3, t, a)\n # segm = _rotate(segm, 3, t, a)\n\n x = np.array([i * np.random.normal(1, 0.35) for i in x])\n return x, y", "def test_matrix22(gridsize=50):\n\n v1 = vec2(3,0)\n v2 = vec2(0,3)\n\n #rotate 45 degrees \n m22 = matrix22()\n m22.from_euler(45)\n\n # make a second matrix, also 45 degrees, should give us 90 total \n m22_2 = matrix22()\n m22_2.from_euler(45)\n m22 = m22_2 * m22\n\n # mutliply a vector by the matrix \n v3 = m22 * v2 \n\n fb = pixel_op() \n fb.create_buffer(800, 800)\n fb.graticule(gridsize)\n \n pts = [ (0,0), (0,1), (2,1), (0,2) ]\n #bloody_simple_2drender('2d_rotation.png', pts=pts, gridsize=50, pfb=fb)\n\n vecs = [v2,v3]\n bloody_simple_2drender('2d_rotation.png', vecs=vecs, gridsize=50, pfb=fb)\n\n #rotate the points by matrix multiplication \n pts = m22.batch_mult_pts(pts) \n bloody_simple_2drender('2d_rotation.png', pts=pts, gridsize=50, pfb=fb)\n 
fb.save('2d_rotation.png')", "def MatchMatrixs (self,Node1,Node2):\n\n T1Native_Node = Node1\n T1Native_Matrix = slicer.util.arrayFromVolume(T1Native_Node)\n DimN = T1Native_Matrix.shape\n T1Enhanced_Node = Node2\n T1Enhanced_Matrix = slicer.util.arrayFromVolume(T1Enhanced_Node)\n DimE = T1Enhanced_Matrix.shape\n\n NMatrix = self.GetIJKToRASnpArray(T1Native_Node)\n NVector = NMatrix[:-1,-1]\n EMatrix = self.GetIJKToRASnpArray(T1Enhanced_Node)\n EVector = EMatrix[:-1,-1]\n NPixelSize = [np.linalg.norm(NMatrix[:-1,0]), np.linalg.norm(NMatrix[:-1,1])]\n EPixelSize = [np.linalg.norm(EMatrix[:-1,0]), np.linalg.norm(EMatrix[:-1,1])]\n\n Niversor = NMatrix[:-1,0]/NPixelSize[0]\n Njversor = NMatrix[:-1,1]/NPixelSize[1]\n Nkversor = np.round(np.cross(Niversor,Njversor),3)\n Nkstep = round(np.linalg.norm(NMatrix[:-1,2]),3)\n\n Eiversor = EMatrix[:-1,0]/EPixelSize[0]\n Ejversor = EMatrix[:-1,1]/EPixelSize[1]\n Ekversor = np.round(np.cross(Eiversor,Ejversor),3)\n Ekstep = round(np.linalg.norm(EMatrix[:-1,2]),3)\n print(Nkversor,Ekversor,Nkstep,Ekstep,NVector,EVector,(NVector-EVector).dot(Ekversor))\n if not ( np.sum(Nkversor==Ekversor) == 3 and Nkstep==Ekstep and ((NVector-EVector).dot(Ekversor)) == 0 ): # it verifies if the slices are oriented in the same direction, with the same step between slices and if the first images are complanar.\n slicer.util.warningDisplay('The geometry of the LL Native and LL Enhanced volume doesn\\'t match. It could deteriorate the ECV map', windowTitle= 'Warning')\n\n if (DimE == DimN):\n T1_ECVMappingLogic.setupNodeFromNode(self,self.ECVMapNode , self.NativeT1_Selector.currentNode()) \n return [T1Native_Matrix,T1Enhanced_Matrix]\n if (DimE[1:3] == DimN[1:3]):\n k = min([DimE[1],DimN[1]])\n T1_ECVMappingLogic.setupNodeFromNode(self,self.ECVMapNode , self.NativeT1_Selector.currentNode())\n return [T1Native_Matrix[:k,:,:],T1Enhanced_Matrix[:k,:,:]]\n\n jN = np.arange(0,DimN[2]*NPixelSize[1],NPixelSize[1])+NPixelSize[1]/2+(NVector-EVector).dot(Njversor)\n iN = np.arange(0,DimN[1]*NPixelSize[0],NPixelSize[0])+NPixelSize[0]/2+(NVector-EVector).dot(Niversor)\n iE = np.arange(0,DimE[1]*EPixelSize[0],EPixelSize[0])+EPixelSize[0]/2\n jE = np.arange(0,DimE[2]*EPixelSize[1],EPixelSize[1])+EPixelSize[1]/2 \n if DimE[1] > DimN[1]: ## I concidered a square image\n T1Nreshaped = np.zeros(DimE)\n for k in range(DimN[0]):\n f = interpolate.interp2d(iN, jN, np.nan_to_num(T1Native_Matrix[k,:,:]), fill_value = 0)\n T1Nreshaped[k,:,:] = f(iE, jE)\n T1Ereshaped = T1Enhanced_Matrix[:k+1,:,:]\n T1_ECVMappingLogic.setupNodeFromNode(self,self.ECVMapNode , self.EnhancedT1_Selector.currentNode())\n return [T1Nreshaped,T1Ereshaped]\n else:\n T1Ereshaped = np.zeros(DimN)\n for k in range(DimE[0]):\n f = interpolate.interp2d(iE, jE, np.nan_to_num(T1Enhanced_Matrix[k,:,:]), fill_value = 0)\n T1Ereshaped[k,:,:] = f(iN, jN) \n T1Nreshaped = T1Native_Matrix[:k+1,:,:]\n T1_ECVMappingLogic.setupNodeFromNode(self,self.ECVMapNode , self.NativeT1_Selector.currentNode()) \n return [T1Nreshaped,T1Ereshaped]", "def _transform_warp_impl3d(\n src: Tensor,\n dst_pix_trans_src_pix: Tensor,\n dsize_src: tuple[int, int, int],\n dsize_dst: tuple[int, int, int],\n grid_mode: str,\n padding_mode: str,\n align_corners: bool,\n) -> Tensor:\n dst_norm_trans_src_norm: Tensor = normalize_homography3d(dst_pix_trans_src_pix, dsize_src, dsize_dst)\n\n src_norm_trans_dst_norm = torch.inverse(dst_norm_trans_src_norm)\n return homography_warp3d(src, src_norm_trans_dst_norm, dsize_dst, grid_mode, padding_mode, align_corners, True)", 
"def reverse_params(cls, ax_params, space): # pylint: disable=unused-argument\n orion_params = copy.deepcopy(ax_params)\n return orion_params", "def _set_reflect_both(padded, axis, width_pair, method, include_edge=False):\n left_pad, right_pad = width_pair\n old_length = padded.shape[axis] - right_pad - left_pad\n\n if include_edge:\n # Edge is included, we need to offset the pad amount by 1\n edge_offset = 1\n else:\n edge_offset = 0 # Edge is not included, no need to offset pad amount\n old_length -= 1 # but must be omitted from the chunk\n\n if left_pad > 0:\n # Pad with reflected values on left side:\n # First limit chunk size which can't be larger than pad area\n chunk_length = min(old_length, left_pad)\n # Slice right to left, stop on or next to edge, start relative to stop\n stop = left_pad - edge_offset\n start = stop + chunk_length\n left_slice = _slice_at_axis(slice(start, stop, -1), axis)\n left_chunk = padded[left_slice]\n\n if method == \"odd\":\n # Negate chunk and align with edge\n edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis)\n left_chunk = 2 * padded[edge_slice] - left_chunk\n\n # Insert chunk into padded area\n start = left_pad - chunk_length\n stop = left_pad\n pad_area = _slice_at_axis(slice(start, stop), axis)\n padded[pad_area] = left_chunk\n # Adjust pointer to left edge for next iteration\n left_pad -= chunk_length\n\n if right_pad > 0:\n # Pad with reflected values on right side:\n # First limit chunk size which can't be larger than pad area\n chunk_length = min(old_length, right_pad)\n # Slice right to left, start on or next to edge, stop relative to start\n start = -right_pad + edge_offset - 2\n stop = start - chunk_length\n right_slice = _slice_at_axis(slice(start, stop, -1), axis)\n right_chunk = padded[right_slice]\n\n if method == \"odd\":\n # Negate chunk and align with edge\n edge_slice = _slice_at_axis(\n slice(-right_pad - 1, -right_pad), axis)\n right_chunk = 2 * padded[edge_slice] - right_chunk\n\n # Insert chunk into padded area\n start = padded.shape[axis] - right_pad\n stop = start + chunk_length\n pad_area = _slice_at_axis(slice(start, stop), axis)\n padded[pad_area] = right_chunk\n # Adjust pointer to right edge for next iteration\n right_pad -= chunk_length\n\n return left_pad, right_pad", "def distMap(frame1, frame2):\n frame1_32 = np.float32(frame1)\n frame2_32 = np.float32(frame2)\n diff32 = frame1_32 - frame2_32\n norm32 = np.sqrt(diff32[:,:,0]**2 + diff32[:,:,1]**2 + diff32[:,:,2]**2)/np.sqrt(255**2 + 255**2 + 255**2)\n dist = np.uint8(norm32*255)\n return dist", "def distMap(frame1, frame2):\n frame1_32 = np.float32(frame1)\n frame2_32 = np.float32(frame2)\n diff32 = frame1_32 - frame2_32\n norm32 = np.sqrt(diff32[:,:,0]**2 + diff32[:,:,1]**2 + diff32[:,:,2]**2)/np.sqrt(255**2 + 255**2 + 255**2)\n dist = np.uint8(norm32*255)\n return dist" ]
[ "0.6219051", "0.6053582", "0.5752545", "0.57426506", "0.5725641", "0.5692372", "0.5691541", "0.5664221", "0.56270707", "0.5618279", "0.5603235", "0.5593693", "0.5545809", "0.545052", "0.5446854", "0.5441767", "0.54299045", "0.54238695", "0.54219955", "0.5421373", "0.5344707", "0.5332255", "0.531676", "0.5315854", "0.5310657", "0.5307549", "0.5307014", "0.5289081", "0.5280396", "0.5273125", "0.5266314", "0.52591085", "0.52572393", "0.52475595", "0.5233083", "0.52130175", "0.5207462", "0.51876533", "0.5175388", "0.5173748", "0.51602864", "0.51468855", "0.5134944", "0.51293886", "0.51280785", "0.51270103", "0.51268476", "0.5120284", "0.5119911", "0.5119178", "0.51173586", "0.51120937", "0.5109932", "0.51064605", "0.5097986", "0.50887537", "0.5077787", "0.507518", "0.50719607", "0.5061718", "0.5050718", "0.5046829", "0.50373006", "0.5036034", "0.5019572", "0.50171524", "0.5016851", "0.5012813", "0.5010622", "0.5009149", "0.50037265", "0.5002646", "0.50012285", "0.49947566", "0.49908558", "0.49781775", "0.4977393", "0.49760473", "0.4972172", "0.49442133", "0.49421543", "0.49414757", "0.49394268", "0.49354935", "0.49347293", "0.4934222", "0.4934222", "0.4933546", "0.49279356", "0.49271291", "0.49268302", "0.4918853", "0.49182254", "0.491487", "0.4913783", "0.49134296", "0.49131832", "0.49089998", "0.4902984", "0.4902984" ]
0.6524807
0
Convert a text file that is composed of columns of numbers into spider doc file
def create_spider_doc(fname,spiderdoc): from string import atoi,atof infile = open(fname,"r") lines = infile.readlines() infile.close() nmc = len(lines[0].split()) table=[] for line in lines: data = line.split() for i in xrange(0,nmc): data[i] = atof(data[i]) table.append(data) drop_spider_doc(spiderdoc ,table)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_docs(filename):\n \n # open word doc\n word = win32.gencache.EnsureDispatch('Word.Application')\n doc = word.Documents.Open(os.getcwd() + '/' + filename + \".doc\")\n doc.Activate()\n \n # read word doc as list of lists\n data = [doc.Tables(i).Range.Text for i in range(1,5)]\n data = ''.join(data)\n data = data.replace('\\r\\x07\\r\\x07', ', ')\n data = data.replace('\\r\\x07', ', ')\n data = data.split(\", \")\n \n # separate columns into lists\n varname = data[0::4]\n description = data[1::4]\n valuelineref = data[2::4]\n type = data[3::4]\n\n # create pandas dataframe and clean up\n df = pd.DataFrame(list(zip(varname, description, valuelineref, type)))\n doc.Close(True) # is this a function?\n headers = df.iloc[0]\n df = df[1:]\n df.columns = headers\n df['Variable Name'] = df['Variable Name'].str.replace('\\r','')\n \n # store as csv\n df.to_csv(filename + '.csv', index = False)\n return df", "def doc_to_df(self, doc_no):\n doc_txt = pd.DataFrame()\n i = 1\n with open ('{doc_id}.txt'.format(doc_id = doc_no)) as file:\n for line in file:\n words = pd.Series(line.split(' '))\n doc_txt = doc_txt.append(words, ignore_index=True)\n return doc_txt", "def outTxt(data, outPath, fileName):\n\n with open(outPath+fileName, \"wb\") as f:\n f.write(\"index,link,name,rating,review,price,category,neighborhood,address,phone,feedback\\n\")\n for record in data:\n f.write(\"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\\n\" % \\\n (record[0],record[1],record[2],record[3],record[4],record[5],record[6],\\\n record[7],record[8],record[9],record[10]))", "def parseManual(filename):\n r = parse(filename)\n res = {}\n for qid in r:\n res[qid] = [int(y) for y in r[qid].split(\",\")]\n return res", "def preformat_coldesc(txt):\r\n\r\n # Converting unnumbered lists directly to DocBook:\r\n #\r\n # The list:\r\n #\r\n # - one\r\n # - two\r\n # - three\r\n #\r\n # Is converted to:\r\n # The list:\r\n # +++<itemizedlist>\r\n # <listitem><simpara> one </simpara></listitem>\r\n # <listitem><simpara> two </simpara></listitem>\r\n # <listitem><simpara> three </simpara></listitem>\r\n # </itemizedlist>\r\n #\r\n # 1. The list must be preceded with a text line, \r\n # followed by two blank lines.\r\n # 2. Each list item must start with \"minus\" (-) without indention.\r\n # Line breaks inside list items are not allowed.\r\n # 3. 
Two or more list items must exist.\r\n\r\n if not txt: txt=\"\"\r\n g = re.compile(\"(\\n\\s*)((\\n- [^\\n]+){2,})\")\r\n txt = g.sub(r\"\\1 +++<itemizedlist> \\2 </itemizedlist>+++\", txt)\r\n\r\n g = re.compile(r\"(\\+\\+\\+<itemizedlist>.*\\n)- ([^\\n]+)(.*</itemizedlist>\\+\\+\\+)\", re.DOTALL)\r\n while(g.search(txt)):\r\n txt = g.sub(r\"\\1 <listitem><simpara>+++ \\2 +++</simpara></listitem> \\3\", txt)\r\n\r\n return txt", "def parse_page(lines, results):\n weights = None\n column = []\n for line in lines:\n if Utils.is_only_newline(line): # No content in this line, it must separate two columns.\n if column and is_weight_column(column[0]):\n weights = parse_weight_column(column[1:])\n if column and not is_weight_column(column[0]):\n parse_data_rate_column(column, weights, results)\n column = []\n else:\n column.append(line)\n else:\n parse_data_rate_column(column, weights, results)", "def drop_spider_doc(filename, data, comment = None):\n\toutf = open(filename, \"w\")\n\tfrom datetime import datetime\n\toutf.write(\" ; %s %s %s\\n\" % (datetime.now().ctime(), filename, comment))\n\tcount = 1 # start key from 1; otherwise, it is confusing...\n\tfor dat in data:\n\t\ttry:\n\t\t\tnvals = len(dat)\n\t\t\tif nvals <= 5: datstrings = [\"%5d %d\" % (count, nvals)]\n\t\t\telse : datstrings = [\"%6d %d\" % (count, nvals)]\n\t\t\tfor num in dat:\n\t\t\t\tdatstrings.append(\"%12.5g\" % (num))\n\t\texcept TypeError:\n\t\t\t# dat is a single number\n\t\t\tdatstrings = [\"%5d 1%12.5g\" % (count, dat)]\n\t\tdatstrings.append(\"\\n\")\n\t\toutf.write(\"\".join(datstrings))\n\t\tcount += 1\n\toutf.close()", "def parse_file(file):\n for line in open(file,'r'):\n line = line.strip()\n token = line.split('\\t')\n ### loop through ids in second column and print with first columns \n for item in token[1].split(','):\n print item+'\\t'+token[0]", "def read_text_file(file_name, ncol = 0):\n\t\n\tfrom string import split\n\tinf = file(file_name, \"r\")\n\tline = inf.readline()\n\tdata = []\n\twhile len(line) > 0:\n\t\tif ncol == -1:\n\t\t\tvdata = split(line)\n\t\t\tif data == []:\n\t\t\t\tfor i in xrange(len(vdata)):\n\t\t\t\t\tdata.append([float(vdata[i])])\n\t\t\telse:\n\t\t\t\tfor i in xrange(len(vdata)):\n\t\t\t\t\tdata[i].append(float(vdata[i]))\t\t\t\n\t\telse:\n\t\t\tvdata = float(split(line)[ncol])\n\t\t\tdata.append(vdata)\n\t\tline = inf.readline()\n\treturn data", "def loadtxt(filepath,comments='#',delimiter=None,skiprows=0,usecols=None,index_offset=1):\n X = loadtxt(filepath,comments=comments,delimiter=delimiter,skiprows=skiprows,usecols=usecols)\n return fast_sparse_matrix(X)", "def parse_2016(year, file):\n with open(file) as file:\n content = file.read()\n # Place, Name, Age, Sex/plc, Sex, Time, Pace, City, State, Bib No\n cols = [\n 'place', 'first_name', 'last_name', 'age', 'sexpl', 'sex',\n 'time', 'pace', 'city', 'state', 'bib'\n ]\n parser = TDParser(columns=cols)\n parser.feed(content)\n return parser.results", "def format_data(file):\r\n \r\n \r\n data = pd.read_csv(file)\r\n data.index = list(data.iloc[:,0])\r\n data = data.iloc[:,1:]\r\n \r\n return data", "def make_corpus(txtfile, word_int, as_strings=False):\n corp = []\n li = load_help(txtfile)\n \n for i in xrange(len(li)):\n if li[i] == 'Value':\n doc = li[i+1]\n doc = doc.strip()\n doc = doc.strip('[')\n doc = doc.strip(']')\n doc = doc.split(', ')\n doc = [str(w) for w in doc]\n \n idoc = []\n for w in doc:\n try:\n i = word_int[w]\n if as_strings:\n idoc.append(w)\n else:\n idoc.append(int(i))\n except:\n pass\n \n 
corp.append(np.array(idoc))\n \n return corp", "def text_to_columns(text):\n parags = text.split(\"\\n\\n\")\n blocks = []\n for p in parags:\n block = splitter(p)\n blocks.append(block)\n output = \"\"\n for linechunks in zip_longest(*blocks, fillvalue=\"\"):\n line = \"\"\n for lc in linechunks[:-1]:\n line += lc + (COL_WIDTH + COL_SPACE - len(lc)) * \" \"\n line += linechunks[-1]\n output += line + \"\\n\"\n return output", "def read_data(input_file):\n\n def process_line(labels, words):\n l = ' '.join([label for label in labels if len(label) > 0])\n w = ' '.join([word for word in words if len(word) > 0])\n lines.append((l, w))\n words = []\n labels = []\n return words, labels, lines\n\n rf = open(input_file, 'r')\n lines = [];\n words = [];\n labels = []\n for line in rf:\n word = line.strip().split(' ')[0]\n label = line.strip().split(' ')[-1]\n # here we dont do \"DOCSTART\" check\n\n if len(line.strip()) == 0: # and words[-1] == '.'\n words, labels, lines = process_line(labels, words)\n words.append(word)\n labels.append(label)\n rf.close()\n return lines", "def format_ocr_text(self, page):\n \n #read out of the text file that tesseract made\n ocr_text = open(self.ocr_text, 'r')\n \n # write into this file\n djvu_text = open( self.djvu_text, 'w' )\n \n text = \"(page 0 0 1 1\\n\"\n \n self.out_text.write('\\n## Page %d ###\\n\\n' % page )\n \n for line in ocr_text:\n \n #write to the human readable file\n self.out_text.write(line)\n \n # add each line of text\n # escaping \" to \\\" as we go\n text += '(line 0 0 1 1 \"%s\")\\n' % line.replace('\"', r'\\\"').strip()\n \n text += \")\\n\"\n \n djvu_text.write( text )\n \n ocr_text.close()\n djvu_text.close()", "def parse_data(filename):\r\n labels = []\r\n documents = []\r\n with open(filename, 'r') as f:\r\n for line in f:\r\n values = line.split()\r\n label = values[0]\r\n document = []\r\n for wordCount in values[1:]:\r\n parsed = wordCount.split(':')\r\n word = parsed[0]\r\n count = int(parsed[1])\r\n document.append((word, count))\r\n labels.append(label)\r\n documents.append(document)\r\n return (labels, documents)", "def read(self):\n dataset = Dataset()\n\n file_list = glob.glob(str(self.directory + \"/*.txt\"))\n\n for file_path in file_list:\n file_name = os.path.basename(file_path)\n\n docid, partid_prefix, = file_name.replace('.txt', '').split('-', 1)\n # partid_prefix not complete due to multiple part cration for a single .txt file\n\n if 'Abstract' in partid_prefix:\n is_abstract = True\n else:\n is_abstract = False\n\n with open(file_path, encoding='utf-8') as file:\n text_raw = file.read()\n\n text = text_raw.replace('** IGNORE LINE **\\n', '')\n paragraph_list = text.split('\\n\\n')\n\n # inital offset for raw_text\n tot_offset = text_raw.count('** IGNORE LINE **\\n') * 18\n offsets = [tot_offset]\n\n for i, text_part in enumerate(paragraph_list):\n # if text is empty (usually last text due to splitting of \"\\n\\n\")\n if text_part != \"\":\n partid = \"{}-p{}\".format(partid_prefix, i + 1)\n\n if docid in dataset:\n dataset.documents[docid].parts[partid] = Part(text_part, is_abstract=is_abstract)\n else:\n document = Document()\n document.parts[partid] = Part(text_part, is_abstract=is_abstract)\n dataset.documents[docid] = document\n\n # add offset for next paragraph\n tot_offset += len(text_part) + 2\n offsets.append(tot_offset)\n\n # to delete last element\n del offsets[-1]\n\n # annotations\n with open(file_path.replace('.txt', '.ann'), encoding='utf-8') as f:\n reader = csv.reader(f, delimiter='\\t')\n for row 
in reader:\n if row[0].startswith('T'):\n entity_type, start, end = row[1].split()\n start = int(start)\n end = int(end)\n text = row[2]\n\n partid = None\n part_index = None\n\n for i in range(len(offsets) - 1):\n if offsets[i+1] > start:\n part_index = i\n break\n\n if part_index is None:\n part_index = len(offsets) - 1\n\n partid = \"{}-p{}\".format(partid_prefix, part_index + 1)\n real_start = start - offsets[part_index]\n real_end = end - offsets[part_index]\n calc_ann_text = document.parts[partid].text[real_start : real_end]\n\n if calc_ann_text != text:\n print(\" ERROR\", docid, part_index, partid, start, offsets, real_start, \"\\n\\t\", text, \"\\n\\t\", calc_ann_text, \"\\n\\t\", document.parts[partid].text)\n\n if entity_type == 'mutation':\n ann = Entity(self.mut_class_id, real_start, text)\n dataset.documents[docid].parts[partid].annotations.append(ann)\n\n elif entity_type == 'gene':\n ann = Entity(self.gene_class_id, real_start, text)\n dataset.documents[docid].parts[partid].annotations.append(ann)\n\n return dataset", "def get_formatted_data(line, indices=None):\n\tfile_data = str.strip(line).split(' ')\n\tif indices is None:\n\t\tdata = list(range(len(file_data)))\n\telse:\n\t\tdata = list(indices)\n\t\t\n\tfor i, file_column in enumerate(data):\n\t\tif file_column is not None:\n\t\t\tdatum = file_data[file_column]\n\t\telse:\n\t\t\tdatum = ' '\n\t\tif '.' in datum:\n\t\t\ttry:\n\t\t\t\tdatum = float(datum)\n\t\t\texcept:\n\t\t\t\tpass\n\t\telse:\n\t\t\ttry:\n\t\t\t\tdatum = int(datum)\n\t\t\texcept:\n\t\t\t\tpass\n\t\tdata[i] = datum\n\treturn data", "def split_data_corpus(filename):\n\n fid = 1\n with open(filename, 'r') as infile:\n f = open('%s-%s.txt' % (filename.strip('.txt'), fid), 'wb')\n for line, doc in enumerate(infile):\n f.write(doc)\n if not line % 1000 and line > 1:\n f.close()\n fid += 1\n f = open('%s-%s.txt' % (filename.strip('.txt'), fid),\n 'wb')\n f.close()", "def convert_txt_to_data():\n pass", "def loadtxt(filepath,comments='#',delimiter=None,skiprows=0,usecols=None,index_offset=1):\n d = np.loadtxt(filepath,comments=comments,delimiter=delimiter,skiprows=skiprows,usecols=usecols)\n if d.shape[1] < 3:\n raise ValueError('invalid number of columns in input')\n row = d[:,0]-index_offset\n col = d[:,1]-index_offset\n data = d[:,2]\n shape = (max(row)+1,max(col)+1)\n return csr_matrix((data,(row,col)),shape=shape)", "def parse_from_text_file(path):\n with open(path, 'r') as f:\n lines = f.read().split('\\n')\n res = []\n for l in lines:\n print(l)\n if len(l) < 1 or not l[0] in 'X#' :\n continue\n l = l.replace('#', str(BLOCK.WALL))\n l = l.replace('X', str(BLOCK.WALL))\n l = l.replace(' ', str(BLOCK.ROAD))\n l = l.replace('@', str(BLOCK.ROBOT))\n l = l.replace('+', str(BLOCK.ROBOT))\n l = l.replace('.', str(BLOCK.GOAL))\n l = l.replace('$', str(BLOCK.DIAM))\n l = l.replace('*', str(BLOCK.DIAM_ON_GOAL))\n l = [ int(x) for x in l]\n res.append(l)\n print(res)\n return res", "def _gen_data(fhs, columns, sep):\n for fh in fhs:\n for line in fh:\n if line[0] == \"#\": continue\n toks = line.split(sep)\n yield toks[columns[0]], int(toks[columns[1]]), float(toks[columns[2]])", "def findDocumentsTwo():\n lineTwo = 0\n counterTwo = 0\n\n with open('bc.processed2.csv', 'r') as readfile,\\\n open('documentsTwo.txt', 'w') as writefile:\n for line in readfile:\n lineTwo += 1\n if re.match('^<document', line):\n counterTwo += 1\n writefile.write(str(counterTwo) + '\\t' +\n str(lineTwo) + '\\t' + line)\n\n divided4 = counterTwo / 4\n lines4 = lineTwo / 4\n 
writefile.write('\\n' + '--------------------------------' + '\\n')\n writefile.write('divided4: ' + str(divided4) + '\\n')\n writefile.write('lines divided by 4: ' + str(lines4) + '\\n')\n writefile.write('--------------------------------' + '\\n')\n writefile.write('1: ' + '1\\n')\n writefile.write('2: ' + str(lines4) + '\\n')\n writefile.write('3: ' + str((lines4 * 2)) + '\\n')\n writefile.write('4: ' + str((lines4 * 3)))\n print('divided4: ' + str(divided4))\n print('lines divided by 4: ' + str(lines4))", "def parse_design(self, detailed_design_file):", "def create_from_file(self, file):\n self.value = []\n with open(file, \"r\") as f:\n fl = f.readlines()\n\n for l in fl:\n self.value.append([int(x) for x in l.split()])", "def read_file(infile,column_num):\n\n \n column_list = []\n\n with open(infile,'r') as f:\n\n fl = f.readlines()\n\n for line in fl:\n \n \n value = int(line.split()[int(column_num)-1])\n column_list.append(value)\n\n\n return column_list", "def load_file(filename):\n\tlabels = []\n\tdocs = []\n\n\twith open(filename) as f:\n\t\tfor line in f:\n\t\t\tcontent = line.split('\\t')\n\n\t\t\tif len(content) > 2:\n\t\t\t\tprint('incorrect read')\n\t\t\t\texit()\n\n\t\t\tif len(content[1]) == 0: continue\n\n\t\t\tdocs.append(str(content[1]).strip('\\r').strip('\\n').strip('\\r\\n'))\n\t\t\tlabels.append(content[0])\n\n\treturn docs, labels", "def load_data(filename):\n\n with open(filename) as f_obj: # Open file to read & assign file object\n for line in f_obj: # Read each line as text\n print(int(line)) # Convert to int & display", "def format_input(file):\n raw_grid=\"\"\n for line in open(file, \"r\").readlines():\n for ch in line:\n if ch in \"0123456789\":\n raw_grid += ch\n return raw_grid", "def parse_plain_text_export(text_file):\n\n text_file.seek(0)\n for line in text_file.readlines():\n urls = re.findall(URL_REGEX, line) if line.strip() else ()\n for url in urls:\n yield {\n 'url': url,\n 'timestamp': str(datetime.now().timestamp()),\n 'title': None,\n 'tags': '',\n 'sources': [text_file.name],\n }", "def csv(file):\n\n def atoms(lst):\n return map(atom, lst)\n\n def atom(x):\n try:\n return int(x)\n except:\n try:\n return float(x)\n except ValueError:\n return x\n\n for row in rows(file, prep=atoms):\n yield row", "def findDocumentsThree():\n lineTwo = 0\n counterTwo = 0\n\n with open('bc.processed3.csv', 'r') as readfile,\\\n open('documentsThree.txt', 'w') as writefile:\n for line in readfile:\n lineTwo += 1\n if re.match('^<document', line):\n counterTwo += 1\n writefile.write(str(counterTwo) + '\\t' +\n str(lineTwo) + '\\t' + line)\n\n divided2 = counterTwo / 2\n lines2 = lineTwo / 2\n writefile.write('\\n' + '--------------------------------' + '\\n')\n writefile.write('divided2: ' + str(divided2) + '\\n')\n writefile.write('lines divided by 2: ' + str(lines2) + '\\n')\n writefile.write('--------------------------------' + '\\n')\n writefile.write('1: ' + '1\\n')\n writefile.write('2: ' + str(lines2))\n print('divided2: ' + str(divided2))\n print('lines divided by 2: ' + str(lines2))", "def preprocess(doc_in, doc_out):\n def output(text, doc_id):\n doc_out.write(doc_id + \"\\n\")\n doc_out.write(text.replace(\"\\n\", \" \") + \"\\n\\n\")\n\n def filter_text(t):\n filtered_out = [\"<P>\", \"</P>\"]\n r = t\n for f in filtered_out:\n r = r.replace(f, \" \")\n return r\n\n\n doc_id = None\n reading_text = False\n text = \"\"\n for line in doc_in:\n if(str_text_start in line):\n if(reading_text):\n warning(\"Found \" + str_text_start + \" in text\")\n if(not 
doc_id):\n warning(\"Reading text without knowing id\")\n continue\n reading_text = True\n continue\n if((str_text_stop in line) and reading_text):\n output(text, doc_id)\n text = \"\"\n reading_text = False\n doc_id = None\n doc_id_match = pat_doc_no.match(line)\n if(doc_id_match):\n doc_id = doc_id_match.group(1)\n if(reading_text):\n warning(\"Found doc id in text\")\n continue\n if(reading_text):\n text = text + filter_text(line)", "def data_parser(filepath):\n d = [int(line) for line in open(filepath)]\n return (int(s) for s in d)", "def parse_docs_from_page(self, page_url: str, page_text: str) -> Iterable[Document]:\n # get the data\n data = requests.get(page_url)\n\n # load data into bs4\n soup = BeautifulSoup(data.text, 'html.parser')\n # links = []\n pdf_dis = []\n dates = []\n table = []\n version_hash_fields = []\n\n for tr in soup.find_all('tr'):\n date_col = soup.find_all('td', attrs={'class': 'fd-col2'})\n hyperlink_col = soup.find_all('td', attrs={'class': 'fd-col1'})\n values = [td.text for td in tr.find_all('td')]\n table.append(values)\n for link in hyperlink_col:\n pdf_url = 'https://www.health.mil/' + link.find('a')['href']\n pdf_di = DownloadableItem(doc_type='pdf',\n web_url=pdf_url)\n pdf_dis.append(pdf_di)\n for date in date_col:\n dates.append(date.text)\n\n doc_nums = []\n doc_titles = []\n doc_names = []\n for row in table[1:]:\n doc_data = row[0].split(':')\n\n if len(doc_data) == 1: # if no colon then no doc number\n if doc_data[0] == \"(DTM)-19-004 -Military Service by Transgender Persons and Persons with Gender Dysphoria (Change 1)\":\n doc_nums.append(\"19-004\")\n doc_names.append(\"DTM\")\n doc_titles.append(doc_data[0][14:])\n version_hash_fields.append({\"doc_name\": 'DTM', \"doc_title\": doc_data[0][14:]})\n else:\n doc_nums.append(\" \")\n doc_titles.append(doc_data[0])\n doc_names.append(doc_data[0])\n version_hash_fields.append({\"doc_name\": doc_data[0], \"doc_title\": doc_data[0]})\n else:\n\n tmptitle = doc_data[1][1:].replace(\"\\u201cClinical\",\"Clinical\").replace(\"System,\\u201d\",\"System\").replace(\"BUILDER\\u2122 \", \"Builder\").replace(\"\\u2013\",\"\")\n\n if \"Volume\" in tmptitle:\n doc_nums.append(doc_data[0][7:]+\" Volume \"+tmptitle.split()[-1])\n else:\n doc_nums.append(doc_data[0][7:])\n doc_titles.append(doc_data[1][1:].replace(\"\\u201cClinical\",\"Clinical\").replace(\"System,\\u201d\",\"System\").replace(\"BUILDER\\u2122 \", \"Builder\").replace(\"\\u2013\",\"\"))\n doc_names.append(doc_data[0][:6])\n\n version_hash_fields.append({\"doc_name\": doc_data[0][:7], \"doc_title\": doc_data[1]})\n\n parsed_docs = []\n page_url = 'https://www.health.mil/About-MHS/OASDHA/Defense-Health-Agency/Resources-and-Management/DHA-Publications'\n num_docs = len(doc_nums)\n for i in range(num_docs):\n # put all the relevant info into dictionaries\n doc = Document(doc_type=doc_names[i].replace(\" \",\"-\"),\n doc_title=doc_titles[i],\n doc_num=doc_nums[i],\n doc_name=doc_names[i].replace(\" \",\"-\")+\" \"+doc_nums[i],\n publication_date=dates[i],\n cac_login_required=False,\n crawler_used='dha_pubs',\n source_page_url=page_url,\n downloadable_items=[pdf_dis[i]],\n version_hash_raw_data=version_hash_fields[i])\n parsed_docs.append(doc)\n\n return parsed_docs", "def tsv_generator(file):\n for line in fileinput.input(file):\n article, summary = line.strip().split(\"\\t\")\n yield (article, summary)", "def loadtext2(infile):\n warrsn, farrsn =np.loadtxt(infile, usecols=(0, 1), unpack=True)\n return create_spectrum(warrsn, farrsn)", "def 
read_grid():\n file = open(\"grid_problem11.txt\" , 'r')\n grid = []\n for line in file:\n line_list = line.split(\" \")\n row = []\n for element in line_list:\n int_element = int(element)\n row.append(int_element)\n\n grid.append(row)\n\n return grid", "def read_by_word(skip_lines):\n\n drop_mathrm = re.compile(r'\\\\(mathrm|rm)\\{(?P<mathrm>.*?)\\}')\n merge_colname = re.compile(r' *_')\n skip_latex = str.maketrans('', '', '{}$\\\\')\n with open(filename, 'r') as param_file:\n for line in param_file:\n if line.startswith('References.'):\n return\n if skip_lines > 0:\n skip_lines -= 1\n else:\n for word in merge_colname.sub(\n '_',\n drop_mathrm.sub(r'\\g<mathrm>',\n line).translate(skip_latex)\n ).split():\n yield word", "def read(self, stream):\n root = []\n headings = []\n columns = []\n\n lines = [line.rstrip() for line in stream.read().splitlines()]\n\n if (not args.headings) or args.loose_headings:\n \"\"\"\n Most columns are probably left-justified but some (like numeric data) might be right-justified. We need to\n examine all the lines to see where each column begins and ends. We'll consider a column complete when we reach\n the end of a column where the same position is whitespace on all of the lines.\n \"\"\"\n\n c = 0\n start = 0\n while any([c < len(line) for line in lines]):\n if all([line[c:c+1].ljust(1) in string.whitespace for line in lines]) and \\\n any([line[start:c].strip() for line in lines]):\n \"\"\"\n Remember the beginning and end of this column\n \"\"\"\n columns.append((start, c))\n start = c\n c += 1\n\n \"\"\"\n Complete the trailing column\n \"\"\"\n if any([line[start:].strip() for line in lines]):\n columns.append((start, sys.maxsize))\n else:\n if lines:\n maxlen = max([len(line) for line in lines])\n delimiters = list(re.finditer('(\\s{2,})', lines[0]))\n if delimiters:\n if delimiters[0].start(1) > 0:\n log.debug('First delimiter: {}:{} {!r}'.format(delimiters[0].start(1), delimiters[0].end(1), delimiters[0].group(1)))\n columns.append((0, delimiters[0].end(1)))\n else:\n parser.error('Leading columns in heading row no allowed')\n for (pos, delimiter) in enumerate(delimiters):\n columns.append((delimiter.end(1), maxlen if pos + 1 == len(delimiters) else delimiters[pos + 1].end(1)))\n else:\n columns = [(0, maxlen)]\n else:\n parser.error('No heading row')\n\n log.debug('columns: {columns}'.format(**locals()))\n\n if args.headings and lines:\n headings = [lines[0][stops[0]:stops[1]].strip() for stops in columns]\n\n for line in lines[1 if args.headings else 0:]:\n if args.headings:\n root.append({headings[num]: line[start:stop].strip() for (num, (start, stop)) in enumerate(columns)})\n else:\n root.append([line[start:stop].strip() for (start, stop) in columns])\n\n return (root, headings)", "def toBlock_txt(filename):\n blocks = []\n block = []\n \n for line in open(filename).readlines()[3:-3]:\n spline = line.split(\"\\t\")\n if not re.search(\"[0-9]\", line):\n if block != []:\n blocks.append(block)\n block = []\n else:\n for i in spline[2:-2]:\n if re.search(\"[0-9]\", i):\n block.append(float(\"\".join([chr(j) for j in map(ord, i) if j > 0])))\n\n return blocks", "def text_to_columns(text):\n cols = []\n for paragraph in text.split(\"\\n\\n\"):\n col_lines = textwrap.fill(paragraph, width=COL_WIDTH).split(\"\\n\")\n cols.append(col_lines)\n\n output = []\n # need zip_longest otherwise text will get lost\n for row in zip_longest(*cols, fillvalue=''):\n output.append(_format(row))\n \n return \"\\n\".join(output)\n\n\n\n >>> text_to_columns(text)", "def 
txt_to_dataframe(folder,name_parcellation):\n column_weight = ['patients','degree', 'density', 'global_efficiency', 'transitivity', 'assortavity', 'clustering_coef',\n 'fiedler_value', 'small_worldness','Null']\n\n file_name=folder+name_parcellation+'.txt'\n data=pd.read_csv(file_name,header=None,delimiter=';')\n data.columns=column_weight\n data=data.drop(['Null'],axis=1)\n file_len=folder+name_parcellation+'_len.txt'\n data_len=only_connected_patients(file_len)\n data_len=data_len.values\n data['length']=data_len\n data=data[data['length']>-1.0]\n data=data.reset_index(drop=True)\n return data", "def number_idx(self, filename):\n with open(filename) as fh:\n firstline = fh.readline()\n parts = firstline.split('\\t')\n # only add if there are 4 parts\n if len(parts) != 4:\n return\n\n count = 1\n def writeline(fho, line, count):\n fho.write(line.rstrip() + '\\t' + str(count) + '\\n')\n\n with open(filename + '.tmp', 'w+b') as fho:\n writeline(fho, firstline, count)\n count += 1\n for line in fh:\n writeline(fho, line, count)\n count += 1\n\n shutil.move(filename + '.tmp', filename)", "def text_to_columns(text):\n split_text = [i.strip() for i in text.split(\"\\n\\n\")]\n text2col = [wrap(i.strip(), COL_WIDTH) for i in split_text]\n result = []\n for i in zip(*text2col):\n result.append(\" \".join(i))\n return \"\".join(result)", "def parsePuzzle(fileName):\n data = []\n f = open(fileName, 'r')\n for line in f:\n splitLine = line.split(sep=\" \")\n row = []\n if len(splitLine) >= 9:\n for i in range(9):\n row.append(int(splitLine[i]))\n data.append(row)\n f.close()\n return SudokuPuzzle(data)", "def pipeline(file):\n # special processing is performed to avoid sentence boundaries after abbrevs\n doc = nlp(text_processing.preprocess_text_ents(file))\n grid = get_grid(doc)\n distrib = get_distrib(grid, doc)\n return get_feats(distrib)", "def doc_loader_3_english(doc_path=doc_path_english ,num_samples=1280):\r\n print(\"Loading Doc.....\")\r\n row_cnt = 0\r\n with open(doc_path,'r', encoding='UTF-8') as f:\r\n #content = f.read() # Loads the whole FIle ## CAUTION :- May result in memory overload , solution dataset obj/ generator\r\n for row in f:\r\n row_cnt += 1\r\n #print(row_cnt)\r\n if num_samples != None:\r\n if row_cnt <= num_samples:\r\n temp_row = [int(i) for i in row.split()]\r\n if len(temp_row) < max_len:\r\n temp_row = temp_row + [0] * (max_len - len(temp_row))\r\n yield (temp_row)\r\n else:\r\n temp_row = temp_row[:max_len+1]\r\n yield (temp_row)\r\n else:\r\n break\r\n else:\r\n temp_row = [int(i) for i in row.split()]\r\n if len(temp_row) < max_len:\r\n temp_row = temp_row + ([0] * (max_len - len(temp_row)))\r\n yield (temp_row)\r\n else:\r\n temp_row = temp_row[:max_len + 1]\r\n yield (temp_row)", "def get_table_from_file(file_name):\n try:\n with open(file_name, \"r\") as file:\n lines = file.readlines()\n\n table = [element.replace(\"\\n\", \"\").split(\"\\t\") for element in lines]\n\n nodes = list()\n for node in table:\n new_node = []\n for coordinate in node:\n new_node.append(float(coordinate))\n\n nodes.append(new_node)\n\n return nodes\n\n except FileNotFoundError as f:\n raise f from None\n except Exception as e:\n raise e from None", "def convert_text_to_df(text):\n new_list = [i.strip() for i in text.splitlines() if i.strip() != \"\"]\n new_dict = {}\n col_name = new_list[0].strip().split()\n index_name = new_list[1].strip()\n for item in new_list[2:]:\n index, *others = item.split()\n others = [float(i) for i in others]\n new_dict[index] = others\n new_df = 
pd.DataFrame(new_dict).transpose()\n new_df.index.name = index_name\n new_df.columns = col_name\n return new_df", "def extract_numbers(text_file, identifier, columns=None):\n with open(text_file) as fnm:\n for line in fnm:\n if identifier in line:\n labels = line.split()\n break\n else:\n raise ValueError(\"{} not found \\\n in the file {}\".format(identifier, text_file))\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n data = np.genfromtxt(fnm, usecols=columns, invalid_raise=False)\n\n # ~ is a shorthand for numpy.logical_not\n data = data[~np.isnan(data).any(axis=1)].T\n\n result = {label: data[i].copy() for i, label in enumerate(labels)}\n return result", "def snippetyielder(filename):\n\ttext = open(filename, \"r\")\n\ta = text.readlines()\n\tp = \"\".join(a) \t #detecting the breaks between documents and identifying them to break the docs with\n\n\n\tdocbreak = re.sub(r\".*([1\\?RU]+[ce][j~p]+o[rtd\\*]+ .[2Jf].*)\",r\"DOCBREAK \\1\",p)\n\tdocbreak = re.sub(r\"(.*[lL]ett.*fro[mn].*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Petition .f.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Order o[/f].*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(General order of.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Special order of.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Unofficial letter of.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Letter of .*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*([\\[I\\]]\\s*T[cue]legram.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(\\[Enclosure.+\\].*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Extracts* from .*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(A[hb]stract[of ]*log.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Instructions from.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(A[hb]stract of statement.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Instructions* of.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Memorandum from.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*([llifM]+emorandum of.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Communication from.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Statement of circumstances.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Further report of.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Second report of.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Additional report of.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Detailed repor[~t] of.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(General report of.*)\",r\"DOCBREAK \\1\",docbreak)\n\tdocbreak = re.sub(r\".*(Deposition of.*)\",r\"DOCBREAK \\1\",docbreak)\n\t# docbreak = re.sub(r\"(DOCBREAK)+\",r\"DOCBREAK\\n\",docbreak) \t\n\tdocbreaks = docbreak.split(\"DOCBREAK\") \t #yielding one document at a time\n\tfor doc in docbreaks:\n\t\tif re.search(r\".+\",doc): \t\n\t\t\tyield doc", "def readOFColumnData(dataFile,nCol):\n fileCheck(dataFile) # does the file exists ? 
Stop if not.\n #\n # Init list\n data = []\n #\n for line in fileinput.input(dataFile):\n # remove parenthesis if any\n line = line.replace('(', '')\n line = line.replace(')', '') \n # divide each element of the line into words\n words = line.split()\n if words: # if there is a line in fact\n if words[0][0]!='#': #do something only if not comment \n data.append(float(words[nCol])) \n # \n return data", "def create_table(file_to_use):\n lines = []\n for line in file_to_use:\n lines.append(line.split(\",\"))\n lines[-1][-1] = lines[-1][-1][:-1]\n return lines", "def idsFromDocumentation(filename):\n\tidsInOrder = []\n\tsegment = \"\"\n\twith open(filename) as f:\n\t\tfor l in f:\n\t\t\tif \"<h2\" in l:\n\t\t\t\tsegment = l.split(\">\")[1].split(\"<\")[0]\n\t\t\tif 'id=\"SCI_' in l:\n\t\t\t\tidFeature = l.split('\"')[1]\n\t\t\t\t#~ print(idFeature)\n\t\t\t\tidsInOrder.append([segment, idFeature])\n\treturn idsInOrder", "def list_to_file(sorted_list, filename):\n doc = Document()\n table = doc.add_table(rows=1, cols=2)\n hdr_cells = table.rows[0].cells\n hdr_cells[0].text = 'Word'\n hdr_cells[1].text = 'Occurrence'\n\n for key, value in sorted_list:\n row_cells = table.add_row().cells\n row_cells[0].text = key\n row_cells[1].text = str(value)\n\n doc.save(\"sorted - \" + filename)", "def data_parser(filepath):\n tmp = open(filepath).read().split('\\n')\n return [int(x) for x in tmp]", "def process_glove_data(filename):\r\n\r\n word_list = []\r\n embed_list = []\r\n with open(filename,encoding=\"utf8\") as file:\r\n lines = file.readlines()\r\n for line in lines:\r\n toks = line.split(' ')\r\n word_list.append(toks[0])\r\n vec = [float(tok) for tok in toks[1:]]\r\n embed_list.append(vec)\r\n \r\n embed = np.array(embed_list,dtype=float)\r\n embed_df = pd.DataFrame(embed,index=word_list)\r\n embed_df.index = embed_df.index.str.lower()\r\n \r\n return embed_df", "def index_file(self, file_name):\n self.contents = []\n article_text = \"\"\n article_annots = [] # for annot-only index\n\n f = open(file_name, \"r\")\n for line in f:\n line = line.replace(\"#redirect\", \"\")\n # ------ Reaches the end tag for an article ---------\n if re.search(r'</doc>', line):\n # ignores null titles\n if wiki_uri is None:\n print \"\\tINFO: Null Wikipedia title!\"\n # ignores disambiguation pages\n elif (wiki_uri.endswith(\"(disambiguation)>\")) or \\\n ((len(article_text) < 200) and (\"may refer to:\" in article_text)):\n print \"\\tINFO: disambiguation page \" + wiki_uri + \" ignored!\"\n # ignores list pages\n elif (wiki_uri.startswith(\"<wikipedia:List_of\")) or (wiki_uri.startswith(\"<wikipedia:Table_of\")):\n print \"\\tINFO: List page \" + wiki_uri + \" ignored!\"\n # adds the document to the index\n else:\n self.__add_to_contents(Lucene.FIELDNAME_ID, wiki_uri, Lucene.FIELDTYPE_ID)\n if self.annot_only:\n self.__add_to_contents(Lucene.FIELDNAME_CONTENTS, article_annots, Lucene.FIELDTYPE_ID_TV)\n else:\n self.__add_to_contents(Lucene.FIELDNAME_CONTENTS, article_text, Lucene.FIELDTYPE_TEXT_TVP)\n self.lucene.add_document(self.contents)\n self.contents = []\n article_text = \"\"\n article_annots = []\n\n # ------ Process other lines of article ---------\n tag_iter = list(self.tagRE.finditer(line))\n # adds line to content if there is no annotation\n if len(tag_iter) == 0:\n article_text += line\n continue\n # A tag is detected in the line\n for t in tag_iter:\n tag = t.group(3)\n if tag == \"doc\":\n doc_title = self.titleRE.search(t.group(2))\n wiki_uri = WikipediaUtils.wiki_title_to_uri(doc_title.group(1)) 
if doc_title else None\n if tag == \"a\":\n article_text += t.group(1) + t.group(4) # resolves annotations and replace them with mention\n # extracts only annotations\n if self.annot_only:\n link_title = self.linkRE.search(t.group(2))\n link_uri = WikipediaUtils.wiki_title_to_uri(unquote(link_title.group(1))) if link_title else None\n if link_uri is not None:\n article_annots.append(link_uri)\n else:\n print \"\\nINFO: link to the annotation not found in \" + file_name\n last_span = tag_iter[-1].span()\n article_text += line[last_span[1]:]\n f.close()", "def parse_trflp(lines):\r\n\r\n sample_ids = []\r\n otu_ids = []\r\n data = []\r\n non_alphanum_mask = re.compile('[^\\w|^\\t]')\r\n # not sure why the above regex doesn't cover the following regex...\r\n dash_space_mask = re.compile('[_ -]')\r\n\r\n for i, line in enumerate(lines):\r\n elements = line.strip('\\n').split('\\t')\r\n\r\n # special handling for the first line only\r\n if i == 0:\r\n # validating if the file has a header\r\n if elements[0] == '':\r\n for otu_id in elements[1:]:\r\n otu_ids.append(non_alphanum_mask.sub('_', otu_id))\r\n continue\r\n else:\r\n for j, otu_id in enumerate(elements[1:]):\r\n otu_ids.append(non_alphanum_mask.sub('_', 'Bin%3d' % j))\r\n\r\n # handling of all other lines\r\n current_row = []\r\n\r\n # converting each value in the row to int\r\n for count in elements[1:]:\r\n try:\r\n current_row.append(int(round(float(count), 0)))\r\n except ValueError:\r\n current_row.append(0)\r\n\r\n # if the sum of all the values is equial to 0 ignore line\r\n if sum(current_row) == 0:\r\n continue\r\n\r\n # adding sample header to list\r\n sample_ids.append(non_alphanum_mask.sub('.',\r\n dash_space_mask.sub('.', elements[0])))\r\n\r\n # validating the size of the headers to add missing columns\r\n # this is only valid when there is no header\r\n if len(current_row) > len(otu_ids):\r\n # modify header data\r\n extra_cols = []\r\n for j in range(len(otu_ids), len(current_row)):\r\n extra_cols.append(non_alphanum_mask.sub('_', 'Bin%3d' % j))\r\n # modify data\r\n for j in range(len(data)):\r\n data[j].extend([0] * (len(current_row) - len(otu_ids)))\r\n\r\n otu_ids.extend(extra_cols)\r\n elif len(current_row) < len(otu_ids):\r\n # modify data\r\n current_row.extend([0] * (len(otu_ids) - len(current_row)))\r\n\r\n data.append(current_row)\r\n\r\n return sample_ids, otu_ids, asarray(data).transpose()", "def process_wiki_file(args: Tuple[str, str, int]) -> str:\n filepath, language, min_sent_word_count = args\n with bz2.open(filepath, \"rt\", encoding=\"utf8\") as bz2_file:\n\n # Extract text between <doc> xml tags\n soup = BeautifulSoup(bz2_file.read(), \"lxml\")\n docs = soup.find_all(\"doc\")\n wiki_dump_content = \"\"\n for i, doc in enumerate(docs):\n processed_text = process_wiki_doc_text(\n doc.text, language, min_sent_word_count\n )\n if len(processed_text) == 0:\n continue\n\n # Append to result\n if i > 0 and len(wiki_dump_content) > 0:\n wiki_dump_content += \"\\n\"\n wiki_dump_content += processed_text\n\n return wiki_dump_content", "def gen_review_data(fp: str) -> None:\n with open(fp, encoding='utf-8') as f:\n for line in f:\n data = json.loads(line)\n utils.preprocess_raw_json(data)\n doc = {\n \"_index\": \"review\",\n \"_source\": data\n }\n yield doc", "def _isotope_txt_data(self):\n with open(self.filename + '_txt', mode='rt') as fh:\n txt = fh.readlines()\n\n data = []\n frames = self.header['frames']\n line = 0\n while line < len(txt):\n if txt[line].startswith('B ='):\n Tc = 
txt[line].split('=')[-1].strip().strip(' ms')\n Tc = float(Tc)/1000\n d = np.loadtxt(txt[line + 2 : line + 2 + frames])\n data.append(d[:, 1]/Tc)\n line += 2 + frames\n line += 1\n\n self.data = xarray.DataArray(data,\n dims=('species', 'frame'),\n coords={'species': ('species', list(self.header['label list']))},\n attrs={'unit': 'counts/s'})", "def _parse_textfile(self):\n\n field_names = list(self.FIELD_NAME_TO_INDEX.keys())\n field_indices = list(self.FIELD_NAME_TO_INDEX.values())\n frame = pd.read_csv(\n self.filepath,\n header=None, # MAGIC file has no header line\n delimiter=self.DELIMITER,\n usecols=field_indices,\n names=field_names,\n converters=self.FIELD_CONVERTERS,\n )\n return frame", "def post_process_output_file():\n parsed_data = []\n unparseable_data = []\n\n with open('../output/part-00000', 'r') as input_file:\n for line in input_file:\n line = line.strip()\n try:\n csv_splits = line.split(',')\n csv_splits[0] = int(csv_splits[0])\n # parsed_data is a list of lists\n parsed_data.append(csv_splits)\n except ValueError:\n unparseable_data.append(line)\n parsed_data.sort()\n\n with open('../output/titanic_test_data.csv', 'w') as output_file:\n # start with lines that couldn't be parsed\n # hopefully this will only be the original header\n for line in unparseable_data:\n output_file.write(\"%s\\n\" % line)\n for line in parsed_data:\n output_file.write(\"%d,%s\\n\" % (line[0], line[1]))", "def write_segment(live_list, report_list):\n #assert len(live_list) == len(report_list)\n with open(os.path.join(trainDir, 'train_text_data'), 'a') as train_text_data: # a means append mode\n for r, l in zip(report_list, live_list):\n sample = \"abstract=<d> <p> <s> \" + r.strip(\"\\r\\n\") + \"</s> </p> </d>\"\t\\\n + \"\\t\" + \"article=<d> <p> <s> \" + l.strip(\"\\r\\n\") + \\\n \" . 
</s> </p> </d>\\tpublisher=AFP\"\n train_text_data.write(sample + \"\\n\")", "def build_dataframe(textline):\n column_names = []\n records = [line.split(u',') for line in textline]\n records = [pd.np.nan if token in (u'\\\\N', 'NULL') else token for token in records]\n # df_line = pd.read_csv(textline, header=None, names=column_names)\n df = pd.DataFrame(records, columns=column_names)\n df = df.convert_objects(convert_numeric=True)\n df.set_index('msisdn', inplace=True)\n print('-----', df.dtypes)\n return df", "def text_to_columns(text):\n \n dedented_txt = textwrap.dedent(text).strip()\n dedented_text = dedented_txt.splitlines()\n for line in dedented_text:\n ded_list = [textwrap.fill(line.strip(), initial_indent='', subsequent_indent='', width=20) for line in dedented_text] \n ded_list2=[]\n ded_list2.append(ded_list)\n return print(tabulate(ded_list2, tablefmt ='plain'))", "def parse_2015(year, file):\n with open(file) as file:\n content = file.read()\n # Place, Name, Age, Sex/plc, Sex, Time, Pace, City, State, Bib No,\n # Clock Time, Link (NOTE: not sure why omitting the link works, but it does)\n cols = [\n 'place', 'first_name', 'last_name', 'age', 'sexpl', 'sex',\n 'time', 'pace', 'city', 'state', 'bib', 'clocktime',\n ]\n parser = TDParser(columns=cols)\n parser.feed(content)\n return parser.results", "def parse_txt(txt_path, debug_till_row=None, join_desc=False, return_max_len=False, fraction=1,\n label_prefix=\"__label__\", seed=None):\n\n with open(txt_path, \"r\") as infile:\n if debug_till_row not in [None, -1]:\n data = infile.read().split(\"\\n\")[:debug_till_row]\n else:\n data = infile.read().split(\"\\n\")\n\n max_len = -1\n assert 0 < fraction <= 1\n if fraction < 1:\n if seed is not None:\n np.random.seed(seed)\n size = int(round(fraction * len(data)))\n inds = np.arange(len(data))\n np.random.shuffle(inds)\n data = [data[i] for i in inds[:size]]\n\n descs, labels = [], []\n for row in data:\n row_splitted = row.split()\n num_words = len(row_splitted)\n if num_words == 1:\n continue\n max_len = max(max_len, len(row_splitted))\n\n tmp = []\n for ind, w in enumerate(row_splitted):\n if not w.startswith(label_prefix):\n break\n tmp.append(w[len(label_prefix):])\n\n labels.append(\" \".join(tmp))\n if join_desc:\n descs.append(\" \".join(row_splitted[ind:]))\n else:\n descs.append(row_splitted[ind:])\n\n if return_max_len:\n return descs, labels, max_len\n return descs, labels", "def iterfile(f):\n for line in f:\n fields = line.replace(\"\\n\",\"\").split(\"\\t\")\n yield float(fields[0]), fields[-1].lower()", "def fileUnigene():\n \n with open(gene2unigene, 'r') as unigene,\\\n open(UniGene, 'w') as gene:\n \n header_line = next(unigene)\n header_line= header_line.split(\"\\t\")\n \n \n###################################################################################\n# #\n# #\n# Index Value of columns we need # \n# # \n# #\n################################################################################### \n GeneID_index = header_line.index('#GeneID')\n Unigene_index = header_line.index('UniGene_cluster\\n')\n \n for line in tqdm.tqdm(unigene, 'Time for loop of unigeneConversion'):\n lineList= line.split(\"\\t\")\n if (re.match(r\"^[a-zA-Z]{2,3}[.]([0-9]*)$\", lineList[1])):\n gene.write(lineList[GeneID_index] + \"\\tUniGene\\t\" + str(lineList[Unigene_index]))", "def write_text_file(data, file_name):\n\timport types\n\toutf = open(file_name, \"w\")\n\tif (type(data[0]) == types.ListType):\n\t\t# It is a list of lists\n\t\tfor i in xrange(len(data[0])):\n\t\t\tfor j 
in xrange(len(data)):\n\t\t\t\tif type(data[j][i]) == type(0):\n\t\t\t\t\toutf.write(\" %12d\"%data[j][i])\n\t\t\t\telse:\n\t\t\t\t\toutf.write(\" %12.5g\"%data[j][i])\n\t\t\toutf.write(\"\\n\")\n\telse:\n\t\t# Single list\n\t\tfor j in xrange(len(data)):\n\t\t\tif type(data[j]) == type(0):\n\t\t\t\toutf.write(\" %12d\\n\"%data[j])\n\t\t\telse:\n\t\t\t\toutf.write(\" %12.5g\\n\"%data[j])\n\toutf.close()", "def read_file(self):\n colspecs = [[0, 7]] # for the id\n names = ['id']\n for question in self.question_list:\n colspecs.extend(question.get_column_range())\n names.extend(question.get_column_names())\n\n self.data = pd.read_fwf(self.file, colspecs=colspecs, encoding=self.encoding, names=names, header=None)\n self.data.fillna(0, inplace=True)\n self.data = self.data.astype(int)\n return self.data", "def write_raw_text(self, path='.'):\n cells = self.get_cells()\n arrays = []\n for cell in cells:\n arrays.append(cell.data)\n array = np.concatenate(arrays)\n fn = os.path.join(path, self.label + '.txt')\n fmt = []\n p = re.compile('(\\w)(\\d+)')\n for key, value in self.datatype:\n m = p.search(value)\n if m:\n kind, size = m.groups()\n # strings\n if kind == 'S':\n add = '%{}c'.format(size)\n # integers\n elif kind in ['u', 'i']:\n add = '%d'\n else:\n add = '%.8e'\n else:\n add = '%.8e'\n fmt.append(add)\n np.savetxt(fn, array, fmt=fmt, delimiter='\\t')\n return", "def load_data_from_xsl(file_name):\n\tnlp_data = pd.read_excel(file_name, sheet_name=0, header=0, usecols=[1, 2, 3],\n\t converters={'bug_id': str, 'summary': str, 'description': str})\n\tnlp_data.fillna(' ', inplace=True)\n\n\t# nlp_data['description'] = nlp_data['description'].map(lambda x: clean_str(x+''))\n\n\treturn nlp_data", "def read_txt(self, widths=[3, 21, 4, 6, 4, 6, 12, 12]):\n cols = ['ID', 'SSSSSSSS.mmmuuun', 'AMP', 'THR', 'A-FRQ', 'R-FRQ', 'SIG STRNGTH', 'ABS-ENERGY']\n\n widths = widths\n self.data = pd.read_fwf(self.data_file, widths=widths, header=None, skiprows=self.skip_rows)\n self.data.columns = cols\n\n self.data = self.data.loc[self.data['ID'] == 1]\n self.skip_rows += len(self.data)", "def doc_title_table(title_file):\n with open(title_file , 'r') as f :\n lines = f.readlines()\n raw_text = \"\".join(l for l in lines)\n left_idx_num = [ m.end(0) for m in re.finditer(r\"<num>\",raw_text)]\n right_idx_num = [ m.start(0) for m in re.finditer(r\"</num>\",raw_text)]\n\n left_idx_title = [ m.end(0) for m in re.finditer(r\"<title>\",raw_text)]\n right_idx_title = [ m.start(0) for m in re.finditer(r\"</title>\",raw_text)]\n\n docs_title_dict = {}\n for i in range(len(left_idx_num)):\n docs_title_dict[raw_text[left_idx_num[i]+1:right_idx_num[i]-1]] = raw_text[left_idx_title[i]+1:right_idx_title[i]-1]\n return docs_title_dict", "def prepare_text_data(descriptions):\n text_data = []\n for line in descriptions:\n tokens = prepare_text_for_lda(line)\n text_data.append(tokens)\n return text_data", "def extract_text(infile):\n # Get text from mudraw\n text = subprocess.check_output(['mudraw', '-F', 'txt', infile])\n\n # Cleanup raw text\n match = re.search(\n r'.*?Activity \\/ Remarks(?P<table1>.*?)Activities not shown on the ' +\n r'DABS Chart Side:.*?Activity \\/ Remarks(?P<table2>.*?)For detailed ' +\n r'information regarding the DABS',\n text,\n re.MULTILINE | re.DOTALL)\n if not match:\n raise ExtractionError('Could not extract text from PDF.')\n false_or_none_string = lambda x: bool(x) and x.lower() != 'none'\n data = '\\n\\n\\n'.join(match.groups())\n raw_parts = re.sub(r'\\n[ \\t]+\\n', '\\n\\n', 
data).split('\\n\\n\\n')\n parts = filter(false_or_none_string, map(lambda x: x.strip(), raw_parts))\n\n # Write CSV\n headers = (\n b'Firing-Nr\\nD-/R-Area\\nNOTAM-Nr',\n b'Validity UTC',\n b'Lower Limit\\nAMSL or FL',\n b'Upper Limit\\nAMSL or FL',\n b'Location',\n b'Center Point',\n b'Covering Radius',\n b'Activity / Remarks',\n )\n rows = []\n for i, part in enumerate(parts):\n # Regexes\n multiple_newlines_re = re.compile(r'\\n+')\n height_re = re.compile(r'(GND|[0-9]+m \\/ [0-9]+ft|FL[0-9]{2,3}|REF AIP)')\n center_radius_re = re.compile(r'([0-9]{6}N [0-9]{7}E)\\s+?(.*?NM)')\n\n # Separate columns (warning: hackish code ahead!)\n row = {}\n step1 = re.split(r'([0-2][0-9][0-6][0-9] - [0-2][0-9][0-6][0-9])', part)\n row['nr'] = step1[0].strip()\n timestring = '\\n'.join(step1[1:-1])\n row['validity'] = multiple_newlines_re.sub('\\n', timestring)\n step2 = filter(None, height_re.split(step1[-1].strip()))\n row['lower'] = step2[0]\n row['upper'] = step2[2]\n step3 = filter(None, center_radius_re.split(step2[-1].strip()))\n row['location'] = step3[0].strip()\n row['center'] = step3[1].strip()\n row['radius'] = step3[2].strip()\n row['activity'] = multiple_newlines_re.sub('\\n', step3[3].strip())\n\n # Add to list of rows\n rows.append((\n row['nr'].encode('utf8'),\n row['validity'].encode('utf8'),\n row['lower'].encode('utf8'),\n row['upper'].encode('utf8'),\n row['location'].encode('utf8'),\n row['center'].encode('utf8'),\n row['radius'].encode('utf8'),\n row['activity'].encode('utf8'),\n ))\n\n return tablib.Dataset(*rows, headers=headers)", "def parse_lab_data(filename: str):\r\n cur = con.cursor()\r\n cur.execute(\r\n \"\"\"CREATE TABLE Lab (\r\n [Patient_ID] INTEGER PRIMARY KEY,\r\n [Admission_ID] INTEGER,\r\n [Lab_Name] VARCHAR(70),\r\n [Lab_Value] DECIMAL(6,2),\r\n [Lab_Units] VARCHAR(20),\r\n [Lab_Date] VARCHAR(10))\"\"\"\r\n )\r\n with open(filename) as file:\r\n next(file) # O(1)\r\n for line in file: # NM times\r\n content = line.split(\"\\t\") # O(1)\r\n content[3] = float(content[3])\r\n content[5] = content[5].split()[0]\r\n cur.execute(\"INSERT INTO Lab VALUES (?, ?, ?, ?, ?, ?)\", content)\r\n\r\n return", "def _gen_txt_data(self, f):\n\t\treader = iter(f)\n\n\t\tfor line_num, line in enumerate(reader):\n\t\t\tif line_num == 0 and self.has_header:\n\t\t\t\tcontinue\n\n\t\t\tdatum = line.rstrip('\\r\\n')\n\n\t\t\tyield datum, line_num+1", "def add_tabular(self, files, id_column, text_column, prepend_columns=None, encoding='utf8',\n doc_label_fmt='{basename}-{id}', force_unix_linebreaks=True, **kwargs):\n\n try:\n import pandas as pd\n except ImportError:\n raise RuntimeError('package `pandas` must be installed to use this function')\n\n if isinstance(files, str):\n files = [files]\n\n read_opts = {\n 'encoding': encoding,\n 'usecols': [id_column, text_column]\n }\n\n if prepend_columns:\n require_listlike(prepend_columns)\n read_opts['usecols'] += prepend_columns\n\n if all(isinstance(x, int) for x in read_opts['usecols']):\n id_column, text_column = 0, 1\n if prepend_columns:\n prepend_columns = list(range(2, len(prepend_columns) + 2))\n\n read_opts.update(kwargs)\n\n read_opts_excel = read_opts.copy()\n del read_opts_excel['encoding']\n\n for fpath in files:\n if fpath.endswith('.csv'):\n data = pd.read_csv(fpath, **read_opts)\n elif fpath.endswith('.xls') or fpath.endswith('.xlsx'):\n data = pd.read_excel(fpath, **read_opts_excel)\n else:\n raise ValueError('only file extensions \".csv\", \".xls\" and \".xlsx\" are supported')\n\n basename, _ = os.path.splitext(fpath)\n 
basename = os.path.basename(basename).strip()\n\n for idx, row in data.iterrows():\n doclabel = doc_label_fmt.format(basename=basename, id=row[id_column], row_index=idx)\n\n if doclabel in self.docs:\n raise ValueError(\"duplicate label '%s' not allowed\" % doclabel)\n\n if prepend_columns:\n text = '\\n\\n'.join([row[col] for col in (prepend_columns + [text_column]) if pd.notna(row[col])])\n else:\n text = row[text_column] if pd.notna(row[text_column]) else ''\n\n if force_unix_linebreaks:\n text = linebreaks_win2unix(text)\n\n self.docs[doclabel] = text\n self.doc_paths[doclabel] = fpath + ':' + str(idx)\n\n return self", "def process_file(f):\n\n header = f.readline()\n if header.startswith(\"\\t\"):\n header = header[1:]\n cell_file_names = header.split(FS)\n\n map(list, zip(*[(1, 2), (3, 4), (5, 6)]))\n\n [cell_names, donor_nums, tissue_types, cell_nums] = map(list, zip(\n *[ extract_cell_name_data(x) \n for x in cell_file_names\n ]\n ))\n\n for line in f:\n\n toks = line.split(FS)\n gene_string = toks[0]\n\n (ensembl_gene, gene) = extract_gene_and_ensembl(gene_string)\n\n expr_vals = toks[1:len(toks)]\n\n for i in range(len(expr_vals)):\n if float(expr_vals[i]) == 0:\n continue\n\n # non-zero value\n output_line(cell=cell_names[i],\n donor_num=donor_nums[i],\n tissue_type=tissue_types[i],\n cell_num=cell_nums[i],\n ensembl_gene=ensembl_gene,\n gene=gene,\n expression=expr_vals[i])", "def score_integerize():\n for file_n in annual_report_indexes:\n print file_n\n\n data_frame = fu.read_file_to_df(corporation_index_file_url, file_n + '_index')\n data_frame['int_score'] = data_frame[u'企业总评分'.encode('utf-8')].apply(lambda x: round(x))\n\n fu.write_file(data_frame, corporation_index_file_url, file_n + '_index')", "def column_creator(path):\n if not os.path.exists(path+'tables'):\n os.makedirs(path+'tables')\n\n\n # Sequences\n if os.path.exists(path+'SEQ.txt'):\n with open(os.path.join(path+'SEQ.txt')) as f1, open(os.path.join(path+'tables/sequences_table.txt'), 'a') as f2:\n c = Counter(x.strip() for x in f1)\n for x in c:\n f2.write(\"%s\\t%s\\n\" % (x, str(c[x])))\n f1.close()\n f2.close()\n\n # Modifications\n if os.path.exists(path + 'modifications.txt'):\n\n with open(os.path.join(path+'modifications.txt')) as f1, open(os.path.join(path+'tables/modifications_table.txt'), 'a') as f2:\n c = Counter(x.strip() for x in f1)\n for x in c:\n f2.write(\"%s\\t%s\\n\" % (x, str(c[x])))\n f1.close()\n f2.close()\n\n # Spectrum identify:\n if os.path.exists(path + 'spectrum_identify.txt'):\n\n with open(os.path.join(path+'spectrum_identify.txt')) as f1, open(path+'tables/spectrum_ide_table.txt', 'a') as f3:\n lines1 = f1.read().count('\\n')\n f3.write(\"%s\\n%s\\n\" % (\"Spectrum Number\",lines1))\n f1.close()\n f3.close()\n\n if os.path.exists(path + 'spectrum_unidentify.txt'):\n with open(os.path.join(path+'spectrum_unidentify.txt')) as f2, open(path+'tables/spectrum_unide_table.txt', 'a') as f3:\n lines2 = f2.read().count('\\n')\n f3.write(\"%s\\n%s\\n\" % (\"Spectrum Number\",lines2))\n f2.close()\n f3.close()\n\n if os.path.exists(path+'taxonomy_identify.txt'):\n # Taxonomy ide:\n with open(os.path.join(path+'taxonomy_identify.txt')) as f1, open(os.path.join(path+'tables/taxonomy_ide_table.txt'), 'a') as f2:\n c = Counter(x.strip() for x in f1)\n for x in c:\n f2.write(\"%s\\t%s\\n\" % (x, str(c[x])))\n f1.close()\n f2.close()\n\n\n if os.path.exists(path + 'taxonomy_unidentify.txt'):\n # Taxonomy unide:\n with open(os.path.join(path+'taxonomy_unidentify.txt')) as f1, 
open(os.path.join(path+'tables/taxonomy_unide_table.txt'), 'a') as f2:\n c = Counter(x.strip() for x in f1)\n for x in c:\n f2.write(\"%s\\t%s\\n\" % (x, str(c[x])))\n f1.close()\n f2.close()", "def ReadIndex_text(indexfile, isPrintWarning = False):#{{{\n# return (indexList, headerinfo, dbfileindexList)\n indexList = []\n idList = []\n v1 = array('B') # dbfile index\n v2 = array('L') # offset\n v3 = array('I') # block size\n apd1 = idList.append\n apd2 = v1.append\n apd3 = v2.append\n apd4 = v3.append\n indexFileHeaderText = []\n origdbname=\"\"\n origversion=\"\"\n origext=\"\"\n origprefix=\"\"\n try:\n\n hdl = mybase.ReadLineByBlock(indexfile)\n lines = hdl.readlines()\n while lines != None:\n for line in lines:\n if not line or line[0] == \"#\":\n continue\n strs = line.split()\n if strs[0] == \"DEF_DBNAME\":\n if len(strs)>=2:\n origdbname=strs[1]\n elif strs[0] == \"DEF_VERSION\":\n if len(strs)>=2:\n origversion=strs[1]\n elif strs[0] == \"DEF_EXTENSION\":\n if len(strs)>=2:\n origext=strs[1]\n elif strs[0] == \"DEF_PREFIX\":\n if len(strs)>=2:\n origprefix = strs[1]\n else:\n apd1(strs[0])\n apd2(int(strs[1]))\n apd3(int(strs[2]))\n apd4(int(strs[3]))\n lines = hdl.readlines()\n\n indexList.append(idList)\n indexList.append(v1)\n indexList.append(v2)\n indexList.append(v3)\n\n headerinfo = (origdbname, origversion, origext, origprefix)\n\n numRecord = len(idList)\n lastDBFileIndex = v1[numRecord-1]\n dbfileindexList = list(range(lastDBFileIndex+1))\n\n if isPrintWarning:\n if origversion == \"\":\n msg = \"{}: Warning! No version info in the index file {}\"\n print(msg.format(sys.argv[0],indexfile), file=sys.stderr)\n elif origversion != version:\n msg = \"{}: Warning! Version conflicts. \"\\\n \"Version of the index file {} ({}) \"\\\n \"!= version of the program ({})\"\n print(msg.format(sys.argv[0],indexfile,\n origversion, version), file=sys.stderr)\n return (indexList, headerinfo, dbfileindexList)\n except IOError:\n msg = \"Failed to read index file {} in function {}\"\n print(msg.format(indexfile, sys._getframe().f_code.co_name), file=sys.stderr)\n return (None, None, None)", "def file_parser(file_name):\n h = 480\n w = 640\n out = []\n with open(file_name, 'r') as f:\n line_num = 1\n for line in f:\n if line_num < 17:\n # Read to where data starts\n line_num += 1\n continue\n elif line_num > 74:\n break\n # print(list(map(int, line.strip().split(\" \"))))\n vals = line.split()\n # print(list(\"\".join(line)))\n # print(line.split())\n assert(float(vals[2]) < 640)\n assert(float(vals[3]) < 480)\n point = [float(vals[2]) * w, float(vals[3]) * h]\n # print(point)\n out.append(point)\n line_num += 1\n\n out.append([0,0])\n out.append([w-1, 0])\n out.append([0, h-1])\n out.append([w-1, h-2])\n return out", "def parseLabels(filename):\n r = parse(filename)\n res = {}\n for qid in r:\n lst = []\n for y in r[qid].split(\";\"):\n doc, score = y.split(\",\")\n lst.append((int(doc), float(score)))\n res[qid] = lst\n return res", "def convert(label, tags, categories, projects, view, featured):\n\n filename = f\"content/publication/{label}/index.md\"\n content = readfile(filename)\n if featured: \n content = content.replace(\"featured: false\", f'featured: true') \n if tags: \n content = content.replace(\"tags: []\", f'tags: [\"{tags}\"]') \n if categories: \n content = content.replace(\"categories: []\", f'categories: [\"{categories}\"]') \n if projects: \n content = content.replace(\"projects: []\", f'projects: [\"{projects}\"]')\n writefile(filename, content)\n if view:\n 
print(content)", "def process_to_text(rawfile, txtfile, field: int=None):\n\n if not os.path.exists(txtfile) or os.path.getsize(txtfile) == 0:\n sacrelogger.info(\"Processing %s to %s\", rawfile, txtfile)\n if rawfile.endswith('.sgm') or rawfile.endswith('.sgml'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n if line.startswith('<seg '):\n print(_clean(re.sub(r'<seg.*?>(.*)</seg>.*?', '\\\\1', line)), file=fout)\n elif rawfile.endswith('.xml'): # IWSLT\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n if line.startswith('<seg '):\n print(_clean(re.sub(r'<seg.*?>(.*)</seg>.*?', '\\\\1', line)), file=fout)\n elif rawfile.endswith('.txt'): # wmt17/ms\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n print(line.rstrip(), file=fout)\n elif rawfile.endswith('.tsv'): # MTNT\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n print(line.rstrip().split('\\t')[field], file=fout)", "def main(nlp, file_path, final_file_path, from_line=0, to_line=None):\n with open(final_file_path, \"w\") as parsed_file:\n with open(file_path) as cnn_dm:\n line = cnn_dm.readline().strip()\n article_idx = 0\n while article_idx < from_line:\n line = cnn_dm.readline().strip()\n article_idx += 1\n if to_line is None:\n while line is not None and line != '':\n process_line(nlp, line, parsed_file)\n article_idx += 1\n print(\"{} articles processed from file {}\".format(article_idx, file_path))\n line = cnn_dm.readline().strip()\n else:\n while article_idx < to_line and line is not None and line != '':\n process_line(nlp, line, parsed_file)\n article_idx += 1\n print(\"{}th article processed from file {}\".format(article_idx, file_path))\n line = cnn_dm.readline().strip()", "def main():\n input_file = sys.argv[1]\n target_width = int(sys.argv[2]) * 2\n\n to_write = \"\"\n \n print(\"Processing: %s\" % input_file)\n\n with open(input_file,\"r\") as fh:\n for line in fh.readlines():\n slices = line[:-1]\n \n endian_buf = []\n\n while(len(slices) > 0):\n k = slices[0:target_width]\n endian_buf.insert(0,k+\"\\n\")\n slices = slices[target_width:]\n\n for b in endian_buf:\n to_write += b\n\n with open(input_file,\"w\") as fh:\n fh.write(to_write)", "def main(input_filepath, latex):\n logger = logging.getLogger(__name__)\n\n df = pd.read_csv(input_filepath)\n out = df.head()\n if latex:\n out = out.to_latex()\n print(out)", "def loadtext(infile):\n warrsn, farrsn =np.loadtxt(infile, usecols=(0, 1), unpack=True)\n return create_spectrum(warrsn, farrsn)", "def turn_files_into_pretty_text(text_files):\n list_of_all_lines = []\n for item in text_files:\n for line in item:\n line = line.rstrip()\n if line not in list_of_all_lines:\n list_of_all_lines.append(line)\n\n for item in list_of_all_lines:\n\n words = item.split('|')\n melon = words[0]\n count = words[1]\n amount = words[2]\n\n print \"Delivered {} {}s for total of ${}\".format(count, melon, amount)", "def file_to_tokenized_docs_generator(file_path, encoder, args):\n reader = Reader(file_path)\n string_iterable = reader.stream_data(threaded=False)\n string_iterable = eot_splitting_generator(string_iterable, encoder)\n\n token_list_gen = prep_and_tokenize_generator(string_iterable,\n encoder,\n normalize_with_ftfy=args.normalize_with_ftfy,\n normalize_with_wikitext_detokenize=args.normalize_with_wikitext_detokenize\n )\n return token_list_gen", "def readFile(fname):\n\n fromto = []\n cols = []\n with open(fname , 
'r') as f:\n cols = f.readline().split(\",\")[0:4] # Headline\n for line in f:\n tm, frm, to, am = line.split(\",\")[0:4]\n frm = int(frm.lstrip())\n to = int(to.lstrip())\n fromto.append((frm,to))\n return cols, fromto" ]
[ "0.5493499", "0.54529065", "0.5267102", "0.52564555", "0.5165884", "0.5158104", "0.5139018", "0.5111768", "0.50976145", "0.5082693", "0.50638324", "0.50472414", "0.5038567", "0.5021236", "0.5013941", "0.5003135", "0.49983117", "0.49895382", "0.49807015", "0.49714673", "0.49597633", "0.49453783", "0.49351558", "0.49332955", "0.49314943", "0.4921506", "0.49201792", "0.49073872", "0.49036503", "0.4901937", "0.48817852", "0.48751253", "0.48647115", "0.48546508", "0.48534766", "0.4851544", "0.48418388", "0.4841796", "0.4838218", "0.4824403", "0.4824041", "0.48142004", "0.48044834", "0.4804063", "0.4801289", "0.48011324", "0.4796022", "0.4781069", "0.4772303", "0.47606498", "0.47549024", "0.47486085", "0.47485057", "0.47482303", "0.4746201", "0.47435644", "0.47427368", "0.4731151", "0.47183144", "0.47180167", "0.471522", "0.47073826", "0.47017014", "0.47008014", "0.470037", "0.47000834", "0.46969846", "0.46936342", "0.4691427", "0.4689604", "0.46893018", "0.46855798", "0.46827877", "0.46819785", "0.46811417", "0.46800005", "0.46799353", "0.46784037", "0.4676401", "0.4666097", "0.46648547", "0.46629286", "0.46620095", "0.46606192", "0.46522003", "0.4649201", "0.46461514", "0.46451077", "0.4644399", "0.4640793", "0.46393007", "0.4633941", "0.46315727", "0.46274823", "0.4621254", "0.46180436", "0.46066976", "0.46056837", "0.46020707", "0.4601687" ]
0.6535076
0
Write an image to the disk.
def drop_image(imagename, destination, itype="h"):
    if type(destination) == type(""):
        if(itype == "h"):
            imgtype = EMUtil.ImageType.IMAGE_HDF
        elif(itype == "s"):
            imgtype = EMUtil.ImageType.IMAGE_SINGLE_SPIDER
        else:
            ERROR("unknown image type","drop_image",1)
        imagename.write_image(destination, 0, imgtype)
    else:
        ERROR("destination is not a file name","drop_image",1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(self, image):\n raise NotImplementedError()", "def write(img, path):\n create_directories_for_file_name(path)\n writer = sitk.ImageFileWriter()\n writer.Execute(img, path, True)", "def write_image(self, image_name, image):\n raise NotImplementedError", "def write_image(self, name: str, image_path: str):\n # TODO: implement\n raise NotImplementedError(\"We are working on this!\")", "def write(self, filename):\n\n self.__image.save(filename)", "def _save_image(self, image_name, image, output_dir):\n dst = '{}/{}'.format(output_dir, self._image_filename(image_name))\n os.makedirs(output_dir, exist_ok=True)\n try:\n with open(dst, 'wb') as f:\n for chunk in image.save(named=self.image_registry_name(image_name)):\n f.write(chunk)\n log.info('Image {} saved as {}'.format(image_name, dst))\n except Exception as err:\n if os.path.isfile(dst):\n os.remove(dst)\n raise err", "def write(\n path: Union[Path, str],\n image: np.ndarray) -> None:\n raise NotImplementedError()", "def write_image(path, image):\n image = tf.image.encode_jpeg(image, quality=100)\n return tf.io.write_file(path, image)", "def save_image(image):\n if config['save_images']['enabled']:\n directory = config['save_images']['destination']\n filename = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S%f\") + '.jpg'\n destination = os.path.join(directory, filename)\n logging.debug('saving image to %s', destination)\n f = open(destination, 'wb')\n f.write(image)\n f.close", "def save_image(image, file_name):\n io.imsave(file_name,image)", "def imwrite(image, path):\n\n if image.ndim == 3 and image.shape[2] == 1: # for gray image\n image = np.array(image, copy=True)\n image.shape = image.shape[0:2]\n\n imgarray=((image+1.0)*127.5).astype(np.uint8)\n img=Image.fromarray(imgarray)\n img.save(path)", "def write_image(self, filename):\n cv2.imwrite(filename, self.image)", "def write_image(self, img, extname=None, extver=None,\n compress=None, tile_dims=None, header=None):\n\n self.create_image_hdu(img,\n header=header,\n extname=extname, extver=extver,\n compress=compress, tile_dims=tile_dims)\n\n if header is not None:\n self[-1].write_keys(header)\n self[-1]._update_info()\n\n # if img is not None:\n # self[-1].write(img)", "def save_image(img: Image, filename: str) -> None:\r\n img.save(filename)", "def imgWrite(img, path):\n dirMake(os.path.dirname(path))\n sitk.WriteImage(img, path)\n\n # Reformat files to be compatible with CIS Software\n #ext = os.path.splitext(path)[1].lower()\n #if ext == \".vtk\": vtkReformat(path, path)", "def save_image(self):\n self.table_to_image.img.save(self.file_name)\n aws.AWSHandler().upload_image(self.file_name)", "def write_image(img, img_name):\n\n cv2.imwrite(img_name, img)", "def imwrite(image, path):\n return scipy.misc.imsave(path, to_range(image, 0, 255, np.uint8))", "def write_img_to_fs(name, data):\n with open(name, \"wb\") as fout:\n fout.write(data)", "def save_image(self, filename):\n raster.save_image(filename, self.image, self.metadata)", "def save_to_image(img, filename):\n filename = os.path.join(datadir, filename + '.png')\n print('Saving: ' + filename)\n img.to_pil().save(filename)", "def _save(self, data: PIL.Image) -> None:\n with self._fs.open(self._filepath, mode=\"wb\") as f:\n data.save(f)", "def save_image(img, path):\n cv2.imwrite(path, img)", "def saveImageAs(self, name):\n\t\tself.image.save(name)", "def save(self, filename):\n self.image.save(filename, self.options.img_format)", "def save_image(path, data):\n misc.imsave(path, data)", "def save(img, path, 
file_name):\n\n name = os.path.join(path,file_name).replace('/', os.sep)\n\n io.imsave(name,img)", "def save_image(self):\n self.save()", "def writeimage(self, fp):\n execfile = open(self.binpath, \"w\")\n databuf = fp.read(4096)\n while databuf:\n execfile.write(databuf)\n databuf = fp.read(4096)\n execfile.flush()\n execfile.close()\n os.chmod(self.binpath, stat.S_IRWXU)", "def save(self, filename):\n \n path, name = os.path.split(filename)\n ext = name.split(\".\")[-1]\n _tkExec(self.image.write, filename, format=ext)", "def _save(filename, img):\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n # filename = filename+'.png'\n filename = os.path.join(OUTPUT_DIR, filename)\n # print(filename, img.shape)\n cv.imwrite(filename, img)", "def save(self, filepath):\n self.drawer.flush()\n self.img.save(filepath)", "def imwrite(image, path, quality=95, **plugin_args):\n iio.imsave(path, dtype.im2uint(image), quality=quality, **plugin_args)", "def write_to_file(self, filename):\n\n loader = ImageLoader()\n loader.write(self, filename)", "def save(self, file=None, filename=None):\n if file is None and filename is None:\n raise TypeError('expected an argument')\n elif file is not None and filename is not None:\n raise TypeError('expected only one argument; but two passed')\n elif file is not None:\n if isinstance(file, types.FileType) and hasattr(libc, 'fdopen'):\n fd = libc.fdopen(file.fileno(), file.mode)\n r = library.MagickWriteImageFile(self.wand, fd)\n if not r:\n self.raise_exception()\n else:\n if not callable(getattr(file, 'write', None)):\n raise TypeError('file must be a writable file object, '\n 'but it does not have write() method: ' +\n repr(file))\n file.write(self.make_blob())\n else:\n if not isinstance(filename, basestring):\n raise TypeError('filename must be a string, not ' +\n repr(filename))\n r = library.MagickWriteImage(self.wand, filename)\n if not r:\n self.raise_exception()", "def archive_image(self, img):\n \n try:\n imgname = \"roboimg\" + str(int(time.time())) + \".png\"\n imgpath = os.path.join(self.imgdir, imgname)\n # print(\"Pic name \" + imgpath)\n\n cv2.imwrite(imgpath, img)\n except:\n self.logger.error(\"archive_image failed %s\" % (imgpath))", "def iwrite(im, filename, **kwargs):\n\n # TODO check valid input\n\n ret = cv.imwrite(filename, im, **kwargs)\n\n if ret is False:\n print('Warning: image failed to write to filename')\n print('Image =', im)\n print('Filename =', filename)\n\n return ret", "def dump_image(image, path_image):\n cv2.imwrite(path_image, image)\n return", "def save(self):\n\n self.image.save(\"./output/\" + self.name + \" pg\" + str(self._page) + \".png\")", "def write(self, uri):\n img_to_write = self.msiToWrite.get_image()\n\n # sitk can only write images of dimension 2,3,4. This hack is\n # to fake 1d images as being 2d. 1d images e.g. 
occure after taking\n # the mean of an image.\n if len(img_to_write.shape) == 1:\n img_to_write = np.reshape(img_to_write, (1, 1, img_to_write.shape[0]))\n\n img = sitk.GetImageFromArray(img_to_write, isVector=True)\n sitk.WriteImage(img, uri)\n logging.info(\"written file \" + uri + \" to disk\")\n return None", "def saveImage(self, event):\r\n fileWritten = self.image.writeFile()\r\n self.statusBar.SetStatusText(\"Saved {}\".format(fileWritten))", "def save_image(image, filename, mode='PNG'):\n out = Image.new(mode='L', size=(image['width'], image['height']))\n out.putdata(image['pixels'])\n if isinstance(filename, str):\n out.save(filename)\n else:\n out.save(filename, mode)\n out.close()", "def save_image(image, filename, mode='PNG'):\n out = Image.new(mode='L', size=(image['width'], image['height']))\n out.putdata(image['pixels'])\n if isinstance(filename, str):\n out.save(filename)\n else:\n out.save(filename, mode)\n out.close()", "def save(cls, image: Image, save_path: typing.Union[str, BytesIO]):\n # print(f\"[save] {save_path}\")\n data = image.get_image_for_save()\n imagej_kwargs = {}\n if image.labels is not None:\n imagej_kwargs[\"Labels\"] = image.labels * image.layers\n coloring = image.get_imagej_colors()\n if coloring is not None:\n imagej_kwargs[\"LUTs\"] = coloring\n ranges = image.get_ranges()\n ranges = np.array(ranges).reshape(len(ranges) * 2)\n # print(ranges)\n imagej_kwargs[\"Ranges\"] = ranges\n spacing = image.get_um_spacing()\n\n metadata = {\"mode\": \"color\", \"unit\": \"\\\\u00B5m\"}\n if len(spacing) == 3:\n metadata.update({\"spacing\": spacing[0]})\n resolution = [1 / x for x in spacing[-2:]]\n cls._save(data, save_path, resolution, metadata, imagej_kwargs)", "def imwrite(img, file_path, params=None, auto_mkdir=True):\n if auto_mkdir:\n dir_name = os.path.abspath(os.path.dirname(file_path))\n os.makedirs(dir_name, exist_ok=True)\n ok = cv2.imwrite(file_path, img, params)\n if not ok:\n raise IOError('Failed in writing images.')", "def img_save(name,img):\n cv2.imwrite(name,img)", "def write(filename, data):\r\n with open(filename, \"wb\") as f:\r\n pic.dump(data, f)", "def save_img(img: np.ndarray, path: str) -> None:\n\n img_obj = Image.fromarray(img)\n img_obj.save(path)", "def save_image(name, image):\n image_name = 'output/' + name + '.png'\n cv2.imwrite(image_name, image)", "def save_as(self, filename):\n opencv.imwrite(filename, self.img)", "def save_output_image_to_directory(self):\n curr_directory = os.path.dirname(os.path.abspath(__file__))\n images_dir = curr_directory + \"/images/\"\n if not os.path.exists(images_dir):\n os.makedirs(images_dir)\n self.output_image_name = md5(str(uuid4()).encode()).hexdigest() + \".png\"\n image_file_name = images_dir + self.output_image_name\n self.output_image.save(image_file_name)\n logger.info(\"Image file saved locally : %s\", image_file_name)", "def save_image(self, image, image_id):\n # Make sure we're playing with a valid image\n if not image:\n log.error(u'image is invalid: {0}'.format(image))\n return None\n key = self.id_to_key(image_id)\n self.connection.save_image(key, image, self.format)", "def tiffwrite(filename, im):\n tf.imwrite(filename, im)", "def _write_whole_disk_image(image, image_info, device):\n # FIXME(dtantsur): pass the real node UUID for logging\n disk_utils.destroy_disk_metadata(device, '')\n disk_utils.udev_settle()\n\n command = ['qemu-img', 'convert',\n '-t', 'directsync', '-S', '0', '-O', 'host_device', '-W',\n image, device]\n LOG.info('Writing image with command: %s', ' 
'.join(command))\n try:\n disk_utils.convert_image(image, device, out_format='host_device',\n cache='directsync', out_of_order=True,\n sparse_size='0')\n except processutils.ProcessExecutionError as e:\n raise errors.ImageWriteError(device, e.exit_code, e.stdout, e.stderr)\n\n disk_utils.trigger_device_rescan(device)", "def write(self, image, values):\n assert isinstance(image, str)\n assert isinstance(values, dict)\n for offset, value in values.items():\n assert isinstance(offset, str)\n assert re.match(\"^(0x)?[a-fA-F0-9]+$\", offset), \\\n f\"offset \\\"{offset}\\\" needs to be in hexidecimal\"\n assert isinstance(value, bytes)\n # convert to hex strings since ttbd_iface_call uses\n # json and bytes are not JSON serilizable\n values[offset] = bytes(value).hex()\n\n target = self.target\n\n self.target.ttbd_iface_call(\n \"images\", \"write\", image = image, values = values)\n\n target.report_info(f\"{image}: data successfully written\")", "def save_image(image, output_folder, output_name):\n\n\tfolder_path = compute_path(output_folder, 'dataset')\n\tos.makedirs(folder_path, exist_ok=True)\n\n\tfile_path = os.path.join(folder_path, output_name + '.png')\n\timage.save(file_path)", "def write_bytes_to_image(self, file_path):\n data_manipulation.bytes_to_image(self.bytes, file_path)", "def save_image(image: FileStorage, folder: str = None, name: str = None) -> str:\n return IMAGE_SET.save(image, folder, name)", "def write_itk_image(image, path):\n\n writer = itk.ImageFileWriter()\n writer.SetFileName(path)\n\n if os.path.splitext(path)[1] == '.nii':\n Warning('You are converting nii, ' + \\\n 'be careful with type conversions')\n\n writer.Execute(image)", "def save_image_action(self):\n self.view.save_image(self.settings.get_image_type())", "def save(self, path):\n (folder, filename) = os.path.split(path)\n (name, extension) = os.path.splitext(filename)\n\n if not name:\n raise ValueError, \"name is required\"\n\n if extension:\n format = Image.image_format(extension)\n else:\n format = self.format\n filename = name + self.extension\n path = os.path.join(folder, filename)\n\n image = self.convert(format)\n if image._contents:\n f = open(path, \"wb\")\n f.write(image._contents)\n f.close()\n else:\n image.pil_image.save(path, format)\n\n return path", "def export_image_to_drive(img,\n folder=None,\n scale=None,\n crs=None,\n region=None,\n verbose=False,\n save_metadata=True,\n metadata_folder='.'):\n\n img_prop = ee.Image(img).getInfo()\n img_id = img_prop['id'].replace('/', '_')\n metadata_str = EEHelper.expand_image_meta(img_prop)\n\n if crs is None:\n crs = img_prop['bands'][0]['crs']\n crs_transform = img_prop['bands'][0]['crs_transform']\n\n if scale is None:\n scale = crs_transform[0]\n\n if region is None:\n region_geom = img_prop['properties']['system:footprint']['coordinates']\n else:\n img = img.clip(region)\n region_dict = region.getInfo()\n if region_dict['type'] == 'FeatureCollection':\n region_geom = region_dict['features'][0]['geometry']['coordinates']\n elif region_dict['type'] == 'Feature':\n region_geom = region_dict['geometry']['coordinates']\n elif region_dict['type'] == 'Geometry':\n region_geom = region_dict['coordinates']\n else:\n warnings.warn('Invalid geometry, using image footprint for export.')\n region_geom = img_prop['properties']['system:footprint']['coordinates']\n\n if verbose:\n sys.stdout.write('Exporting: {}\\n'.format(folder + '/' + img_id))\n sys.stdout.write(metadata_str)\n\n task = ee.batch.Export.image.toDrive(\n image=img,\n fileNamePrefix=img_id,\n 
folder=folder,\n description='Export_{}'.format(img_id),\n scale=scale,\n crsTransform=crs_transform,\n crs=crs,\n region=region_geom,\n maxPixels=1e13,\n skipEmptyTiles=True)\n\n res = task.start()\n if verbose:\n sys.stdout.write(task)\n\n if save_metadata:\n with open(metadata_folder + '/' + img_id + '.txt', 'w') as metadata_file_ptr:\n metadata_file_ptr.write(metadata_str)", "def save(im, output_dir: Path):\n if not hasattr(save, \"counter\"):\n save.counter = 0 # type: ignore\n fname = f\"{save.counter:05d}.jpg\" # type: ignore\n cv2.imwrite(str(output_dir / fname), im)\n print(\"Saved\", fname)\n save.counter += 1 # type: ignore", "def save(self, path, filename=None, overwrite=False):\n \n if filename is None and self.metadata is None:\n raise ValueError(\"If the image has no 'metadata', you must specify a filename\")\n elif filename is not None:\n pass\n elif filename is None and self.metadata is not None:\n filename = os.path.basename(self.metadata[\"pfilename\"])\n \n full_image_path = os.path.join(path, filename)\n \n if overwrite and os.path.exists(full_image_path):\n os.remove(full_image_path)\n \n self.fits.writeto(full_image_path)", "def save_image(self, filename):\n if filename[-4:] != '.pkl':\n filename + '.pkl'\n with open(filename, 'wb') as output: # Overwrites any existing file.\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)", "def save(self, path):\n dirname = osp.dirname(osp.abspath(path))\n if not osp.isdir(dirname):\n os.mkdir(dirname)\n image = self.build()\n LOGGER.info(\"Save image '%s'\", path)\n image.save(path)\n return image", "def _write_image(image_info, device, configdrive=None):\n starttime = time.time()\n image = _image_location(image_info)\n uuids = {}\n if image_info.get('image_type') == 'partition':\n uuids = _write_partition_image(image, image_info, device, configdrive)\n else:\n _write_whole_disk_image(image, image_info, device)\n totaltime = time.time() - starttime\n LOG.info('Image %(image)s written to device %(device)s in %(totaltime)s '\n 'seconds', {'image': image, 'device': device,\n 'totaltime': totaltime})\n try:\n disk_utils.fix_gpt_partition(device, node_uuid=None)\n except exception.InstanceDeployFailure:\n # Note: the catch internal to the helper method logs any errors.\n pass\n return uuids", "def register_image_file(self, image):\n save_path = os.path.join(self.session_dir, 'image.jpg')\n image.save(save_path)\n self.image = np.array(Image.open(save_path))", "def img_save(self):\n file_name, extension = return_folder_file_extension(self.img_name)[1:]\n image_name_save = \"%s_D=%s_Rs=%s_size=%s_offset=%i%s\" % (file_name, self.D, self.Rs, self.axe_X, self.offset_X+self.offset_X2, extension)\n\n if self.img2 is not None:\n self.img2.save(image_name_save)\n print(\"Saved \"+image_name_save)\n else:\n print(\"No image to save\")", "def write(self, filename):\n f = open(filename, 'bw')\n\n # file header (14)\n f.write(char('B'))\n f.write(char('M'))\n f.write(dword(14 + 40 + self.width * self.height * 3))\n f.write(dword(0))\n f.write(dword(14 + 40))\n\n # image header (40)\n f.write(dword(40))\n f.write(dword(self.width))\n f.write(dword(self.height))\n f.write(word(1))\n f.write(word(24))\n f.write(dword(0))\n f.write(dword(0))\n f.write(dword(self.width * self.height * 3))\n f.write(dword(0))\n f.write(dword(0))\n f.write(dword(0))\n f.write(dword(0))\n\n # pixel data\n for x in range(self.height):\n for y in range(self.width):\n f.write(self.pixels[x][y])\n f.close()", "def save_screenshot(self, img, file_name: str):\n 
img.save(str(self.info.screenshots_path / file_name))", "def save_img(self):\r\n self.extract_info_from_file()\r\n path_0 = os.path.join(self.output_path, self.field_id, self.patient_id + self.ext)\r\n path_1 = os.path.join(self.output_path, self.field_id + '_' + self.instance, self.patient_id + self.ext)\r\n if self.shot == '0': # first shot\r\n if os.path.exists(path_0) or os.path.exists(path_1):\r\n print(self.patient_id, 'already done')\r\n pass\r\n else:\r\n if not self.img_computed:\r\n self.compute_img()\r\n if self.instance == '0':\r\n self.img.save(path_0)\r\n else:\r\n self.img.save(path_1)\r\n else: # newer shot\r\n if not self.img_computed:\r\n self.compute_img()\r\n if self.instance == '0':\r\n self.img.save(path_0)\r\n else:\r\n self.img.save(path_1)", "def _write(self, stream):\n\n self._img.append(self.make_path())\n self._img.append(self.make_border())\n self._img.append(self.make_text())\n\n ET.ElementTree(self._img).write(stream, encoding=\"UTF-8\", xml_declaration=True)", "def save_image_to_h5(image, h5_path, *args, **kwargs):\n # TODO: Implement the method\n\n f = h5.File(h5_path, \"w\")\n grp = f.create_group(\"data\")\n grp.create_dataset(\"image\", data=image)", "def _write_image(self):\r\n # Create an output raster with the correct number of rows and columns.\r\n gtiff_driver = gdal.GetDriverByName('GTiff')\r\n out_ds = gtiff_driver.Create(os.path.join(self.out_folder, self.out_file_name), self.column, self.row, 1)\r\n out_ds.SetProjection(self.in_ds.GetProjection())\r\n\r\n # Convert the offsets to real-world coordinates for the georeferencing info.\r\n # We can't use the coordinates above because they don't correspond to the pixel edges.\r\n subset_ulx, subset_uly = gdal.ApplyGeoTransform(self.in_gt, self.off_ulx, self.off_uly)\r\n out_gt = list(self.in_gt)\r\n out_gt[0] = subset_ulx\r\n out_gt[3] = subset_uly\r\n out_ds.SetGeoTransform(out_gt)\r\n\r\n data = self.read_image()\r\n out_band = out_ds.GetRasterBand(1)\r\n out_band.WriteArray(data)\r\n\r\n del out_ds", "def save_image(self, path, image):\n try:\n if isinstance(image, str):\n pass\n elif isinstance(image, list):\n for im in image:\n img = cv2.imread(im, cv2.IMREAD_COLOR)\n cv2.imwrite(path + \"/\" + im.split(\"/\")[-1], img)\n except Exception as e:\n self.logger.exception(e)\n sys.exit(1)", "def save_image(self, file_name: str):\n if not file_name.endswith(\".png\"):\n file_name += \".png\"\n self.image.save(file_name)", "def save_image(dirname, filename, img):\r\n if os.path.exists(dirname) == 0:\r\n os.makedirs(dirname)\r\n cv2.imwrite(dirname+filename+\".bmp\", img)", "def save_blob(self, img_blob, filename):\n out_file = open(filename, \"wb\")\n out_file.write(img_blob)\n out_file.close()", "def write_data(writer: UFOWriter, filename: str, data: bytes) -> None:\n writer.writeImage(filename, data)", "def save_file(self, input_path, output_path):\n try:\n im = Image.open(input_path)\n im.save(output_path)\n return output_path\n except Exception as e:\n return '!ERROR' + str(e)", "def save_image(self):\n self.compressed_image_id = str(uuid.uuid4().hex)\n plot.imsave(\n str(\n self.compressed_image_id + \"{}\").format(\n \".png\"), self.compressed_image)\n\n if self.verbose:\n print(\n \"Compressed image saved at \" + (\n str(self.compressed_image_id + \"{}\").format(\".png\")))", "def imwrite(img, file_path, params=None, auto_mkdir=True):\n if not isinstance(img, np.ndarray):\n raise TypeError('\"img\" must be a numpy array!')\n if auto_mkdir:\n cvtools.makedirs(file_path)\n # return 
cv.imwrite(file_path, img, params)\n # support path included chinese\n return cv.imencode(osp.splitext(file_path)[-1], img, params)[1].tofile(file_path)", "def saveImage(image, path, changename=0):\n im = Image.open(image)\n im.save(os.path.join(path, str(image)+'.'*changename))\n im.close()\n image.close()", "def save_image(img, view, ts, output_dir):\n\n img = tf.image.decode_jpeg(img, channels=3)\n img = Image.fromarray(img.numpy(), 'RGB')\n img.save(os.path.join(output_dir, f'{ts}_{view}.jpeg'))", "def save(img, filename=None):\n\tif filename is None:\n\t\tdate = time.strftime(\"%Y%m%d\")\n\t\tfilename = \"T\" + str(date) + \".jpg\"\n\t\tcv2.imwrite(filename, img)\n\telse:\n\t\tcv2.imwrite(filename, img)", "def save_image(image, image_path):\n image = ((image[0] + 1) * 127.5).astype(np.uint8) # convert from [-1, 1] to [0, 255]\n img = Image.fromarray(image)\n img.save(os.path.expanduser(image_path))", "def test_write_img(img_: Tensor, ext: str) -> None:\n with NamedTemporaryFile(\"w\") as f:\n path = f\"{f.name}{ext}\"\n write_img(img_, path)\n img = read_image(path)\n torch.testing.assert_allclose(img, img_)", "def save(self, path: str) -> None:\n if self._encoded_image:\n path = self._path_as_png(path)\n self._encoded_image.save(path)\n else:\n print(\"Error! Image was not encoded yet.\")", "def write_image_to_file_incrementally(image):\r\n i = 0\r\n while os.path.exists(\"sample%s.jpeg\" % i):\r\n i += 1\r\n with open(\"sample%s.jpeg\" % i, \"wb\") as f:\r\n f.write(image)", "def save_image(filename):\n subprocess(\"camera_save_image(%r)\" % filename)\n ##image = acquire_image()\n ##image.save(filename)", "def write_to_png(self, filename):\n png_file = open(filename, 'wb')\n writer = png.Writer(self.width, self.height)\n writer.write_array(png_file, self.data)\n png_file.close()", "def save_pic(self, path_pic, pic):\n path_dir = os.path.dirname(path_pic)\n if not os.path.exists(path_dir):\n print(\"[INFO] Directory \\\"{}\\\" does not exist, creating...\"\n .format(path_dir))\n os.makedirs(path_dir)\n\n cv2.imwrite(path_pic, pic)", "def write_image(out, frame):\n if not os.path.exists(out):\n os.makedirs(out)\n now = datetime.now() \n dt_string = now.strftime(\"%H-%M-%S-%f\") \n filename = f'{out}/{dt_string}.png'\n logging.info(f'write image {filename}')\n cv2.imwrite(filename, frame)", "def writeJPG(self, image, save_path):\n #rows, width = image.shape\n #rbgImage = np.zeros((rows, width,3), dtype=np.uint8)\n #rbgImage[:,:,0] = image # b\n #rbgImage[:,:,1] = image # g\n #rbgImage[:,:,2] = image # r\n cv2.imwrite(save_path, image)", "def save_to_image_file(self, filename, image_format='png', scale_x=1, scale_y=1):\n\n self.save_barcode_to_pillow(scale_x=scale_x, scale_y=scale_y).save(filename,\n format=image_format)", "def write_image(image_base64: str, filepath: pathlib.Path):\n with open(filepath, \"wb\") as f:\n f.write(base64.b64decode(image_base64))", "def save_image(image, save_dir, name, mean=None):\n if mean:\n image = unprocess_image(image, mean)\n misc.imsave(os.path.join(save_dir, name + \".png\"), image)", "def save_image(image, save_dir, name, mean=None):\n if mean:\n image = unprocess_image(image, mean)\n misc.imsave(os.path.join(save_dir, name + \".png\"), image)", "def save_image(image, save_dir, name, mean=None):\n if mean:\n image = unprocess_image(image, mean)\n misc.imsave(os.path.join(save_dir, name + \".png\"), image)", "def save_image(path, image): \n if len(image.shape) == 4:\n image = image.reshape((image.shape[1], image.shape[2], image.shape[3]))\n image = 
np.clip(image * 255.0, 0, 255).astype(np.uint8)\n skimage.io.imsave(path, image)" ]
[ "0.8019482", "0.7901197", "0.7840147", "0.7802012", "0.7660659", "0.7581917", "0.75348264", "0.74246275", "0.7338141", "0.7248253", "0.7200103", "0.719786", "0.7190718", "0.718748", "0.7168532", "0.71044123", "0.70858777", "0.7034931", "0.6973645", "0.6935908", "0.6884878", "0.6867323", "0.6834412", "0.67939794", "0.6768543", "0.67516744", "0.67297685", "0.6729097", "0.67258143", "0.67257994", "0.67131615", "0.6708037", "0.67073977", "0.67053556", "0.6673243", "0.66719025", "0.66113484", "0.6607145", "0.6604288", "0.6598012", "0.6578496", "0.65640455", "0.65640455", "0.6560599", "0.65536565", "0.6543974", "0.6542886", "0.6539522", "0.6532521", "0.65104485", "0.6493508", "0.6481547", "0.64727867", "0.6459243", "0.64552844", "0.6429287", "0.6412439", "0.63699347", "0.63579607", "0.63567865", "0.63556385", "0.6330481", "0.63057965", "0.62977564", "0.62855595", "0.62845635", "0.62268996", "0.6224247", "0.6217528", "0.6194041", "0.61873466", "0.6175582", "0.61736476", "0.6165457", "0.6165093", "0.6159404", "0.6155172", "0.6150981", "0.61462617", "0.613425", "0.6132741", "0.61251974", "0.61236185", "0.611236", "0.6094921", "0.60671204", "0.60567284", "0.60450494", "0.60412765", "0.60195863", "0.6018563", "0.6013654", "0.6013516", "0.600209", "0.60009176", "0.5999851", "0.59964466", "0.5988435", "0.5988435", "0.5988435", "0.59871215" ]
0.0
-1
Write an image to disk with the proper PNG save settings
def drop_png_image(im, trg):
    if trg[-4:] != '.png':
        ERROR('destination name must be png extension', 'drop_png_image', 1)
    if isinstance(trg, basestring):
        im['render_min'] = im['minimum']
        im['render_max'] = im['maximum']
        im.write_image(trg, 0)
    else:
        ERROR('destination is not a file name', 'drop_png_image', 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_image(image, file_name):\n io.imsave(file_name,image)", "def test_save_png():\n img = Image.new('RGB', (10, 20))\n\n parameters = {'path': 'green-dot.png', 'data': [img]}\n\n assert images.save(parameters)", "def save_png(self, filename):\n post_script = self.canvas.postscript().encode()\n img = Image.open(io.BytesIO(post_script))\n img.save(filename, format=\"PNG\")", "def save_to_image(img, filename):\n filename = os.path.join(datadir, filename + '.png')\n print('Saving: ' + filename)\n img.to_pil().save(filename)", "def save(image, name):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.savefig(name)", "def save_image(img: Image, filename: str) -> None:\r\n img.save(filename)", "def imwrite(image, path):\n\n if image.ndim == 3 and image.shape[2] == 1: # for gray image\n image = np.array(image, copy=True)\n image.shape = image.shape[0:2]\n\n imgarray=((image+1.0)*127.5).astype(np.uint8)\n img=Image.fromarray(imgarray)\n img.save(path)", "def _save(filename, img):\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n # filename = filename+'.png'\n filename = os.path.join(OUTPUT_DIR, filename)\n # print(filename, img.shape)\n cv.imwrite(filename, img)", "def write_png(buffer, width, height, fileobj, dpi=None): # real signature unknown; restored from __doc__\n pass", "def save_image(image, figsize, save_path, ticks=False, grey=True):\n fig = plt.figure(figsize=figsize)\n if grey:\n plt.imshow(image, cmap=plt.get_cmap('gray'))\n else:\n plt.imshow(image)\n if not ticks:\n plt.xticks([]), plt.yticks([])\n plt.tight_layout()\n fig.savefig(save_path)\n plt.close(fig)\n return", "def img_save(self):\n file_name, extension = return_folder_file_extension(self.img_name)[1:]\n image_name_save = \"%s_D=%s_Rs=%s_size=%s_offset=%i%s\" % (file_name, self.D, self.Rs, self.axe_X, self.offset_X+self.offset_X2, extension)\n\n if self.img2 is not None:\n self.img2.save(image_name_save)\n print(\"Saved \"+image_name_save)\n else:\n print(\"No image to save\")", "def save_image(name, image):\n image_name = 'output/' + name + '.png'\n cv2.imwrite(image_name, image)", "def write_image(self, image_name, image):\n raise NotImplementedError", "def save_image(self):\r\n filename = filedialog.asksaveasfilename(title='Save Image As...',\r\n filetypes=((\"Portable Network Graphics (.png)\", \"*.png\"), (\"Portable Document Format(.pdf)\", \"*.pdf\")))\r\n self.graph.savefig(filename, dpi=self.graph.dpi)", "def imwrite(image, path):\n return scipy.misc.imsave(path, to_range(image, 0, 255, np.uint8))", "def write(self, image):\n raise NotImplementedError()", "def test_save_jpg():\n img = Image.new('RGB', (10, 20))\n\n parameters = {'path': 'green-dot.jpg', 'data': [img]}\n\n assert images.save(parameters)", "def archive_image(self, img):\n \n try:\n imgname = \"roboimg\" + str(int(time.time())) + \".png\"\n imgpath = os.path.join(self.imgdir, imgname)\n # print(\"Pic name \" + imgpath)\n\n cv2.imwrite(imgpath, img)\n except:\n self.logger.error(\"archive_image failed %s\" % (imgpath))", "def save(self, fn):\n plt.imsave(fn, self.image)", "def save_image(self):\n self.compressed_image_id = str(uuid.uuid4().hex)\n plot.imsave(\n str(\n self.compressed_image_id + \"{}\").format(\n \".png\"), self.compressed_image)\n\n if self.verbose:\n print(\n \"Compressed image saved 
at \" + (\n str(self.compressed_image_id + \"{}\").format(\".png\")))", "def save_image(path, data):\n misc.imsave(path, data)", "def save_image(self, filename):\n raster.save_image(filename, self.image, self.metadata)", "def save(self):\n\n self.image.save(\"./output/\" + self.name + \" pg\" + str(self._page) + \".png\")", "def write_image(img, img_name):\n\n cv2.imwrite(img_name, img)", "def tiffwrite(filename, im):\n tf.imwrite(filename, im)", "def save_image(image, filename, mode='PNG'):\n out = Image.new(mode='L', size=(image['width'], image['height']))\n out.putdata(image['pixels'])\n if isinstance(filename, str):\n out.save(filename)\n else:\n out.save(filename, mode)\n out.close()", "def save_image(image, filename, mode='PNG'):\n out = Image.new(mode='L', size=(image['width'], image['height']))\n out.putdata(image['pixels'])\n if isinstance(filename, str):\n out.save(filename)\n else:\n out.save(filename, mode)\n out.close()", "def save(cls, image: Image, save_path: typing.Union[str, BytesIO]):\n # print(f\"[save] {save_path}\")\n data = image.get_image_for_save()\n imagej_kwargs = {}\n if image.labels is not None:\n imagej_kwargs[\"Labels\"] = image.labels * image.layers\n coloring = image.get_imagej_colors()\n if coloring is not None:\n imagej_kwargs[\"LUTs\"] = coloring\n ranges = image.get_ranges()\n ranges = np.array(ranges).reshape(len(ranges) * 2)\n # print(ranges)\n imagej_kwargs[\"Ranges\"] = ranges\n spacing = image.get_um_spacing()\n\n metadata = {\"mode\": \"color\", \"unit\": \"\\\\u00B5m\"}\n if len(spacing) == 3:\n metadata.update({\"spacing\": spacing[0]})\n resolution = [1 / x for x in spacing[-2:]]\n cls._save(data, save_path, resolution, metadata, imagej_kwargs)", "def save_image(path, img, if_pal=False):\n img = Image.fromarray(img)\n if if_pal:\n img.putpalette([0, 0, 0, 128, 0, 0] + [0, 0, 0] * 253 + [224, 224, 192])\n img.save(path)", "def write_image(self, name: str, image_path: str):\n # TODO: implement\n raise NotImplementedError(\"We are working on this!\")", "def writeImage(image, filename):\n Sky = [128,128,128]\n Building = [128,0,0]\n Pole = [192,192,128]\n Road_marking = [255,69,0]\n Road = [128,64,128]\n Pavement = [60,40,222]\n Tree = [128,128,0]\n SignSymbol = [192,128,128]\n Fence = [64,64,128]\n Car = [64,0,128]\n Pedestrian = [64,64,0]\n Bicyclist = [0,128,192]\n Unlabelled = [0,0,0]\n r = image.copy()\n g = image.copy()\n b = image.copy()\n label_colours = np.array([Sky, Building, Pole, Road_marking, Road, Pavement, Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled])\n for l in range(0,12):\n r[image==l] = label_colours[l,0]\n g[image==l] = label_colours[l,1]\n b[image==l] = label_colours[l,2]\n rgb = np.zeros((image.shape[0], image.shape[1], 3))\n rgb[:,:,0] = r/1.0\n rgb[:,:,1] = g/1.0\n rgb[:,:,2] = b/1.0\n im = Image.fromarray(np.uint8(rgb))\n # im.save('/Users/koheiyamamoto/Desktop/SegNet/out/' + filename)\n im.save('./out/' + filename)", "def save_image(self, file_name: str):\n if not file_name.endswith(\".png\"):\n file_name += \".png\"\n self.image.save(file_name)", "def writePNG(path, pixels, width, height, is_rgba=False):\r\n\r\n with open(path, \"wb\") as fp:\r\n fp.write(buildPNG(pixels, width, height, is_rgba=is_rgba))", "def img_save(name,img):\n cv2.imwrite(name,img)", "def save_image(image):\n if config['save_images']['enabled']:\n directory = config['save_images']['destination']\n filename = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S%f\") + '.jpg'\n destination = os.path.join(directory, filename)\n 
logging.debug('saving image to %s', destination)\n f = open(destination, 'wb')\n f.write(image)\n f.close", "def save_image(path, image): \n if len(image.shape) == 4:\n image = image.reshape((image.shape[1], image.shape[2], image.shape[3]))\n image = np.clip(image * 255.0, 0, 255).astype(np.uint8)\n skimage.io.imsave(path, image)", "def save_img(img: np.ndarray, path: str) -> None:\n\n img_obj = Image.fromarray(img)\n img_obj.save(path)", "def write_to_png(self, filename):\n png_file = open(filename, 'wb')\n writer = png.Writer(self.width, self.height)\n writer.write_array(png_file, self.data)\n png_file.close()", "def save_image(self, image_file):\r\n self.ensure_pyplot()\r\n command = 'plt.gcf().savefig(\"%s\")'%image_file\r\n #print 'SAVEFIG', command # dbg\r\n self.process_input_line('bookmark ipy_thisdir', store_history=False)\r\n self.process_input_line('cd -b ipy_savedir', store_history=False)\r\n self.process_input_line(command, store_history=False)\r\n self.process_input_line('cd -b ipy_thisdir', store_history=False)\r\n self.process_input_line('bookmark -d ipy_thisdir', store_history=False)\r\n self.clear_cout()", "def save(self, filename):\n self.image.save(filename, self.options.img_format)", "def save_image(img, path):\n cv2.imwrite(path, img)", "def imgWrite(img, path):\n dirMake(os.path.dirname(path))\n sitk.WriteImage(img, path)\n\n # Reformat files to be compatible with CIS Software\n #ext = os.path.splitext(path)[1].lower()\n #if ext == \".vtk\": vtkReformat(path, path)", "def save(self, filename):\n try:\n import PIL\n except ImportError:\n raise RuntimeError('Could not import PIL. PIL (pillow) is required to save fresnel images.')\n else:\n if self._output is None:\n self.render()\n image = PIL.Image.fromarray(self._output[:], mode='RGBA')\n image.save(filename)", "def saveImage(self, fileName=\"mandelbrot.frac\"):\n # Save the image as a PNG\n if fileName == \"\":\n fileName = \"mandelbrot.frac\"\n directories = fileName.split(\"/\")\n for n in directories:\n if \".frac\" in n:\n name = n.rsplit(\".\", 1)[0]\n self.img.write(f\"{name}.png\")\n print(f\"Wrote image {name}.png\")", "def write_png(self, output_name):\n self.fig.savefig(output_name)\n return", "def save_image(self):\n self.save()", "def export_PNG(filename, I):\n\n type = 'L'\n if depth(I) == 3:\n type = 'RGB'\n if depth(I) == 4:\n type = 'RGBA'\n if istorch(I):\n Image.fromarray(torch2numpy(I * 255).astype(numpy.uint8), type).save(filename)\n if isnumpy(I):\n Image.fromarray((I * 255).astype(numpy.uint8), type).save(filename)\n return", "def exportImg(self):\n if self.superSampling:\n print(\"Exporting with size adjusted\")\n self.img = self.img.resize((int(self.width/2),int(self.height/2)),Image.NEAREST)\n self.img.save(self.fileName,\"PNG\")", "def write_image(self, filename):\n cv2.imwrite(filename, self.image)", "def write(img, path):\n create_directories_for_file_name(path)\n writer = sitk.ImageFileWriter()\n writer.Execute(img, path, True)", "def saveImage(self, event):\r\n fileWritten = self.image.writeFile()\r\n self.statusBar.SetStatusText(\"Saved {}\".format(fileWritten))", "def save_plot_as_image(self):\r\n plt.savefig(ROOT_DIR + '/presentation/images/' + self.folder + '/' + self.generated_image_name + '.png',\r\n bbox_inches='tight')", "def write_image(path, tokens, weights):\n\n f = render_attn_inner(tokens, weights)\n f.savefig(path, bbox_inches=\"tight\", frameon=False)\n plt.close(f)", "def save_image(image, output_folder, output_name):\n\n\tfolder_path = compute_path(output_folder, 
'dataset')\n\tos.makedirs(folder_path, exist_ok=True)\n\n\tfile_path = os.path.join(folder_path, output_name + '.png')\n\timage.save(file_path)", "def __make_png(self, abspath_img_rgb):\n if not os.path.exists(DIR_PNG):\n os.makedirs(DIR_PNG)\n\n outsize = '{}%'.format(OUTSIZE_RGB)\n img_name_rgb = os.path.basename(abspath_img_rgb)\n suffix_extension_tif = Utils.get_suffix_tif(img_name_rgb)\n img_png = img_name_rgb.replace(suffix_extension_tif, '.png')\n path_img_png = os.path.join(DIR_PNG, img_png)\n\n command = \"gdal_translate -ot byte -of PNG -outsize {} {} \" \\\n \"-a_nodata 0 -q {} {}\".format(\n outsize, outsize, abspath_img_rgb, path_img_png\n )\n os.system(command)\n return os.path.join(DIR_PNG_TO_DB, img_png)", "def export_as_image(self):\n from ExportCommand import ExportCommand\n\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n file_path, _ = QFileDialog.getSaveFileName(self, \"Export as png\", os.getcwd(), \"png file(*.png)\",\n options=options)\n if file_path:\n try:\n # hide image drawing\n self.onViewImageDrawing(False)\n\n cmd = ExportCommand(self.graphicsView.scene(), 'image')\n cmd.display_message.connect(self.onAddMessage)\n\n if cmd.execute(file_path):\n QMessageBox.information(self, self.tr('Information'), self.tr('Successfully export to image file'))\n else:\n QMessageBox.information(self, self.tr('Error'), self.tr('Fail to export to image file'))\n finally:\n if self.actionImage_Drawing.isChecked():\n self.onViewImageDrawing(True)\n self.actionImage_Drawing.setChecked(True)", "def save_image(image, image_path):\n image = ((image[0] + 1) * 127.5).astype(np.uint8) # convert from [-1, 1] to [0, 255]\n img = Image.fromarray(image)\n img.save(os.path.expanduser(image_path))", "def write_image(out, frame):\n if not os.path.exists(out):\n os.makedirs(out)\n now = datetime.now() \n dt_string = now.strftime(\"%H-%M-%S-%f\") \n filename = f'{out}/{dt_string}.png'\n logging.info(f'write image {filename}')\n cv2.imwrite(filename, frame)", "def save_image(img, view, ts, output_dir):\n\n img = tf.image.decode_jpeg(img, channels=3)\n img = Image.fromarray(img.numpy(), 'RGB')\n img.save(os.path.join(output_dir, f'{ts}_{view}.jpeg'))", "def save(img, path, file_name):\n\n name = os.path.join(path,file_name).replace('/', os.sep)\n\n io.imsave(name,img)", "def save_image(self, file_obj):\n manager = pyglet.image.get_buffer_manager()\n colorbuffer = manager.get_color_buffer()\n\n # if passed a string save by name\n if hasattr(file_obj, 'write'):\n colorbuffer.save(file=file_obj)\n else:\n colorbuffer.save(filename=file_obj)", "def saveImageAs(self, name):\n\t\tself.image.save(name)", "def save_image_action(self):\n self.view.save_image(self.settings.get_image_type())", "def write_image(img, img_saving_path):\n if isinstance(img, list):\n img = np.asarray(img, dtype=np.uint8)\n elif isinstance(img, np.ndarray):\n if not img.dtype == np.uint8:\n assert np.max(img) <= 1, \"Maximum pixel value {:.3f} is greater than 1\".format(np.max(img))\n img = (255 * img).astype(np.uint8)\n \n print((img[0]))\n else:\n raise TypeError(\"img is neither a list nor a ndarray.\")\n\n cv2.imwrite(img_saving_path, img)", "def save_screenshot(self, img, file_name: str):\n img.save(str(self.info.screenshots_path / file_name))", "def test_save_tif():\n img = Image.new('RGB', (10, 20))\n\n parameters = {'path': 'green-dot.tif', 'data': [img]}\n\n assert images.save(parameters)", "def write_image(path, image):\n image = tf.image.encode_jpeg(image, quality=100)\n return 
tf.io.write_file(path, image)", "def save(self, filename):\n \n path, name = os.path.split(filename)\n ext = name.split(\".\")[-1]\n _tkExec(self.image.write, filename, format=ext)", "def _save_annotation(annotation, filename):\n\n pil_image = Image.fromarray(annotation.astype(dtype=np.uint8))\n '''\n with tf.io.gfile.GFile(filename, mode='w') as f:\n #with open(filename, mode='w') as f:\n print(f)\n pil_image.save(f, 'PNG')\n '''\n pil_image.save(filename)", "def test_write_rgb(self):\n with tempfile.TemporaryDirectory() as out_dir:\n image_name = os.path.join(out_dir, \"test.png\")\n img = np.random.rand(2, 3, 3)\n img_save_val = (255 * img).astype(np.uint8)\n writer_obj = PILWriter(output_dtype=np.uint8)\n writer_obj.set_data_array(img, channel_dim=-1)\n writer_obj.write(image_name)\n out = np.asarray(Image.open(image_name))\n out = np.moveaxis(out, 0, 1)\n np.testing.assert_allclose(out, img_save_val)", "def imwrite(image, path, quality=95, **plugin_args):\n iio.imsave(path, dtype.im2uint(image), quality=quality, **plugin_args)", "def image_save(path, na: numpy.ndarray):\n # change type\n na = numpy.fmax(numpy.fmin(na * 255.0, 255), 0).astype(\"uint8\")\n # shape is now (1,3,h,w), remove 1\n na = na.reshape(3,na.shape[2],na.shape[3])\n # fix shape\n na = numpy.moveaxis(na, [0,1,2], [2,0,1])\n # shape is now (h,w,3)\n # file\n Image.fromarray(na).save(path)", "def test_save_fail():\n img = Image.new('RGB', (10, 20))\n\n parameters = {'path': 'foo.bar', 'data': [img]}\n\n images.save(parameters)", "def saveScreenPNG(self, filename):\n return nes_lib.saveScreenPNG(self.obj, filename)", "def save_file(self, _filename):\n imgsize = (self.__resolution[0], self.__resolution[1])\n print imgsize\n\n if(self.__resolution[2] == 1):\n # grayscale -> convert to RGB\n bg_white = (255, 255, 255)\n img = Image.new(\"RGB\", imgsize, bg_white)\n\n for x in xrange(0, self.__resolution[0], 1):\n for y in xrange(0, self.__resolution[1], 1):\n col = self.get_color(_pos)\n # duplicate the channels\n ucharcol = (255 * col[0], 255 * col[0], 255 * col[0])\n img.putpixel((x, self.__resolution[1] - y - 1), ucharcol)\n\n elif(self.__resolution[2] == 3):\n # RGB\n bg_white = (255, 255, 255)\n img = Image.new(\"RGB\", imgsize, bg_white)\n\n for x in xrange(0, self.__resolution[0], 1):\n for y in xrange(0, self.__resolution[1], 1):\n col = self.get_color(_pos)\n ucharcol = (255 * col[0], 255 * col[1], 255 * col[2])\n img.putpixel((x, self.__resolution[1] - y - 1), ucharcol)\n\n elif(self.__resolution[2] == 4):\n # RGBA\n bg_white = (255, 255, 255, 255)\n img = Image.new(\"RGBA\", imgsize, bg_white)\n\n for x in xrange(0, self.__resolution[0], 1):\n for y in xrange(0, self.__resolution[1], 1):\n col = 255 * self.get_color((x, y))\n ucharcol = (int(col[0]), int(col[1]), int(col[2]), int(col[3]))\n img.putpixel((x, self.__resolution[1] - y - 1), ucharcol)\n else:\n raise StandardError, ('supported number of channels are 1, 3, and 4, only.')\n\n img.save(_filename)", "def saveImage(turtle, filename):\n ts = turtle.getscreen()\n tc = ts.getcanvas()\n tc.postscript(file=filename)", "def imageSaveOutput(image,name,number):\n FileName = name +\" \"+number\n mpimg.imsave(\"test_images_output\"+'//'+FileName,image)\n return 0;", "def save_image(nom, image, _geo_trans):\n cols = image.shape[2]\n rows = image.shape[1]\n bands = image.shape[0]\n print(bands, cols, rows)\n driver = gdal.GetDriverByName(\"GTiff\")\n out_raster = driver.Create(nom, cols, rows, bands, gdal.GDT_Byte)\n # if (geo_trans):\n # 
outRaster.SetGeoTransform(geo_trans)\n band = out_raster.GetRasterBand(1)\n band.WriteArray(np.round(image[0]))\n band = out_raster.GetRasterBand(2)\n band.WriteArray(np.round(image[1]))\n band = out_raster.GetRasterBand(3)\n band.WriteArray(np.round(image[2]))", "def writeJPG(self, image, save_path):\n #rows, width = image.shape\n #rbgImage = np.zeros((rows, width,3), dtype=np.uint8)\n #rbgImage[:,:,0] = image # b\n #rbgImage[:,:,1] = image # g\n #rbgImage[:,:,2] = image # r\n cv2.imwrite(save_path, image)", "def save_image(path, image, cmap='gist_earth_r'):\n n_cols = n_rows = 1\n n_pixels = 256\n dpi_of_monitor = 96 # HARDCODED DPI VALUE FROM MY OLD DELL LAPTOP...\n figsize = (n_pixels * n_cols / dpi_of_monitor,\n n_pixels * n_rows / dpi_of_monitor)\n f, ax = plt.subplots(n_rows, n_cols, figsize=figsize)\n f.subplots_adjust(wspace=0, hspace=0, left=0, right=1, bottom=0, top=1)\n ax.axis('off')\n ax.imshow(image, cmap=cmap, vmin=0, vmax=None)\n f.savefig(path, dpi=dpi_of_monitor)\n plt.close(f)", "def save_as_jpg(file_name, path = DEFAULT_PATH):\n plt.ioff()\n plt.savefig(path + file_name + '.jpg')\n plt.close()", "def _save_image(self, image_name, image, output_dir):\n dst = '{}/{}'.format(output_dir, self._image_filename(image_name))\n os.makedirs(output_dir, exist_ok=True)\n try:\n with open(dst, 'wb') as f:\n for chunk in image.save(named=self.image_registry_name(image_name)):\n f.write(chunk)\n log.info('Image {} saved as {}'.format(image_name, dst))\n except Exception as err:\n if os.path.isfile(dst):\n os.remove(dst)\n raise err", "def save_subpix_png(path, img, scale_factor=256.0):\n Path(path).parent.mkdir(parents=True, exist_ok=True)\n img = img.astype(np.float32) * scale_factor\n if np.amax(img) > (2**16)-1:\n warnings.warn(\"image out of range(\" + str(np.amax(img)/scale_factor) +\n \"), try with a smaller scale factor. 
loading this file \" +\n \"will results in invalid values, file: \"+str(path))\n img[img > (2**16)-1] = 0\n img = img.astype(np.uint16)\n cv2.imwrite(str(path), img)", "def saveImage(image, path, changename=0):\n im = Image.open(image)\n im.save(os.path.join(path, str(image)+'.'*changename))\n im.close()\n image.close()", "def jpeg_to_png(img: bytes) -> bytes:\n im = Image.open(BytesIO(img))\n width = 240\n height = int(im.size[1] * (240 / im.size[0]))\n im = im.convert(\"RGB\").resize((width, height))\n stream = BytesIO()\n im.save(stream, format=\"PNG\")\n return stream.getvalue()", "def save_fail_img(self):\n self.save_img(\"Email\")", "def _save_frame_as_png(\n self : \"animation\",\n frame : \"np.ndarray\",\n filename : \"str\"\n ):\n im = Image.fromarray(frame)\n im.save(filename)", "def save_to_image_file(self, filename, image_format='png', scale_x=1, scale_y=1):\n\n self.save_barcode_to_pillow(scale_x=scale_x, scale_y=scale_y).save(filename,\n format=image_format)", "def save_image(image_numpy, image_path, aspect_ratio=1.0, color_map=False):\n if color_map:\n import matplotlib.pyplot as plt\n cm = plt.get_cmap('jet')\n colored_image = cm(image_numpy[:,:,0])[:,:,:3]\n# print_numpy(colored_image, val=True, shp=True) # max 1.0 min 0.0 shape (256,256,3)\n \n image_pil = Image.fromarray((colored_image*255.).astype(np.uint8))\n else:\n# print_numpy(image_numpy, val=True, shp=True)\n image_pil = Image.fromarray(image_numpy)\n h, w, _ = image_numpy.shape\n\n if aspect_ratio > 1.0:\n image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)\n if aspect_ratio < 1.0:\n image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)\n image_pil.save(image_path)", "def save_as_png(file_name, path = DEFAULT_PATH):\n plt.ioff()\n plt.savefig(path + file_name + '.png')\n plt.close()", "def save_to_png(arr,\n path=None,\n image_mode=None,\n show=True,\n labels=None,\n scale=None):\n if image_mode is None:\n image_mode = _get_image_type_from_array(arr)\n\n img = Image.fromarray(arr, mode=image_mode)\n\n if labels is not None:\n img = add_header(img, labels)\n\n if scale is None:\n scale = max(1, int(500 / max(arr.shape)))\n\n if scale != 1:\n img = img.resize((img.size[0] * scale, img.size[1] * scale))\n\n # Saving to a temporary file is needed even when showing in a notebook\n if path is None:\n path = '/tmp/tmp.png'\n elif not path.endswith('.png'):\n # Only PNG is supported because JPEG files are unnecessarily 3 times larger.\n path = '{}.png'.format(path)\n with gfile.Open(path, 'wb') as fout:\n img.save(fout, format=path.split('.')[-1])\n\n # Show image (great for notebooks)\n if show:\n display.display(display.Image(path))", "def test_save_images(self):\n save_file(self.quart.save_images, to_single_file=False)", "def _write(self, stream):\n\n self._img.append(self.make_path())\n self._img.append(self.make_border())\n self._img.append(self.make_text())\n\n ET.ElementTree(self._img).write(stream, encoding=\"UTF-8\", xml_declaration=True)", "def save_fig(fig, filename):\n fig_filepath = figures_path / filename\n fig.write_image(str(fig_filepath))\n logging.info(f\"Written figure to {fig_filepath.resolve()}\")", "def saveImage(*args, annotation: Union[AnyStr, bool]=\"\", backgroundColor: Union[List[float,\n float, float], bool]=None, currentView: bool=True, defineTemplate: AnyStr=\"\",\n docTag: Union[AnyStr, bool]=\"\", dragCallback: Script=None, dropCallback:\n Script=None, enable: bool=True, enableBackground: bool=True, enableKeyboardFocus:\n bool=True, exists: bool=True, 
fullPathName: bool=True, height: Union[int, bool]=0,\n highlightColor: Union[List[float, float, float], bool]=None, image: Union[AnyStr,\n bool]=\"\", isObscured: bool=True, manage: bool=True, noBackground: bool=True,\n numberOfPopupMenus: bool=True, objectThumbnail: AnyStr=\"\", parent: Union[AnyStr,\n bool]=\"\", popupMenuArray: bool=True, preventOverride: bool=True, sceneFile:\n AnyStr=\"\", statusBarMessage: AnyStr=\"\", useTemplate: AnyStr=\"\", visible:\n bool=True, visibleChangeCommand: Union[Script, bool]=None, width: Union[int,\n bool]=0, q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def save_plot_as_png(self):\n file_save_path = QFileDialog.getSaveFileName(self, 'Save Plot PNG', \"\", \"PNG (*.png)|*.png\")\n\n if file_save_path[0]:\n self.figure.savefig(file_save_path[0], bbox_inches='tight')\n QMessageBox.about(self, \"Success!\", \"Your plot has been saved as png image successfully.\")", "def dump_image(image, path_image):\n cv2.imwrite(path_image, image)\n return", "def save_image(image, output_path, limits=None):\n\n # Generate a figure with no axes, border, etc.\n fig = plt.figure()\n fig.set_size_inches(1, 1)\n ax = plt.Axes(fig, [0, 0, 1, 1])\n ax.set_axis_off()\n fig.add_axes(ax)\n\n # If the image is complex, warn the user and discard the imaginary part.\n if np.iscomplexobj(image):\n warnings.warn('Image is complex; only saving real part')\n image = np.real(image)\n\n # Plot the image, scaled according to the limits if they are provided.\n if limits is None:\n ax.imshow(image, cmap='gray', aspect='auto', interpolation='none')\n else:\n vmin = limits[0]\n vmax = limits[1]\n ax.imshow(image, cmap='gray', vmin=vmin, vmax=vmax, aspect='auto', interpolation='none')\n\n # Save the figure with a high resolution, and close it.\n plt.savefig(output_path, dpi=800)\n plt.close()", "def write(self, filename):\n\n self.__image.save(filename)", "def save_file(output: Optional[Path], input_path: Path, image: Image.Image):\n if isinstance(output, Path) and str(output) != \"none\":\n if output.is_dir() and output.exists():\n image.save(output.joinpath(input_path.with_suffix(\".png\").name))\n elif output.suffix != \"\":\n if output.suffix != \".png\":\n warnings.warn(\n f\"Only export with .png extension is supported! Your {output.suffix}\"\n f\" extension will be ignored and replaced with .png!\"\n )\n image.save(output.with_suffix(\".png\"))\n else:\n raise ValueError(\"Wrong output path!\")\n elif output is None or str(output) == \"none\":\n image.save(\n input_path.with_name(\n input_path.stem.split(\".\")[0] + \"_bg_removed\"\n ).with_suffix(\".png\")\n )", "def save_image(image, save_dir, name, mean=None):\n if mean:\n image = unprocess_image(image, mean)\n misc.imsave(os.path.join(save_dir, name + \".png\"), image)" ]
[ "0.7533463", "0.7480259", "0.7462284", "0.74407417", "0.73850626", "0.7356224", "0.73480725", "0.72761285", "0.7220644", "0.721927", "0.7194732", "0.7168477", "0.7134913", "0.71134233", "0.7101273", "0.7100358", "0.7097144", "0.70930296", "0.70745164", "0.7069603", "0.70466787", "0.70378333", "0.703373", "0.70327586", "0.70312387", "0.7005926", "0.7005926", "0.70043236", "0.69959724", "0.69720626", "0.6963292", "0.69531524", "0.69456846", "0.69419765", "0.6914137", "0.69118685", "0.6908057", "0.6904757", "0.68915844", "0.68909913", "0.6874423", "0.68744105", "0.68638736", "0.68453306", "0.6842834", "0.6834024", "0.68280274", "0.6819199", "0.68146956", "0.6762597", "0.6748164", "0.67320806", "0.6725084", "0.67250603", "0.671136", "0.66918844", "0.6689213", "0.6686139", "0.6679472", "0.6679026", "0.6676359", "0.6676162", "0.6674221", "0.6671166", "0.6656665", "0.6654454", "0.6652576", "0.6651884", "0.6646348", "0.6642111", "0.6633047", "0.66330373", "0.6626962", "0.6625441", "0.66247445", "0.66108227", "0.6610526", "0.66043687", "0.6592072", "0.6583441", "0.6578684", "0.6573912", "0.65718824", "0.6563734", "0.654326", "0.6543015", "0.6532575", "0.65294755", "0.65183467", "0.65178174", "0.6504502", "0.6501417", "0.64917195", "0.6483743", "0.64782745", "0.64717966", "0.64648896", "0.6464075", "0.6458412", "0.6456693", "0.6450137" ]
0.0
-1
Create a spider-compatible "Doc" file.
def drop_spider_doc(filename, data, comment = None): outf = open(filename, "w") from datetime import datetime outf.write(" ; %s %s %s\n" % (datetime.now().ctime(), filename, comment)) count = 1 # start key from 1; otherwise, it is confusing... for dat in data: try: nvals = len(dat) if nvals <= 5: datstrings = ["%5d %d" % (count, nvals)] else : datstrings = ["%6d %d" % (count, nvals)] for num in dat: datstrings.append("%12.5g" % (num)) except TypeError: # dat is a single number datstrings = ["%5d 1%12.5g" % (count, dat)] datstrings.append("\n") outf.write("".join(datstrings)) count += 1 outf.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def makeDocFile(self):\n\n f_out = \"%s/%s-doc.php\" % (self.dir_out, self.project_id)\n version = max(self.versions)\n\n with open(f_out, 'w') as f:\n f.write(\"<!DOCTYPE html>\\n\" \\\n \"<html xmlns=\\\"http://www.w3.org/1999/xhtml\\\">\\n\" \\\n \"<head>\\n\" \\\n \"<meta http-equiv=\\\"Content-Type\\\" content=\\\"text/html; charset=utf-8\\\"/>\\n\" \\\n \"\\n\" \\\n \"<title>Kit&Pack − Ultimate Power Booster</title>\\n\" \\\n \"<link rel=\\\"shortcut icon\\\" type=\\\"image/png\\\" href=\\\"../favicon.png\\\"/>\" \\\n \"<link rel=\\\"stylesheet\\\" type=\\\"text/css\\\" href=\\\"../css/doc-2.css\\\" />\\n\"\n \"\\n\" \\\n \"</head>\\n\" \\\n \"<body>\\n\" \\\n \"\\n\" \\\n \"<h1>Ultimate Power Booster</h1>\" \\\n \"\\n\")\n\n # Write a list of other versions of the documentation\n f.write(\"<p>Versions de cette documentation.</p>\\n\")\n f.write(\"<ul>\\n\")\n for v in self.versions:\n f.write(\"\\t<li><a href=\\\"%s.php\\\">%s</a></li>\\n\" % (\n v, v))\n f.write(\"</ul>\\n\\n\")\n\n f.write(\"<?php\\n\" \\\n \"include(\\\"%s.php\\\")\\n\" \\\n \"?>\\n\" \\\n \"\\n\" \\\n \"</body>\\n\" \\\n \"</html>\" % (version))", "def beehive_make_doc(self):\n run_data = {\n u'tags':[u'doc'],\n u'local_package_path':self.local_package_path\n } \n self.ansible_playbook(u'docs', run_data, \n playbook=self.beehive_doc_playbook)", "def get_doc(filename: str) -> str:\n\n # Create the header.\n doc = \"# `\" + filename.split(\"/\")[-1] + \"`\\n\\n\"\n\n lines: List[str] = Path(filename).read_text().split(\"\\n\")\n\n for i in range(len(lines)):\n # Create a class description.\n if lines[i].startswith(\"class\"):\n # Skip private classes.\n match = re.search(\"class _(.*):\", lines[i])\n if match is not None:\n continue\n # Add the name of the class\n class_name = re.search(\"class (.*):\", lines[i]).group(1)\n doc += f\"## `{class_name}`\\n\\n\"\n # Add an example.\n class_example = f\"`from tdw.{filename[:-3].replace('/', '.')} import \" + re.sub(r\"(.*)\\((.*)\\)\", r'\\1',\n class_name) + \"`\"\n doc += class_example + \"\\n\\n\"\n doc += PyDocGen.get_class_description(lines, i)\n # Parse an enum.\n if re.search(r\"class (.*)\\(Enum\\):\", lines[i]) is not None:\n doc += \"\\n\\n\" + PyDocGen.get_enum_values(lines, i)\n doc += \"\\n\\n***\\n\\n\"\n # Create a function description.\n elif lines[i].strip().startswith(\"def\"):\n # Skip private functions.\n match = re.search(\"def _(.*)\", lines[i])\n if match is not None and \"__init__\" not in lines[i]:\n continue\n # Append the function description.\n doc += PyDocGen.get_function_documentation(lines, i) + \"\\n\\n***\\n\\n\"\n\n # Move the \"main class\" to the top of the document.\n main_class_name = ''.join(x.capitalize() or '_' for x in filename[:-3].split('_'))\n main_class = re.search(\"(## `\" + main_class_name + \"`((.|\\n)*))\", doc)\n if main_class is not None:\n main_class = main_class.group(1)\n doc_header = re.search(\"(.*)\\n\\n\", doc).group(0)\n doc_temp = doc.replace(main_class, \"\").replace(doc_header, \"\")\n doc = doc_header + main_class + doc_temp\n\n return doc", "def generate(self):\n\n # Write Doxyfile\n doxyfile_content = DOXYFILE_TEMPLATE.format(\n name=\"wurfapi\",\n output_path=self.output_path,\n source_path=\" \".join(self.source_paths),\n recursive=\"YES\" if self.recursive else \"NO\",\n extra=\"\",\n )\n\n doxyfile_path = os.path.join(self.output_path, \"Doxyfile\")\n with open(doxyfile_path, \"w\") as doxyfile:\n\n doxyfile.write(doxyfile_content)\n\n # @todo: Doxygen generates a bunch of warnings. 
We should\n # propagate these somehow - if you want to know what\n # has not been documented etc.\n result = self.runner.run(\n command=self.doxygen_executable + \" Doxyfile\", cwd=self.output_path\n )\n\n # Doxygen reports warnings on stderr. So if we have some output\n # there raise it.\n self._suppress_incorrect_warnings(result.stderr)\n\n if result.stderr.output and self.warnings_as_error:\n raise wurfapi.doxygen_error.DoxygenError(result.stderr.output)\n\n # The Doxygen XML is written to the 'xml' subfolder of the\n # output directory\n return os.path.join(self.output_path, \"xml\")", "async def create_doc(self, *args, **kwargs):\n pass", "def generate_document(self):\n if not self.soup:\n self.soup = BeautifulSoup(self.text)\n body = self.soup.find('body')\n with open('document.txt', 'wb') as f1:\n for tag in body.children:\n text = (str(tag)\n if isinstance(tag, NavigableString)\n else tag.get_text())\n if not text.endswith('\\n'):\n text += '\\n'\n f1.write(text.encode())", "def savedoc():\r\n document.save('QSDoc_{0}_{1}_{2}_{3}.docx'.format(args.server, year, month, day))", "def docGenerator(docRequirements, docFilePath):\n\tamount = int(docRequirements[0])\n\tsize = docRequirements[1]\n\tunit = docRequirements[2].lower()\n\tif not(isValidUnit(unit)):\n\t\tprint \"Unit is incorrect.\"\n\t\treturn\n\tprint \"Creating %s files, each %s%s in size...\" % (amount, size, unit)\n\troundDown = int(float(size))\n\tfilename = fileToUse(roundDown, unit)\n\tnumOfWrites = calcNumOfWrites(roundDown, filename, unit)\n\tfor i in range(0, amount):\n\t\tfor j in range(0, numOfWrites):\n\t\t\twith open(filename) as base:\n\t\t\t\twith open(docFilePath+\"file_%03d.txt\" % i, \"a\") as output:\n\t\t\t\t\toutput.write(base.read())\n\t\tconvertedSize = convertFromBytes(int(os.path.getsize(output.name)), unit)\n\t\tprint \"Created file %s of %s%s size.\" % (output.name, convertedSize, unit)\n\tprint \"Generated %s %s%s files locally.\" % (amount, size, unit)\n\tbase.close()\n\tpushDocsFromDir(docFilePath)", "def write_spider(args, spider_path, templates):\n now_str = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n spider_code = templates.format(author=args.author,\n email_addr=args.email,\n name=args.name,\n version=args.version,\n now=now_str,\n purpose=args.purpose)\n with open(spider_path, 'w') as f_obj:\n f_obj.writelines(spider_code)", "def create_file(self):\n for data_element in self.data:\n title = data_element['title']\n anchor = data_element['href']\n example = data_element['example']\n content = data_element['content']\n if example:\n abstract = '<section class=\"prog__container\">{}<br>{}</section>'.format(content, example)\n\n list_of_data = [\n title, # api title\n 'A', # type is article\n '', # no redirect data\n '', # ignore\n '', # no categories\n '', # ignore\n '', # no related topics\n '', # ignore\n '', # no external link\n '', # no disambiguation\n '', # images\n abstract, # abstract\n anchor # url to doc\n ]\n self.output_file.write('{}\\n'.format('\\t'.join(list_of_data)))", "def generate_documentation(self):\n self.generate_api_docs()\n build.main([\n self.SOURCE_DIR,\n self.BUILD_DIR,\n ])", "def create_spider_doc(fname,spiderdoc):\n\tfrom string import atoi,atof\n\tinfile = open(fname,\"r\")\n\tlines = infile.readlines()\n\tinfile.close()\n\tnmc = len(lines[0].split())\n\ttable=[]\n\tfor line in lines:\n\t\tdata = line.split()\n\tfor i in xrange(0,nmc):\n\t\tdata[i] = atof(data[i])\n\t\ttable.append(data)\n\tdrop_spider_doc(spiderdoc ,table)", "def make_doc(description=False, 
sentinel=COMMENT):\n for readme_path in [os.path.abspath(x) for x in glob(\"*/README.md\")]:\n with open(readme_path, \"r+\") as readme_file:\n readme = readme_file.read()\n if sentinel in readme:\n root = os.path.dirname(readme_path)\n tree = make_tree_doc(root)\n pattern = r\"{}.*?\\n(\\w+)?\\n\".format(sentinel)\n text = re.sub(pattern, format_tree(tree), readme, flags=re.DOTALL)\n readme_file.seek(0)\n readme_file.truncate()\n readme_file.write(text)", "def make_doc(file, \n title, \n author, \n geometry_options = {\"margin\": \"1.15cm\"}, \n options=\"\"):\n \n # geometry_options = {\"tmargin\": \"1.5cm\", \"lmargin\": \"2.5cm\"},\n\n doc = Document(\n default_filepath = file.as_posix(),\n documentclass = 'article',\n document_options = ['12pt', 'titlepage' ] + options,\n fontenc = \"T1\",\n inputenc = ENCODING,\n font_size = \"normalsize\",\n lmodern = True,\n page_numbers = False,\n indent = False,\n geometry_options = geometry_options\n )\n doc.change_document_style(\"empty\")\n date = str(datetime.datetime.date(datetime.datetime.now()))\n comment(doc.preamble, \"Automatically generated by make on \" + date)\n\n doc.preamble.append(Command(\"usepackage\",\"graphicx\"))\n doc.preamble.append(Command(\"usepackage\",\"enumitem\"))\n doc.preamble.append(Command(\"usepackage\",\"url\"))\n doc.preamble.append(NoEscape(\"\\\\setcounter{secnumdepth}{0}\"))\n doc.preamble.append(Command(\"usepackage\",\"titlesec\"))\n doc.preamble.append(NoEscape(\"\\\\titlespacing\\\\section{0pt}{12pt}{12pt}\"))\n doc.preamble.append(NoEscape(\"\\\\titlespacing\\\\subsection{0pt}{12pt}{12pt}\"))\n doc.preamble.append(Command(\"title\", title.title()))\n doc.preamble.append(Command(\"author\", author.title()))\n doc.preamble.append(Command(\"date\", NoEscape(r'\\today')))\n doc.append(NoEscape(r'\\maketitle'))\n return doc", "def onDocumentation(self):\n path = self.settings.path + 'doc\\\\report.pdf'\n os.startfile(path)", "def buildDocumentation():\n helptext = 'usage: build_doc.py <output format> <type of documentation>' \\\n '\\n - html: for html output' \\\n '\\n - pdf: for pdf output' \\\n '\\n\\n - all: complete documentation' \\\n '\\n - dev: only developer documentation' \\\n '\\n - user: only user documentation'\n if len(sys.argv) != 3:\n print helptext\n sys.exit(1)\n\n if sys.argv[1] not in ['pdf', 'html']:\n print helptext\n sys.exit(1)\n if sys.argv[2] not in ['all', 'dev', 'user']:\n print helptext\n sys.exit(1)\n\n copyfile('docs/index_%s.rst.template' % sys.argv[2], 'index.rst') # copy main file into root directory\n os.system('sphinx-build -b %s -c docs -D master_doc=index . 
docs/output/%s/%s' % (sys.argv[1], sys.argv[1], sys.argv[2]))\n os.remove('index.rst') # delete config file from root directory", "def generate_document(self):\n faker = Faker()\n # Start creating a HTML document...\n html = etree.Element(\"html\")\n head = etree.Element(\"head\")\n # Set the document title\n title = etree.Element(\"title\")\n title.text = faker.sentence()\n head.append(title) # Add the title to the head of the document\n # Set document keywords\n keywords = \", \".join([word for word in faker.words(random.randint(0, 6))])\n keywords = etree.Element(\n \"meta\",\n name = \"keywords\",\n content = keywords,\n )\n head.append(keywords) # Add the keywords to the head of the document\n # Set document description\n description = faker.paragraph(random.randint(0, 10))\n description = etree.Element(\n \"meta\",\n name = \"description\",\n content = description,\n )\n head.append(description) # Add the description to the head of the document\n # Set document author\n author = faker.name()\n author = etree.Element(\n \"meta\",\n name = \"author\",\n content = author,\n )\n head.append(author) # Add the author to the head of the document\n # Append the head to the html document\n html.append(head)\n # Add some content to the body of the document\n body = etree.Element(\"body\")\n center = etree.Element(\"center\")\n h1 = etree.Element(\"h1\")\n h1.text = title.text\n center.append(h1)\n body.append(center)\n # Append the body of the document to the HTML document\n html.append(body)\n # return a string representation of the HTML document\n return etree.tostring(html, pretty_print = True)", "def documento():\r\n\tpass", "def document(self, outputDir, docFormat=MARKDOWN):\n self.__docWriter.document(outputDir, docFormat)", "def do_docs(self, path):\n print(\"scaraping documentation\")\n for p in path.glob(\"**/*\"):\n if p.is_file():\n parts = p.relative_to(path).parts\n if parts[-1].endswith(\"rst\"):\n data = tsparse(p.read_bytes())\n blob = DocBlob()\n blob.arbitrary = data\n blob.content = {}\n\n blob.ordered_sections = []\n blob.item_file = None\n blob.item_line = None\n blob.item_type = None\n blob.aliases = []\n blob.example_section_data = Section()\n blob.see_also = []\n blob.signature = None\n blob.references = None\n blob.refs = []\n\n self.docs[parts] = json.dumps(blob.to_json(), indent=2)\n else:\n pass\n # data = p.read_bytes()", "def docs():\n sh('sphinx-build -W -b html docs docs/_build/html')", "def documentation_file(output_filename):\n filename = os.path.basename(output_filename)\n txt_file = '{}.txt'.format(filename)\n\n return os.path.join(os.path.dirname(__file__), os.pardir, 'configs', 'documentation', txt_file)", "def genspider(ctx, name, domain):\n spider_filename = _gen_spider(name, domain)\n print('Created {0}'.format(spider_filename))\n\n test_filename = _gen_tests(name, domain)\n print('Created {0}'.format(test_filename))", "def document(self):\n ...", "def doc(self):\n from distutils.dir_util import copy_tree\n\n def copy_tree_checker(src, dst):\n \"\"\"Wrap copy_tree to avoid pydoit error.\"\"\"\n copy_tree(src, dst)\n return True\n\n return {\n \"actions\": [\n (create_dir, [\"build/doc/source\"]),\n (copy_tree_checker, [\"docs\", \"build/doc/source\"]),\n TaskCreator.get_sphinx() + \"-apidoc -o build/doc/source --force --separate --module-first \" + self.project_name_sc,\n TaskCreator.get_sphinx() + \"-build -j auto -n build/doc/source build/doc/html\"\n ],\n \"verbosity\": 2\n }", "def make_document(source_path=\"notset\") -> nodes.document:\n settings 
= OptionParser(components=(RSTParser,)).get_default_values()\n return new_document(source_path, settings=settings)", "def store_documentation(self, file_path, *args, **dargs):\n pass", "def create_document(file_name):\n path = INPUT_DIR+file_name # assemble the file descriptor\n file = open(path) # open in read mode\n doc = Document() # create a new document\n # add the title field\n doc.add(StringField(\"title\", input_file, Field.Store.YES))\n # add the whole book\n doc.add(TextField(\"text\", file.read(), Field.Store.YES))\n file.close() # close the file pointer\n return doc", "def get_documentation():\n return send_file(base_dir / \"static/documentation.html\", \"text/html; charset=UTF-8\")", "def opendocs():\n _open_file('_build/index.html')", "def docs():", "def store_documentation(self, file_path, css_path=None):\n html = self.get_documentation(css_path)[1]\n with open(file_path, \"w+\", encoding=\"utf8\") as f:\n f.write(html)", "def save(self, content_dir):\n print_verbose(\n \"INFO : Writing random HTML documents to files...\",\n self.args.verbose,\n )\n for i in range(self.n):\n dir_path = content_dir + \"/\" + \"staticpage\" + str(i)\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n index_file = os.path.join(dir_path, \"index.html\") \n with open(index_file, \"w\") as file:\n file.write(self.doc_strings[i].decode(\"utf-8\"))", "def __call__(self, doc):\n return doc", "def generate_docs(root_dir, session):\n ...", "def build_docs(options):\r\n verbose = getattr(options, 'verbose', False)\r\n\r\n cmd = \"cd {dir}; make html quiet={quiet}\".format(\r\n dir=doc_path(options),\r\n quiet=\"false\" if verbose else \"true\"\r\n )\r\n\r\n sh(cmd)", "def documentation():\n return auto.html()", "def create_document(self, output):\n if not os.path.exists(self.template_path):\n raise IOError('Template file not found.')\n\n documents = []\n with open(self.template_path, 'rb') as f:\n data = f.read()\n template = Template(to_unicode(data))\n indent_targets = ['params', 'response_body']\n for v in self.vars:\n if self.template_path.endswith('.rst'):\n for k in indent_targets:\n lines = v[k].split('\\n')\n ret = []\n for i, l in enumerate(lines):\n if i > 0:\n ret.append(' {0}'.format(l).rstrip())\n else:\n ret.append(l)\n v[k] = '\\n'.join(ret)\n\n document = template.substitute(v)\n documents.append(document)\n\n with open(output, 'w') as f:\n f.write('\\n'.join(documents))", "def createPDFDoc(self, filepath):\n print(\"Starting pdf creation\")\n strMD=\"\"\n for fileMD,data in self.graph.nodes(data=True):\n if not os.path.isfile(fileMD):\n sys.exit(\"Error: \" + fileMD + \" does not exist\")\n if not fileMD.endswith(\"md\" or \"markdown\"):\n sys.exit(fileMD + \" is not a markdown file\");\n print(\"Found file: \" + fileMD)\n strMD = strMD + \" \" + fileMD\n cmd = \"pandoc --latex-engine=xelatex -s -o \" + filepath + strMD\t\n print(\"Starting file conversion.\")\n if subprocess.call(cmd) != 0:\n print(\"Conversion failed\")\n else:\n print(\"Saving pdf file to: \" + filepath)\n print(\"Conversion successfull\")", "def get_documentation(path=\"\"):\n return \"\"\"<HTML><head><title>Python Minidoc for \"\"\"+path+\"\"\"</title></head>\n <body>\n \"\"\"+get_documentation_body(path)+\"\"\"\n </body></html>\"\"\"", "def write_api_docs(self, outdir):\r\n if not os.path.exists(outdir):\r\n os.mkdir(outdir)\r\n # compose list of modules\r\n modules = self.discover_modules()\r\n self.write_modules_api(modules,outdir)", "def write_doc(self, docname: str, doctree: nodes.document) -> None:\n 
self.fix_ids(doctree)\n self.add_visible_links(doctree, self.config.epub_show_urls)\n super().write_doc(docname, doctree)", "def document(url: str) -> Document:\n return Document(url)", "def CreateNewFile(self):\n\t\tself.acad.Documents.Add()", "def gen_cmake_doc(cmake_file, rst_file):\n should_skip = False\n basedir = os.path.dirname(rst_file)\n if not os.path.exists(basedir):\n os.makedirs(basedir)\n if not os.path.exists(rst_file):\n should_skip = False\n else:\n dest_mtime = os.stat(rst_file).st_mtime\n src_mtime = os.stat(cmake_file).st_mtime\n if src_mtime < dest_mtime:\n should_skip = True\n if should_skip:\n return\n print(\"Generating\", rst_file)\n with open(cmake_file, \"r\") as fp:\n txt = fp.read()\n rst = gen_rst(txt)\n with open(rst_file, \"w\") as fp:\n fp.write(\".. Generated by %s\\n.. DO NOT EDIT\\n\\n\" % sys.argv[0])\n fp.write(rst)", "def test_doc():\n pass", "def generate_api_docs(self):\n if self.API_OUTPUT_DIR:\n args = [\n # Put documentation for each module on its own page\n '-e',\n # don't create the \"modules.rst\" file (the table of contents\n # file) as this is already provided by the package's main rst\n # file.\n '-T',\n # Overwrite existing files\n '--force',\n '-o', self.API_OUTPUT_DIR,\n # the package to generate docs from\n self.PROJECT_DIR\n ]\n excludes = [\n os.path.join(self.PROJECT_DIR, p)\n if not os.path.isabs(p) else p\n for p in self.API_EXCLUDE_DIRS\n ]\n apidoc.main(args + excludes)", "def write_doc(self, docname, plain_text, comments, images, drawings, start, end, suggestions, log, flat_log):\n base_dir = os.path.realpath(os.path.join(\n KIOutils.dir_path(__file__), '..', 'downloaded', 'document', docname, '{}-{}'.format(str(start), str(end))))\n KIOutils.ensure_path(base_dir)\n writing_msg = 'Writing {} to disk'\n\n self.logger.info(writing_msg.format('drawings'))\n for i, drawing in enumerate(drawings):\n filename = os.path.join(base_dir, 'drawing' + str(i) + drawing[1])\n self.logger.debug('Writing drawing {} with name {}'.format(i, filename))\n with open(filename, 'wb') as f:\n f.write(drawing[0])\n\n self.logger.info(writing_msg.format('images'))\n for i, img in enumerate(images):\n filename = os.path.join(base_dir, 'img' + str(i) + img[1])\n self.logger.debug('Writing img {} with name {}'.format(i, filename))\n with open(filename, 'wb') as f:\n f.write(img[0])\n\n filename = os.path.join(base_dir, 'plain.txt')\n with open(filename, 'w') as f:\n self.logger.info(writing_msg.format('plain text'))\n f.write(plain_text.encode('utf-8'))\n\n filename = os.path.join(base_dir, 'comments.txt')\n with open(filename, 'w') as f:\n self.logger.info(writing_msg.format('comments'))\n f.write('\\n'.join(str(line) for line in comments))\n\n filename = os.path.join(base_dir, 'suggestions.txt')\n with open(filename, 'w') as f:\n self.logger.info(writing_msg.format('suggestions'))\n f.write(json.dumps(suggestions, ensure_ascii=False))\n\n filename = os.path.join(base_dir, 'revision-log.txt')\n with open(filename, 'w') as f:\n self.logger.info(writing_msg.format('revision log'))\n f.write('chunkedSnapshot\\n')\n for line in log['chunkedSnapshot']:\n f.write(json.dumps(line) + '\\n')\n f.write('changelog\\n')\n for line in log['changelog']:\n f.write(json.dumps(line) + '\\n')\n\n filename = os.path.join(base_dir, 'flat-log.txt')\n with open(filename, 'w') as f:\n self.logger.info(writing_msg.format('flat log'))\n for line in flat_log:\n f.write(line + '\\n')\n\n print '\\nFinished with output in directory', base_dir", "def _create_documenter(env: 
sphinx.environment.BuildEnvironment,\n documenter_cls: Type[sphinx.ext.autodoc.Documenter],\n name: str) -> sphinx.ext.autodoc.Documenter:\n bridge = _FakeBridge(env)\n documenter = documenter_cls(bridge, name)\n assert documenter.parse_name()\n assert documenter.import_object()\n if documenter_cls.objtype == 'class':\n bridge.genopt['special-members'] = [\n '__eq__',\n '__getitem__',\n '__setitem__',\n # '__hash__',\n '__init__',\n '__class_getitem__',\n '__call__',\n '__array__',\n ]\n try:\n documenter.analyzer = sphinx.pycode.ModuleAnalyzer.for_module(\n documenter.get_real_modname())\n # parse right now, to get PycodeErrors on parsing (results will\n # be cached anyway)\n documenter.analyzer.find_attr_docs()\n except sphinx.pycode.PycodeError:\n # no source file -- e.g. for builtin and C modules\n documenter.analyzer = None\n return documenter", "def generate():\n local('cd doc && make clean && make html')", "def build_document(self):\n pass", "def with_docs(self):\r\n self._configurations.append('javadoc')\r\n return self", "def process_doxygen(self):\n if not getattr(self, \"doxygen_conf\", None):\n self.generator.bld.fatal(\"No doxygen configuration file supplied.\")\n if not isinstance(self.doxygen_conf, Node.Node):\n self.generator.bld.fatal(\"'doxygen_conf' must be a Node.\")\n\n self.create_task(\n \"doxygen\",\n self.doxygen_conf,\n cwd=self.bld.path.get_bld().abspath(),\n output=Context.STDERR,\n )", "def _write_docx(self):\n with ZipFile(self.out_file, 'w') as f:\n self._write_content_types(f)\n self._write_app(f)\n self._write_core(f)\n self._write_rels(f)\n self._write_document(f)\n self._write_fonts(f)\n self._write_document_rels(f)\n self._write_settings(f)\n self._write_styles(f)", "def __call__(self, doc: Doc) -> Doc:\n return doc", "def __call__(self, doc: Doc) -> Doc:\n return doc", "def generate_docs(self) -> List[Path]:\n outputs = []\n for file in self.files:\n if (stem := file.stem) == \"__init__\":\n # We might have more than one __init__.py file depending on package structure and these files shouldn't\n # contain methods, so we don't want to convert them\n continue\n\n if not (doc := get_doc(file)):\n continue # No docstring returned, skip this file\n doc = doc[33:] # First 33 characters are not required for our docs\n\n # Write the output we've generated to a file\n (output := self.directory / f\"{stem}.md\").write_text(generate_header(stem) + doc)\n outputs.append(output)\n return outputs", "def fini_doc(self):\n raise NotImplementedError()", "def build_docs(source, destination, doctrees):\n sphinx_argv = [\n '-b', 'html',\n '-d', doctrees,\n source,\n destination]\n\n sphinx_main(['sphinx-build'] + sphinx_argv)", "def create_new_doc(site_name, doc_name, directory=''):\n if not doc_name:\n raise AttributeError('no_name')\n\n if not directory:\n directory = '/'\n doc_name = pathlib.Path(doc_name).stem\n siteid = _get_site_id(site_name)\n ## if siteid is None:\n ## raise FileNotFoundError('no_site')\n dirid = _get_dir_id(siteid, directory)\n if dirid is None:\n raise FileNotFoundError('no_subdir')\n if doc_name in _get_docs_in_dir(dirid):\n raise FileExistsError\n\n new_doc_id = _add_doc()\n # add into site collection\n dts = datetime.datetime.utcnow()\n querystring = ('insert into {} (dir_id, docname, source_docid, source_updated) '\n 'values (%s, %s, %s, %s);')\n result = execute_query(querystring.format(TABLES[3]), (dirid, doc_name, new_doc_id, dts))", "def html():\n builtdocs = path(\"docs\") / options.sphinx.builddir / \"html\"\n destdir = path(PACKAGE) / 
\"docs\"\n destdir.rmtree()\n builtdocs.move(destdir)", "def _generate_custom(project, docdir, gendir):\n custom_dir = os.path.join(docdir, 'generate')\n print(f\"Generating custom docs for {project} in {gendir!r}\")\n\n for root, _dirs, files in os.walk(custom_dir):\n subdir = root.split(custom_dir, 1)[1].strip('/')\n if subdir:\n try:\n os.mkdir(os.path.join(gendir, subdir))\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n\n for script in sorted(x for x in files if not x.startswith(('.', '_'))):\n script_path = os.path.join(custom_dir, subdir, script)\n if not os.access(script_path, os.X_OK):\n continue\n\n fake_file = StringIO()\n with syspath(os.path.dirname(script_path)):\n module = import_module(os.path.basename(os.path.splitext(script_path)[0]))\n module.main(fake_file, docdir=docdir, gendir=gendir)\n\n fake_file.seek(0)\n if data := fake_file.read():\n rst = os.path.join(gendir, subdir, os.path.splitext(script)[0] + '.rst')\n print(f\"generating {rst}\")\n with open(rst, 'w') as f:\n f.write(data)", "def createManPage(target, source, env):\n os.system('asciidoc -d manpage -b docbook -o alock.xml ' + str(source[0]))\n os.system('xmlto man alock.xml')\n os.remove('alock.xml')\n return None", "def main(rc):\n with store_client(rc) as sclient:\n for doc in rc.documents:\n sclient.copydoc(doc)", "def new_document(klass, name=None, author=None):\n doc = Factory.new_document(klass, author)\n doc.name = name\n doc._osl.id = uuid.uuid4()\n return doc", "def build(self) -> None:\n\n print(\"Genereting files..\")\n self.doc = self.doc + r'\\end{document}'\n\n f = open(\"latex\\\\\" + self.report_name + '.tex', 'w')\n f.write(self.doc)\n f.close()\n\n os.chdir('latex')\n\n cmd = ['pdflatex', '-interaction', 'nonstopmode', self.report_name + '.tex']\n #cmd = ['pdflatex', '-interaction', self.report_name + '.tex']\n\n for i in range(2):\n proc = subprocess.Popen(cmd)\n proc.communicate()\n retcode = proc.returncode\n if not retcode == 0:\n os.chdir('..')\n raise ValueError('Error {} executing command: {}'.format(retcode, ' '.join(cmd)))\n\n os.unlink(self.report_name + '.aux')\n os.unlink(self.report_name + '.lof')\n os.unlink(self.report_name + '.log')\n os.unlink(self.report_name + '.lot')\n os.unlink(self.report_name + '.out')\n os.unlink(self.report_name + '.toc')\n\n os.chdir('..')", "def create_document(self, data):\n command = CreateDocumentFromOneOffixxTemplateCommand(self.context, data['title'], data['template'])\n return command.execute()", "def createDocument(self, file, description=\"\", index=None):\n\n assert isinstance(index, int) or index is None\n header = self._baseHeader.copy().copy()\n header['Content-Type'] = \"application/vnd.huddle.data+json\"\n uploadFile = MultiPartFormRequest(file)\n\n skeletonDocument= {\"title\" : uploadFile.title, \"description\" : description, \"extension\" : uploadFile.extension}\n jsonString = json.dumps(skeletonDocument)\n\n try:\n if index is None:\n url = self.metaData.getLink(\"create-document\")\n else:\n url = self.getFolders()[index].metaData.getLink(\"create-document\")\n\n assert url is not None\n\n response = self._adapter.postRequest(url, header, jsonString)\n\n uploadUrl = HyperLinkResource(response['Body']).getLink(\"upload\")\n selfUrl = HyperLinkResource(response['Body']).selfLink\n assert uploadUrl is not None\n\n boundary = uploadFile.create_boundary_string()\n header['Content-Type'] = \"mutipart/form-data; boundary=\" + boundary\n body = uploadFile.encode_mutipart_form_data(boundary)\n header['Content-Length'] = 
len(body)\n\n response = self._adapter.putRequest(uploadUrl, header, body)\n\n return Document(self._client, selfUrl)\n except IndexError:\n print(\"the index: \" + index + \" does not exist in the list of folder numbers we have\")", "def dummy(doc):\r\n return doc", "def view_doc(request):\n cfg = request.cfg\n document = request.where\n filename = cfg.path(os.path.join(cfg.options.template_dir, \"docroot\", document))\n\n # Stat the file to get content length and last-modified date.\n try:\n info = os.stat(filename)\n except OSError as v:\n raise ViewVCException(\n 'Static file \"%s\" not available (%s)' % (document, str(v)), \"404 Not Found\"\n )\n content_length = str(info[stat.ST_SIZE])\n last_modified = info[stat.ST_MTIME]\n\n # content_length + mtime makes a pretty good etag.\n if check_freshness(request, last_modified, \"%s-%s\" % (content_length, last_modified)):\n return\n\n try:\n fp = open(filename, \"rb\")\n except IOError as v:\n raise ViewVCException(\n 'Static file \"%s\" not available (%s)' % (document, str(v)), \"404 Not Found\"\n )\n\n if document[-3:] == \"png\":\n mime_type = \"image/png\"\n elif document[-3:] == \"jpg\":\n mime_type = \"image/jpeg\"\n elif document[-3:] == \"gif\":\n mime_type = \"image/gif\"\n elif document[-3:] == \"css\":\n mime_type = \"text/css\"\n else: # assume HTML\n mime_type = None\n copy_stream(fp, get_writeready_server_file(request, mime_type, content_length=content_length))\n fp.close()", "def docs_build(directory, site_name, view=True, assume_yes=False):\n context = toolkit.load_data_context_with_error_handling(directory)\n build_docs(context, site_name=site_name, view=view, assume_yes=assume_yes)\n toolkit.send_usage_message(\n data_context=context, event=\"cli.docs.build\", success=True\n )", "def write_comments_in_xacro(doc, filename):\n if(not os.path.isdir(filename[0:filename.rfind('/') + 1])):\n os.makedirs(filename[0:filename.rfind('/')])\n\n file = open(filename, 'w+')\n file.write(doc.toprettyxml())\n file.close()\n file = open(filename, 'r')\n firstline, remaining = file.readline(), file.read()\n file.close()\n file = open(filename, 'w')\n file.write(firstline)\n file.write(\n '<!--**************************************************************\\n'\n ' **** File automatically generated by generate_urdf.py script ****\\n'\n ' **************************************************************-->\\n')\n file.write(remaining)\n file.close()", "def _genspider(self, module, name, domain, template_name, template_file):\n tvars = {\n 'project_name': settings.get('BOT_NAME'),\n 'ProjectName': string_camelcase(settings.get('BOT_NAME')),\n 'module': module,\n 'name': name,\n 'domain': domain,\n 'classname': '%sSpider' % ''.join([s.capitalize() \\\n for s in module.split('_')])\n }\n\n spiders_module = __import__(settings['NEWSPIDER_MODULE'], {}, {}, [''])\n spiders_dir = abspath(dirname(spiders_module.__file__))\n spider_file = \"%s.py\" % join(spiders_dir, module)\n\n shutil.copyfile(template_file, spider_file)\n render_templatefile(spider_file, **tvars)\n print \"Created spider %r using template %r in module:\" % (name, \\\n template_name)\n print \" %s.%s\" % (spiders_module.__name__, module)", "def docx():\n env.file_ext = \".docx\"\n local(\"pandoc {input_files} -o {output_file}{file_ext} --bibliography={bib_file} --csl={csl_file} --toc\".format(**env))", "def generate_pdf(list,id):\n\n doc = SimpleDocTemplate(settings.STATIC_ROOT+\"/tests/\"+str(id)+\"/\"+str(id)+\".pdf\")\n\n Story = [Spacer(1,2*inch)]\n styles = stylesheet()\n global 
Title\n\n # Add 10 questions with boxes below\n for i in list:\n if not i[0] in \"skills-scan\" and not i[0] in \"csrfmiddlewaretoken\" and not i[0] in \"titre\" and not i[0] in \"custom\":\n tmp = int(i[0])+1\n bogustext = (str(tmp)+\". %s\" % i[1])\n p = Paragraph(bogustext, styles['default'])\n # Write the paragraph\n\n draw = Drawing()\n # rect(x1,y1,width,height)\n rec = Rect(0, 100, 450, 150)\n rec.fillColor = colors.white\n # draw the rect under each paragraph\n draw.add(rec)\n p.keepWithNext = True\n Story.append(p)\n Story.append(draw)\n Story.append(Spacer(1,-0.9 * inch))\n elif i[0] in \"titre\":\n Title = i[1]\n # build the document by inserting the whole story\n doc.build(Story, onFirstPage=myFirstPage, onLaterPages=myLaterPages)\n return str(id)+\".pdf\"", "def testMakeDocument(self):\n\n # I've split the wanted result string up into substrings so I can\n # amend it more easily (or so I hope).\n trivial_package = \"\"\"\\\n<document source=\"Package trivial_package\">\n <section class=\"package\" id=\"package-trivial-package\" name=\"package trivial_package\">\n <title>\n Package trivial_package\\n\"\"\"\n\n # The \"xml:space\" attribute is by observation, not prediction\n module_init = \"\"\"\\\n <section class=\"module\" id=\"module-trivial-package-init\" name=\"module trivial_package.__init__\">\n <title>\n Module trivial_package.__init__\n <literal_block class=\"docstring\" xml:space=\"preserve\">\n A simple docstring.\\n\"\"\"\n\n module_file1 = \"\"\"\\\n <section class=\"module\" id=\"module-trivial-package-file1\" name=\"module trivial_package.file1\">\n <title>\n Module trivial_package.file1\n <literal_block class=\"docstring\" xml:space=\"preserve\">\n This is the first example file. It *does* use reStructuredText.\n <section class=\"class\" id=\"class-trivial-package-file1-fred\" name=\"class trivial_package.file1.fred\">\n <title>\n Class trivial_package.file1.Fred\n <literal_block class=\"docstring\" xml:space=\"preserve\">\n An example class - it announces each instance as it is created.\\n\"\"\"\n\n module_file2 = \"\"\"\\\n <section class=\"module\" id=\"module-trivial-package-file2\" name=\"module trivial_package.file2\">\n <title>\n Module trivial_package.file2\n <literal_block class=\"docstring\" xml:space=\"preserve\">\n This module is *not* using reStructuredText for its docstrings.\\n\"\"\"\n\n non_python_file = \"\"\"\\\n <section class=\"file\" id=\"file-trivial-package-not-python\" name=\"file trivial_package.not_python\">\n <title>\n File trivial_package.not_python\n <paragraph>\n File \n <literal>\n not_python\n is not a Python module.\\n\"\"\"\n\n sub_package = \"\"\"\\\n <section class=\"package\" id=\"package-trivial-package-sub-package\" name=\"package trivial_package.sub_package\">\n <title>\n Package trivial_package.sub_package\\n\"\"\"\n\n sub_module_init = \"\"\"\\\n <section class=\"module\" id=\"module-trivial-package-sub-package-init\" name=\"module trivial_package.sub_package.__init__\">\n <title>\n Module trivial_package.sub_package.__init__\\n\"\"\"\n\n wanted_result = (trivial_package + module_init + module_file1 +\n module_file2 + non_python_file + sub_package +\n sub_module_init)\n\n tree = parse_package(\"trivial_package\")\n\n document = make_document(tree)\n\n actual_result = document.pformat()\n\n if wanted_result != actual_result:\n print \"+++++++++++++++++++++++++ WANT\"\n print wanted_result\n print \"+++++++++++++++++++++++++ GOT\"\n print actual_result\n print \"+++++++++++++++++++++++++\"\n\n 
self.assertEqual(actual_result,wanted_result)", "def create_new_doc(self, doc: Doc, min_prob: float = 0.25) -> Doc:\n\n # print(\"running on\", doc[:10])\n\n if not self.form_frequencies:\n raise RuntimeError(\n \"Cannot truecase without a dictionary of form frequencies\")\n\n tokens = []\n spaces = []\n doctext = doc.text\n for tok in doc:\n toktext = tok.text\n\n # We only change casing for words in Title or UPPER\n if tok.is_alpha and toktext[0].isupper():\n cond1 = tok.is_upper and len(toktext) > 2 # word in uppercase\n cond2 = toktext[0].isupper(\n ) and not tok.is_sent_start # titled word\n if cond1 or cond2:\n token_lc = toktext.lower()\n if token_lc in self.form_frequencies:\n frequencies = self.form_frequencies[token_lc]\n if frequencies.get(toktext, 0) < min_prob:\n alternative = sorted(\n frequencies.keys(), key=lambda x: frequencies[x])[-1]\n\n # We do not change from Title to to UPPER\n if not tok.is_title or not alternative.isupper():\n toktext = alternative\n\n tokens.append(toktext)\n\n # Spacy needs to know whether the token is followed by a space\n if tok.i < len(doc)-1:\n spaces.append(doctext[tok.idx+len(tok)].isspace())\n else:\n spaces.append(False)\n\n # Creates a new document with the tokenised words and space information\n doc2 = Doc(self.model.vocab, words=tokens, spaces=spaces) #type: ignore\n # print(\"finished with doc\", doc2[:10])\n return doc2", "def main():\n # We know that qidoc build will set the correct cwd\n qibuild_dir = \"..\"\n qibuild_dir = os.path.abspath(qibuild_dir)\n this_file = __file__\n this_dir = os.path.dirname(this_file)\n cmake_api = os.path.join(this_dir, \"../source/advanced/cmake/api\")\n cmake_api = os.path.abspath(cmake_api)\n if not os.path.exists(cmake_api):\n os.makedirs(cmake_api)\n qibuild_cmake = os.path.join(qibuild_dir, \"cmake\", \"qibuild\")\n for filename in DOCUMENTED_FILES:\n cmake_file = os.path.join(qibuild_cmake, filename + \".cmake\")\n rst_file = os.path.join(cmake_api, filename + \".rst\")\n gen_cmake_doc(cmake_file, rst_file)", "def __create_document(self):\n doc = xml.dom.minidom.Document()\n kml = doc.createElement('kml')\n kml.setAttribute('xmlns', 'http://www.opengis.net/kml/2.2')\n doc.appendChild(kml)\n document = doc.createElement('Document')\n kml.appendChild(document)\n docName = doc.createElement('name')\n document.appendChild(docName)\n docName_text = doc.createTextNode(self['name'])\n docName.appendChild(docName_text)\n docDesc = doc.createElement('description')\n document.appendChild(docDesc)\n docDesc_text = doc.createTextNode(self['description'])\n docDesc.appendChild(docDesc_text)\n return doc", "async def handle_doc(self, request: web.Request) -> web.Response:\n spec = request.app[\"spec\"]\n spec_url = request.app.router[\"openapi_spec\"].url_for()\n title = spec.info.title\n html = f\"\"\"\n <!DOCTYPE html>\n <html>\n <head>\n <title>{title}</title>\n <!-- needed for adaptive design -->\n <meta charset=\"utf-8\"/>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n \"\"\"\n if self.font:\n html += f\"\"\"\n <link href=\"https://fonts.googleapis.com/css?{self.font}\" rel=\"stylesheet\">\n \"\"\"\n html += f\"\"\"\n <link rel=\"shortcut icon\" href=\"{self.favicon_url}\">\n <!--\n ReDoc doesn't change outer page styles\n -->\n <style>\n body {{\n margin: 0;\n padding: 0;\n }}\n </style>\n </head>\n <body>\n <redoc spec-url=\"{spec_url}\"></redoc>\n <script src=\"{self.redoc_js_url}\"> </script>\n </body>\n </html>\n \"\"\"\n return web.Response(text=html, 
content_type=\"text/html\")", "def create_new_doc(self, doc: Doc) -> Doc:\n\n return spacy.tokens.Doc(self.model.vocab, [tok.text for tok in doc], #type: ignore\n [tok.whitespace_ for tok in doc])", "def save(self):\n html_file = '{}/{}.html'.format(self.web_dir, self.title)\n f = open(html_file, 'wt')\n f.write(self.doc.render())\n f.close()", "def _write_member_documentation_pages(\n documenter: sphinx.ext.autodoc.Documenter):\n for entry in _get_documenter_members(documenter):\n if entry.is_inherited:\n continue\n if (entry.overload and entry.overload.overload_id and\n re.fullmatch('[0-9]+', entry.overload.overload_id)):\n logger.warning('Unspecified overload id: %s', entry.object_name)\n member_rst_path = os.path.join(documenter.env.app.srcdir, 'python', 'api',\n entry.page_name + '.rst')\n objtype = entry.documenter.objtype\n member_content = ''\n if objtype == 'class':\n member_content += ':duplicate-local-toc:\\n\\n'\n member_content += sphinx_utils.format_directive(\n 'tensorstore-python-apidoc',\n options=dict(\n fullname=entry.full_name,\n objtype=objtype,\n importname=entry.import_name,\n objectdescription=True,\n subscript=entry.subscript,\n overload=cast(ParsedOverload, entry.overload).overload_id,\n ),\n )\n pathlib.Path(member_rst_path).write_text(member_content)\n _write_member_documentation_pages(entry.documenter)", "def __buildDocumentClassDocString():\n\n # build a dictionary of tags and their descriptions, seems a little over\n # the top, but keeps all the information in one place\n tagsStrings = {\n \"comment\" : \"Define the comment string\",\n \"define\" : \"Define the symbol name for #define's\",\n \"info\" : \"Information string, to end up in the 'info' output\",\n \"instance\" : \"Instance name\",\n \"matlabRoot\" : \"Name of variable used by the matlab output\",\n \"members\" : \"List of symbols, which are going to be children of this symbol\",\n \"name\" : \"Name of this symbol\",\n \"size\" : \"Size of this symbol, i.e. indicate it is an array\",\n \"subtype\" : \"Define the actual type of general symbol\",\n \"symbol\" : \"Define a symbol, either a top level entity a child in a members\",\n \"test\" : \"Define the preprocessor test\",\n \"text\" : \"Text to put into a banner symbol\",\n \"title\" : \"Set the overall document title\",\n \"value\" : \"Define a value for this symbol\",\n \"valuesRequired\" : \"Does the enumeration allow automatic value assignment in entries\",\n }\n # build the list of classes\n classes = dict(filter(lambda (k,v): type(v) == types.ClassType, globals().iteritems()))\n (tagsUsed, optionsUsed) = buildKeys(classes)\n\n # build the string we are going to add to the document class\n s = \"Document class that represents the XML document and contains the data.\\n\\n\"\n s += \"Available tags:\\n\"\n\n for tag in tagsStrings:\n try:\n used = \" Required by : %s\\n\" % (\", \".join(tagsUsed[tag]))\n except KeyError:\n used = \"\"\n try:\n opts = \" Optional for: %s\\n\" % (\", \".join(optionsUsed[tag]))\n except KeyError:\n opts = \"\"\n s += \" %s\\n %s\\n %s\\n\\n%s%s\\n\" % (tag, \"-\"*len(tag), tagsStrings[tag], used, opts)\n\n return s", "def download_sd_doc(pii, view='full'):\n\n file_path = os.path.join(outdir, 'sd-download', pii+'-'+view+'.xml')\n if not os.path.exists(file_path):\n print(' Download:', pii + '-' + view + '.xml')\n\n url = 'http://api.elsevier.com/content/article/pii:' + pii\n vals = {'view': view,\n 'apikey': SD_API_KEY}\n r = requests.get(url, params=vals)\n\n if r.status_code != requests.codes.ok:\n print('!! 
ScienceDirect server error:', r.status_code,\n file=sys.stderr)\n print(r.text, file=sys.stderr)\n return\n\n with io.open(file_path, 'w', encoding='utf8') as out:\n out.write(r.text)", "def dumpDoc(modulename, directory=None):\n docco = getObjectsDefinedIn(modulename, directory)\n print('codegrab.py - ReportLab Documentation Utility')\n print('documenting', modulename + '.py')\n print('-------------------------------------------------------')\n print()\n if docco.functions == []:\n print('No functions found')\n else:\n print('Functions:')\n for f in docco.functions:\n print(f.proto)\n print(' ' + f.doc)\n\n if docco.classes == []:\n print('No classes found')\n else:\n print('Classes:')\n for c in docco.classes:\n print(c.name)\n print(' ' + c.doc)\n for m in c.methods:\n print(m.proto) # it is already indented in the file!\n print(' ' + m.doc)\n print()", "def create_blank_index(fname: str) -> None:\n with open(fname, \"w\") as outfile:\n outfile.write(\"<!DOCTYPE HTML>\\n\")\n outfile.write(\"<html lang=\\\"en\\\">\\n\")\n outfile.write(\" <head>\\n\")\n outfile.write(\" <meta charset=\\\"utf-8\\\" />\\n\")\n outfile.write(\" <title>n/a</title>\\n\")\n outfile.write(\" <meta name=\\\"description\\\" content=\\\"n/a\\\" />\\n\")\n outfile.write(\" </head>\\n\")\n outfile.write(\" <body>\\n\")\n outfile.write(\" </body>\\n\")\n outfile.write(\"</html>\\n\")", "def openDoc (self):\n fileName = QFileDialog.getOpenFileName(self,\n self.tr(\"Open File\"), \"\", \"All documents (*.%s;*.%s;*.%s;*.%s;*.%s;*.%s;*.%s);;Tests abstract (*.%s);;Tests unit (*.%s);;Tests suite (*.%s);;Tests plan (*.%s);;Tests global (*.%s);;Tests config (*.%s);;Tests data (*.%s)\" %\n ( TestAbstract.TYPE, TestUnit.TYPE, TestSuite.TYPE, TestPlan.TYPE, TestPlan.TYPE_GLOBAL, TestConfig.TYPE, TestData.TYPE, \n TestAbstract.TYPE, TestUnit.TYPE, TestSuite.TYPE, TestPlan.TYPE, TestPlan.TYPE_GLOBAL, TestConfig.TYPE, TestData.TYPE) )\n \n # new in v17.1\n if QtHelper.IS_QT5:\n _fileName, _type = fileName\n else:\n _fileName = fileName\n # end of new\n \n if not len(_fileName):\n return\n \n extension = str(_fileName).rsplit(\".\", 1)[1]\n if not ( extension.lower() in [ TestSuite.TYPE, TestPlan.TYPE, TestPlan.TYPE_GLOBAL, TestConfig.TYPE,\n TestData.TYPE, TestUnit.TYPE, TestAbstract.TYPE ] ):\n QMessageBox.critical(self, self.tr(\"Open Failed\") , self.tr(\"File not supported\") )\n return\n \n tmp = str(_fileName).rsplit(\"/\", 1)\n path = tmp[0]\n if len(tmp) > 1:\n _filename = tmp[1].rsplit(\".\", 1)[0]\n else:\n _filename = tmp[0].rsplit(\".\", 1)[0]\n self.newTab( path = path, filename = _filename, \n extension = extension, repoDest=UCI.REPO_UNDEFINED)", "def document(self, outputDir, docFormat=MARKDOWN):\n self.__masterFunctions.document(outputDir, docFormat)", "def create_document(lines_of_document):\n\n Document = collections.namedtuple('Document', ['header', 'body'])\n document = Document(body=' '.join(lines_of_document[1:]),\n header=lines_of_document[0])\n\n return document", "def javadoc(self, irc, msg, args, num, req):\n self.googleq('download.oracle.com/javase/6/docs/', req, num, irc)", "def apiDocs():\n\treturn render_template('apiDocs.html')", "def build_docs(session):\n envbindir = session.bin\n session.install(\"-e\", \".[all,docs]\")\n with session.chdir(\"docs/\"):\n session.run(\n \"sphinx-autobuild\",\n \"-j\",\n \"auto\",\n \"--open-browser\",\n \"-qT\",\n \".\",\n f\"{envbindir}/../tmp/html\",\n )", "def serve_sphinx_docs():\n\n return redirect(\n 
\"https://final-epam.readthedocs.io/en/latest/main_launch.html#main_launch.compile\",\n code=302,\n )", "def NewDoc(self, cur, URL, encoding, options):\n ret = libxml2mod.xmlReaderNewDoc(self._o, cur, URL, encoding, options)\n return ret", "def create_file(self, file_name=\"result\", extension=\"html\"):\n with open(f\"{file_name}.{extension}\", \"a\") as f:\n f.write(\"<!DOCTYPE html>\\n<html>\\n<head>\\n <meta charset='utf-8'>\")\n for head_element in self.head:\n f.write(head_element)\n f.write(\"\\n</head>\\n<body>\")\n for body_element in self.body:\n f.write(body_element)\n f.write(\"\\n</body>\\n</html>\")", "def createFakeSphinxProject(self):\n self.sourceDir.child(\"conf.py\").setContent(self.confContent.encode())\n self.sourceDir.child(\"index.rst\").setContent(self.indexContent.encode())", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('filename', type=argparse.FileType('r'), nargs='+', help='The list of files to generate strdl documentation for')\n args = parser.parse_args()\n for file in args.filename:\n strdl_gen.generate_file(strdl_parser.parse(file))", "def build_docs(open_docs):\n python_call(\"pip\", [\"install\", \"src/[docs]\"])\n python_call(\"pip\", [\"install\", \"-r\", \"src/requirements.txt\"])\n python_call(\n \"ipykernel\", [\"install\", \"--user\", \"--name=za_covid_map\"]\n )\n shutil.rmtree(\"docs/build\", ignore_errors=True)\n call(\n [\n \"sphinx-apidoc\",\n \"--module-first\",\n \"-o\",\n \"docs/source\",\n \"src/za_covid_map\",\n ]\n )\n call([\"sphinx-build\", \"-M\", \"html\", \"docs/source\", \"docs/build\", \"-a\"])\n if open_docs:\n docs_page = (Path.cwd() / \"docs\" / \"build\" / \"html\" / \"index.html\").as_uri()\n secho(\"Opening {}\".format(docs_page))\n webbrowser.open(docs_page)", "def write_doc(self, file=sys.stdout, tm=False):\n for sentence in self.sentences:\n if tm:\n print(\"<tu><tuv><seg>\", file=file)\n print(\"{}\".format(sentence.raw), file=file)\n if tm:\n print(\"</seg></tuv><tuv><seg>\", file=file)\n print(\"{}\".format(sentence.translation), file=file)\n if tm:\n print(\"</seg></tuv></tu>\", file=file)", "def get_documentation(self, *args, **dargs):\n pass" ]
[ "0.7526909", "0.67890173", "0.65753114", "0.6515147", "0.65132904", "0.65004843", "0.6358768", "0.63038355", "0.6276474", "0.621243", "0.61798906", "0.616652", "0.6122261", "0.610907", "0.61049974", "0.6098085", "0.6066121", "0.60467553", "0.60288143", "0.60257435", "0.60220516", "0.5996639", "0.59621865", "0.5945695", "0.59330475", "0.59314984", "0.59013695", "0.5893945", "0.5877123", "0.58691263", "0.5860558", "0.5856703", "0.5851177", "0.584886", "0.58337957", "0.58287877", "0.58144605", "0.57870233", "0.5775309", "0.57712704", "0.5729315", "0.56998783", "0.5690636", "0.5677347", "0.5662719", "0.5647973", "0.5647049", "0.5643877", "0.5641788", "0.56394506", "0.5627082", "0.56201774", "0.56069195", "0.5606374", "0.5592281", "0.5592281", "0.5575708", "0.55674106", "0.5540446", "0.5535355", "0.55338645", "0.5529201", "0.55274343", "0.55186266", "0.5515826", "0.55130506", "0.5509456", "0.54989153", "0.549039", "0.54868734", "0.5480049", "0.5476669", "0.5471819", "0.54638755", "0.5462329", "0.5452019", "0.54380727", "0.5436385", "0.54350626", "0.54335403", "0.5430551", "0.5428783", "0.542399", "0.5418583", "0.54118794", "0.5406298", "0.5404629", "0.5403967", "0.540324", "0.53973144", "0.53793", "0.5377834", "0.53746545", "0.5373853", "0.5368497", "0.53684574", "0.5356495", "0.53499794", "0.53478086", "0.5344843", "0.5330315" ]
0.0
-1
Output the data in slice iz, row ix of an image to standard out.
def dump_row(input, fname, ix=0, iz=0): fout = open(fname, "w") image=get_image(input) nx = image.get_xsize() ny = image.get_ysize() nz = image.get_zsize() fout.write("# z = %d slice, x = %d row)\n" % (iz, ix)) line = [] for iy in xrange(ny): fout.write("%d\t%12.5g\n" % (iy, image.get_value_at(ix,iy,iz))) fout.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_image_row(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_row(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny-1,-1,-1):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image_col(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image(data, index):\n image = data[index]\n for line in image:\n print(line)", "def print_image(input):\n\timage=get_image(input)\n\tnz = image.get_zsize()\n\tfor iz in xrange(nz): print_slice(input, iz)", "def print_col(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_img_at_idx(self, idx):\n\n img = self.images[idx]\n print_img(img)", "def print_image_slice_3d(input, num=0,direction=\"z\"):\n\t#print \"print slice at 3 directions\"\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tif(direction==\"x\"):\n\t\t#print \"xxxxx\"\n\t\tix=num\n\t\tprint \"(x = %d slice)\" % (ix)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor iy in xrange(ny):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((iy + 1) % 5 == 0): 
\n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(ny%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telif(direction==\"y\"):\n\t\t#print \"yyy\"\n\t\tiy=num\n\t\tprint \"(y = %d slice)\" % (iy)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telse:\n\t\t#print \"zzzz\"\n\t\tiz=num\n\t\tprint \"(z = %d slice)\" % (iz)\n\t\tline = []\n\t\tfor iy in xrange(ny-1,-1,-1):\n\t\t\tline.append(\"Row \")\n\t\t\tline.append(\"%4i \" % iy)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)", "def print_img_data(img,title='Image'):\n print(f'________ {title} _______')\n if len(image.shape) == 2:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0])}\\n')\n elif len(image.shape) == 3:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0,0])}\\n')", "def write_window(img, ds, window):\n new_img = np.array([img[:, :, i] for i in range(img.shape[2])])\n ds.write(new_img, window=window)", "def imshow(image):\n iio.imshow(dtype.im2uint(image))", "def show_input_to_output(img_ns):\n figure()\n \n sp = subplot(1, 2, 1).imshow(img_ns.img)\n sp.axes.grid(False)\n sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Input Image\", fontsize=10);\n outimg = tiles_to_images(img_ns, img_ns.tile_grid, img_ns.tile_catalog, img_ns.tile_size)\n sp = subplot(1, 2, 2).imshow(outimg.astype(np.uint8));\n sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Output Image From Tiles\", fontsize=10);\n sp.axes.grid(False)\n #print(outimg.astype(np.uint8))\n #print(img_ns)\n plt.savefig(img_ns.output_filename + \"_input_to_output.pdf\", bbox_inches=\"tight\")\n plt.close()", "def view_image(train_dataloader):\n for (x, target) in train_dataloader:\n np.save(\"img.npy\", x)\n print(x.shape)\n exit(0)", "def visualize_output(\n self,\n img: np.ndarray,\n output_data: Any):\n raise NotImplementedError", "def print_image(self, width, data):\n if len(data) % width:\n raise ValueError('Bad image format, length of data must be '\n 'divisible by width.')\n height = len(data) / width\n\n # send line-by-line\n for row in range(height):\n self.send_command('print_bitmap', 1, width)\n self.port.write(data[row*width:(row+1)*width], is_text=False)\n self.port.fed_dots(1)", "def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)", "def show(self):\n m = [xo_convert(int(x)) for x in np.nditer(self.arr)]\n print(\"{} | {} | {}\".format(*m[:3]))\n print(\"--+---+--\")\n print(\"{} | {} | 
{}\".format(*m[3:6]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[6:]))\n print()", "def print_slice(complex_slice, name = None):\n if name: print(\"***********\" + name + \"**********\")\n slice = complex_slice.clone()\n slice = slice.detach()\n slice = slice.cpu()\n slice = slice.permute(0, 2, 3, 1)\n slice = slice.squeeze()\n slice_image_abs = fastmri.complex_abs(slice)\n plt.imshow(slice_image_abs, cmap = 'gray')\n plt.show()", "def show_slice(file_path, x, y, z):\n img = nib.load(file_path)\n img = img.get_fdata()\n print(\"The scan has dimensions {}.\".format(img.shape))\n slice_0 = img[x, :, :]\n slice_1 = img[:, y, :]\n slice_2 = img[:, :, z]\n slices = [slice_0, slice_1, slice_2]\n fig, axes = plt.subplots(1, len(slices), figsize=[12, 4])\n for i, sli in enumerate(slices):\n axes[i].imshow(sli.T, cmap=\"gray\", origin=\"lower\")", "def render(filename,i):\n print('running render')\n A = np.genfromtxt(filename,skip_header=1,dtype=float,delimiter=',')\n img = np.array(A[i,:],copy=True)\n print(img.shape)\n img = img.reshape(28,28)\n img = 255 - img\n print(img.shape)\n plt.imshow(img, cmap=\"gray\", vmin=0, vmax=255)\n plt.savefig(\"img\" + str(i)+\"render\"+ \".png\")", "def observation(self, img):\r\n img = img.transpose(1, 2, 0)\r\n return img", "def explore_data(dataset, start, end, rows_and_columns=False):\r\n for i in range(start,end):\r\n print(dataset[i],end=\"\\n\")", "def display_data():\n data = read_data_from_file()\n data_size = len(data)\n print('Data size - ' + str(data_size))\n\n\n for i in range(data_size-1, 0, -1):\n image = data[i]\n cv2.imshow('window', image[0])\n print(str(image[1]))\n cv2.waitKey(50)", "def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):\r\n\r\n filenameprefix=self.name_prefix\r\n\r\n def printdatarow(dat, iteration):\r\n \"\"\"print data of iteration i\"\"\"\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))\r\n\r\n dat = CMADataLogger(filenameprefix).load()\r\n ndata = dat.f.shape[0]\r\n\r\n # map index to iteration number, is difficult if not all iteration numbers exist\r\n # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long\r\n # otherwise:\r\n if idx is None:\r\n idx = 100\r\n if np.isscalar(idx):\r\n # idx = np.arange(0, ndata, idx)\r\n if idx:\r\n idx = np.r_[0, 1, idx:ndata-3:idx, -3:0]\r\n else:\r\n idx = np.r_[0, 1, -3:0]\r\n\r\n idx = array(idx)\r\n idx = idx[idx<ndata]\r\n idx = idx[-idx<=ndata]\r\n iters = dat.f[idx, 0]\r\n idxbest = np.argmin(dat.f[:,5])\r\n iterbest = dat.f[idxbest, 0]\r\n\r\n if len(iters) == 1:\r\n printdatarow(dat, iters[0])\r\n else:\r\n self.disp_header()\r\n for i in iters:\r\n printdatarow(dat, i)\r\n self.disp_header()\r\n printdatarow(dat, iterbest)\r\n sys.stdout.flush()", "def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):\r\n\r\n filenameprefix=self.name_prefix\r\n\r\n def printdatarow(dat, iteration):\r\n \"\"\"print data of iteration i\"\"\"\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))\r\n\r\n dat = CMADataLogger(filenameprefix).load()\r\n ndata = 
dat.f.shape[0]\r\n\r\n # map index to iteration number, is difficult if not all iteration numbers exist\r\n # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long\r\n # otherwise:\r\n if idx is None:\r\n idx = 100\r\n if np.isscalar(idx):\r\n # idx = np.arange(0, ndata, idx)\r\n if idx:\r\n idx = np.r_[0, 1, idx:ndata-3:idx, -3:0]\r\n else:\r\n idx = np.r_[0, 1, -3:0]\r\n\r\n idx = array(idx)\r\n idx = idx[idx<=ndata] # TODO: shouldn't this be \"<\"?\r\n idx = idx[-idx<=ndata]\r\n iters = dat.f[idx, 0]\r\n idxbest = np.argmin(dat.f[:,5])\r\n iterbest = dat.f[idxbest, 0]\r\n if len(iters) == 1:\r\n printdatarow(dat, iters[0])\r\n else:\r\n self.disp_header()\r\n for i in iters:\r\n printdatarow(dat, i)\r\n self.disp_header()\r\n printdatarow(dat, iterbest)\r\n sys.stdout.flush()", "def printMat(image):\n for row in range(image.rows):\n print \"[\",\n for col in range(image.cols):\n print cv.mGet(image, row, col),\n print \"]\"\n print \"\"", "def _write_image(self):\r\n # Create an output raster with the correct number of rows and columns.\r\n gtiff_driver = gdal.GetDriverByName('GTiff')\r\n out_ds = gtiff_driver.Create(os.path.join(self.out_folder, self.out_file_name), self.column, self.row, 1)\r\n out_ds.SetProjection(self.in_ds.GetProjection())\r\n\r\n # Convert the offsets to real-world coordinates for the georeferencing info.\r\n # We can't use the coordinates above because they don't correspond to the pixel edges.\r\n subset_ulx, subset_uly = gdal.ApplyGeoTransform(self.in_gt, self.off_ulx, self.off_uly)\r\n out_gt = list(self.in_gt)\r\n out_gt[0] = subset_ulx\r\n out_gt[3] = subset_uly\r\n out_ds.SetGeoTransform(out_gt)\r\n\r\n data = self.read_image()\r\n out_band = out_ds.GetRasterBand(1)\r\n out_band.WriteArray(data)\r\n\r\n del out_ds", "def display_napari(pos_img):\n global data\n global img_queue\n if pos_img is None:\n return\n # read image and z position\n image = np.reshape(pos_img[2:],(clip[0], clip[1]))\n z_pos = pos_img[1]\n color = pos_img[0]\n\n # write image into correct slice of data and update display\n data[z_pos] = np.squeeze(image)\n layer = viewer.layers[color]\n layer.data = data\n #print(\"updating \", z_pos, color)\n\n img_queue.task_done()", "def print_real_image(r_img, name = None, batch_size = BATCH_SIZE):\n if name: print(\"***********\" + name + \"**********\")\n to_print = []\n c_img = r_img.clone()\n for i in range(batch_size):\n img = c_img[i,:,:]\n img = torch.squeeze(img)\n img = img.detach()\n img = img.cpu()\n plt.imshow(img, cmap = 'gray')\n plt.show()", "def displayData(indices_to_display = None):\n width, height = 20, 20\n nrows, ncols = 10, 10\n if not indices_to_display:\n indices_to_display = random.sample(range(X.shape[0]), nrows*ncols)\n \n big_picture = np.zeros((height*nrows,width*ncols))\n \n irow, icol = 0, 0\n for idx in indices_to_display:\n if icol == ncols:\n irow += 1\n icol = 0\n iimg = getDatumImg(X[idx])\n big_picture[irow*height:irow*height+iimg.shape[0], icol*width:icol*width+iimg.shape[1]] = iimg\n icol += 1\n fig = plt.figure(figsize=(6,6))\n\n big_picture = (big_picture * 255).astype(np.int8)\n img = Image.fromarray(big_picture, mode='L')\n plt.imshow(img, cmap = cm.Greys)", "def dumpData(self,out,index):\n #--SCVR\n out.pack('4siBB2sB',\n 'SCVR', 5+len(self.text), index+48, self.type, self.func, self.oper)\n if self.text: out.write(self.text)\n #--Value\n if isinstance(self.value,int):\n out.packSub('INTV','i', self.value)\n else:\n out.packSub('FLTV','f', self.value)", "def 
ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\n \"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, format='%s', itemsize=%s, flags=%s)\"\n % (x, nd.shape, nd.strides, nd.suboffsets, offset, nd.format, nd.\n itemsize, flags))\n sys.stdout.flush()", "def print_image(img):\n for k in img:\n print(\"\".join([str(x) for x in k] ))", "def pretty_print(image_example):\n print numpy.array_str(image_example, precision=1, max_line_width=142)", "def to_nii(self, outbase, spirec='spirec', saveInOut=False):\n if self.image_data is None:\n self.recon(spirec)\n\n image_tlhc = np.array([self.header.image.tlhc_R, self.header.image.tlhc_A, self.header.image.tlhc_S])\n image_trhc = np.array([self.header.image.trhc_R, self.header.image.trhc_A, self.header.image.trhc_S])\n image_brhc = np.array([self.header.image.brhc_R, self.header.image.brhc_A, self.header.image.brhc_S])\n #image_cent = np.array([self.header.image.ctr_R, self.header.image.ctr_A, self.header.image.ctr_S])\n\n row_vec = (image_trhc-image_tlhc)/np.sqrt(np.dot(image_trhc-image_tlhc, image_trhc-image_tlhc))\n col_vec = -(image_trhc-image_brhc)/np.sqrt(np.dot(image_trhc-image_brhc, image_trhc-image_brhc))\n # The DICOM standard defines these two unit vectors in an LPS coordinate frame, but we'll\n # need RAS (+x is right, +y is anterior, +z is superior) for NIFTI. So, we compute them\n # such that row_vec points to the right and col_vec points up.\n # Not sure if we need to negate the slice_norm. From the NIFTI-1 header:\n # The third column of R will be either the cross-product of the first 2 columns or\n # its negative. It is possible to infer the sign of the 3rd column by examining\n # the coordinates in DICOM attribute (0020,0032) \"Image Position (Patient)\" for\n # successive slices. However, this method occasionally fails for reasons that I\n # (RW Cox) do not understand.\n\n # can also get slice_norm from: slice_norm = np.cross(row_vec, col_vec)\n slice_norm = np.array([self.header.image.norm_R, self.header.image.norm_A, self.header.image.norm_S])\n slice_fov = np.abs(self.header.series.start_loc - self.header.series.end_loc)\n\n # This is either the first slice tlhc (image_tlhc) or the last slice tlhc. How to decide?\n # And is it related to wheather I have to negate the slice_norm?\n # Tuned this empirically by comparing spiral and EPI data with the sam Rx.\n # Everything seems reasonable, except the test for axial orientation (start_ras==S|I).\n # I have no idea why I need that! But the flipping only seems necessary for axials, not\n # coronals or the few obliques I've tested.\n # FIXME: haven't tested sagittals! 
(to test for spiral: 'sprt' in self.psd_name.lower())\n if (self.header.series.start_ras=='S' or self.header.series.start_ras=='I') and self.header.series.start_loc > self.header.series.end_loc:\n pos = image_tlhc - slice_norm*slice_fov\n # FIXME: since we are reversing the slice order here, should we change the slice_order field below?\n self.image_data = self.image_data[:,:,::-1,]\n if self.fm_data is not None:\n self.fm_data = self.fm_data[:,:,::-1,]\n else:\n pos = image_tlhc\n\n if self.num_bands > 1:\n pos = pos - slice_norm * self.band_spacing_mm * (self.num_bands - 1.0) / 2.0\n\n qto_xyz = np.zeros((4,4))\n qto_xyz[0,0] = row_vec[0]\n qto_xyz[0,1] = col_vec[0]\n qto_xyz[0,2] = slice_norm[0]\n\n qto_xyz[1,0] = row_vec[1]\n qto_xyz[1,1] = col_vec[1]\n qto_xyz[1,2] = slice_norm[1]\n\n qto_xyz[2,0] = row_vec[2]\n qto_xyz[2,1] = col_vec[2]\n qto_xyz[2,2] = slice_norm[2]\n\n qto_xyz[:,3] = np.append(pos, 1).T\n qto_xyz[0:3,0:3] = np.dot(qto_xyz[0:3,0:3], np.diag(self.mm_per_vox))\n\n nii_header = nibabel.Nifti1Header()\n nii_header.set_xyzt_units('mm', 'sec')\n nii_header.set_qform(qto_xyz, 'scanner')\n nii_header.set_sform(qto_xyz, 'scanner')\n\n nii_header['slice_start'] = 0\n nii_header['slice_end'] = self.num_slices - 1\n # nifti slice order codes: 0 = unknown, 1 = sequential incrementing, 2 = seq. dec., 3 = alternating inc., 4 = alt. dec.\n slice_order = 0\n nii_header['slice_duration'] = self.tr * 1000 / self.num_slices\n # FIXME: check that this is correct.\n if self.header.series.se_sortorder == 0:\n slice_order = 1 # or 2?\n elif self.header.series.se_sortorder == 1:\n slice_order = 3 # or 4?\n nii_header['slice_code'] = slice_order\n\n # Note: the freq/phase dir isn't meaningful for spiral trajectories.\n if self.header.image.freq_dir==1:\n nii_header.set_dim_info(freq=1, phase=0, slice=2)\n else:\n nii_header.set_dim_info(freq=0, phase=1, slice=2)\n\n # FIXME: There must be a cleaner way to set the TR! 
Maybe bug Matthew about it.\n nii_header.structarr['pixdim'][4] = self.tr\n nii_header.set_slice_duration(nii_header.structarr['pixdim'][4] / self.num_slices)\n nii_header.structarr['cal_max'] = self.image_data.max()\n nii_header.structarr['cal_min'] = self.image_data.min()\n\n if self.num_echoes == 1:\n nifti = nibabel.Nifti1Image(self.image_data, None, nii_header)\n nibabel.save(nifti, outbase + '.nii.gz')\n elif self.num_echoes == 2:\n if saveInOut:\n nifti = nibabel.Nifti1Image(self.image_data[:,:,:,:,0], None, nii_header)\n nibabel.save(nifti, outbase + '_in.nii.gz')\n nifti = nibabel.Nifti1Image(self.image_data[:,:,:,:,1], None, nii_header)\n nibabel.save(nifti, outbase + '_out.nii.gz')\n # FIXME: Do a more robust test for spiralio!\n # Assume spiralio, so do a weighted average of the two echos.\n # FIXME: should do a quick motion correction here\n w_in = np.mean(self.image_data[:,:,:,:,0], 3)\n w_out = np.mean(self.image_data[:,:,:,:,1], 3)\n inout_sum = w_in + w_out\n w_in = w_in / inout_sum\n w_out = w_out / inout_sum\n avg = np.zeros(self.image_data.shape[0:4])\n for tp in range(self.image_data.shape[3]):\n avg[:,:,:,tp] = w_in*self.image_data[:,:,:,tp,0] + w_out*self.image_data[:,:,:,tp,1]\n nifti = nibabel.Nifti1Image(avg, None, nii_header)\n nibabel.save(nifti, outbase + '.nii.gz')\n else:\n for echo in range(self.num_echoes):\n nifti = nibabel.Nifti1Image(self.image_data[:,:,:,:,echo], None, nii_header)\n nibabel.save(nifti, outbase + '_echo%02d.nii.gz' % echo)\n\n if self.fm_data is not None:\n nii_header.structarr['cal_max'] = self.fm_data.max()\n nii_header.structarr['cal_min'] = self.fm_data.min()\n nifti = nibabel.Nifti1Image(self.fm_data, None, nii_header)\n nibabel.save(nifti, outbase + '_B0.nii.gz')", "def write_kernel(w, k):\n w.writeln(\"void {k}(const Image<int>& in, Image<int>& out\".format(k=k.name))\n # write the tap signal in the function argument list\n for tapName in k.rtapNames:\n #tapType = k.edges[tapName].dtype\n #tapCType = dtypeMap[tapType]\n tapCType = getCType(k.edges[tapName])\n for indices in expand_range(k.edges[tapName].dim):\n w.writeln(\"\\t, {type} {sig}\".format(type=tapCType, sig=mangle((tapName, indices))))\n w.writeln(\")\")\n w.writeln(\"{\")\n w.indent()\n # TODO: insert size error checking into C code here\n\n w.writeln(\"for(int y = 0; y < in.height(); y++){\")\n w.indent()\n w.writeln(\"for(int x = 0; x < in.width(); x++){\")\n w.indent()\n\n \n # Grab the register declaration for the partial-pixel output and blow it into\n # the complete list of input registers\n startName = k.ppoutName\n #startType = k.edges[startName].dtype\n #startCType = dtypeMap[startType]\n startCType = getCType(k.edges[startName])\n for indices in expand_range(k.edges[startName].dim):\n # HACK: work with multi-channel or single-channel images\n z_idx = 0\n if len(indices) == 3:\n z_idx = indices[2]\n\n w.writeln(\"{type} {reg} = in(x+{xoff}, y+{yoff}, {z});\".format(\n type=startCType,\n reg=mangle((startName, indices)),\n xoff=(indices[0]-k.centroid[0]), \n yoff=(indices[1]-k.centroid[1]), z=z_idx))\n \n # Set up the constants\n for const in k.constants:\n # TODO: be careful here, because we need to be consistent with naming/indexing\n # TODO: handle int/float; infer datatype in parser\n w.writeln(\"const float {reg} = {val};\".format(reg=mangle((const[0], [0])), val=const[1]))\n \n w.writeln(\"\")\n\n\n #Special Register Examples for Reduce:\n #fix_17_0 pixel_out_pos[1:0] # Location of Reduce pixel in output image\n #fix_17_0 centroid_pos[1:0] # Location of 
Centroid in input image\n if \"centroid_pos\" in k.specialRegs:\n w.writeln(\"int centroid_pos_0 = x;\")\n w.writeln(\"int centroid_pos_1 = y;\")\n\n if \"pixel_out_pos\" in k.specialRegs:\n w.writeln(\"int pixel_out_pos_0 = x;\")\n w.writeln(\"int pixel_out_pos_1 = y;\")\n \n # Create a list of (name, index) tuples representing the valid (i.e., evaluated) signal\n validRegs = [(startName, i) for i in expand_range(k.edges[startName].dim)]\n validRegs += [(tapName, i) for tapName in k.rtapNames \n for i in expand_range(k.edges[tapName].dim)]\n validRegs += [(regName, i) for regName in k.specialRegs \n for i in expand_range(k.edges[regName].dim)]\n validRegs += [(c[0], [0]) for c in k.constants]\n \n # Make a copy of the list of operations which we can remove stuff from\n unprocessed = dict(k.ops)\n \n # Process all the operations\n while len(unprocessed) > 0:\n progress = False\n for opKey in unprocessed:\n op = k.ops[opKey]\n # Find an operation that can be evaluated\n if opOk(op, validRegs):\n #dtype = k.edges[op.result[0]].dtype\n #dtype = dtypeMap[dtype] # Look up the C-equivalent for this type\n dtype = getCType(k.edges[op.result[0]])\n # TODO: include integer/fraction width\n \n # TODO: error checking that we have the right number of operands - this should be done in the parser, actually\n # Evaluate it\n if op.name in ['max', 'min']:\n write_complex_op(w, op, dtype)\n elif op.name == \"sum\": \n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' + ', mangle(op.operands))))\n elif op.name == \"mv\":\n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=mangle(op.operands[0])))\n elif op.name == \"add\":\n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' + ', mangle(op.operands))))\n elif op.name == \"sub\":\n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' - ', mangle(op.operands))))\n elif op.name == \"mult\":\n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' * ', mangle(op.operands))))\n elif op.name == \"div\":\n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' / ', mangle(op.operands))))\n\n elif op.name == \"lshift\":\n w.writeln(\"{dtype} {dst} = {op1} << {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"rshift\":\n w.writeln(\"{dtype} {dst} = {op1} >> {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"and\":\n w.writeln(\"{dtype} {dst} = {op1} & {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"or\":\n w.writeln(\"{dtype} {dst} = {op1} | {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"ne\":\n w.writeln(\"{dtype} {dst} = {op1} != {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"eq\":\n w.writeln(\"{dtype} {dst} = {op1} == {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"lt\":\n w.writeln(\"{dtype} {dst} = {op1} < {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"lte\":\n 
w.writeln(\"{dtype} {dst} = {op1} <= {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"gt\":\n w.writeln(\"{dtype} {dst} = {op1} > {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"gte\":\n w.writeln(\"{dtype} {dst} = {op1} >= {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"not\":\n w.writeln(\"{dtype} {dst} = !{src};\".format(dtype=dtype, dst=mangle(op.result), src=mangle(op.operands[0])))\n elif op.name == \"abs\":\n w.writeln(\"{dtype} {dst} = ({src} >= 0) ? {src} : (-{src});\".format(dtype=dtype, dst=mangle(op.result), src=mangle(op.operands[0])))\n elif op.name == \"inv\":\n w.writeln(\"{dtype} {dst} = -{src};\".format(dtype=dtype, dst=mangle(op.result), src=mangle(op.operands[0])))\n\n elif op.name == \"mux\":\n w.writeln(\"{dtype} {dst} = {cond} ? {op1} : {op2};\".format(dtype=dtype, dst=mangle(op.result), \\\n cond=mangle(op.operands[0]), op1=mangle(op.operands[1]), op2=mangle(op.operands[2])))\n else:\n print \"Unhandled operator \" + opKey\n \n validRegs.append(op.result)\n # Remove it from the list\n unprocessed.pop(opKey)\n progress = True\n break # We changed the list, so we gotta start over\n \n # If we went through the whole list without finding any ops to evaluate,\n # something is wrong and we need to give up.\n if progress is False:\n print \"Failed to evaluate some ops!\"\n for opKey in unprocessed:\n print \"\\t %s %s\" % (unprocessed[opKey].name, unprocessed[opKey].result)\n break\n \n for indices in expand_range(k.edges[k.sink].dim):\n #writeln('printf(\"result: %f\\\\n\", {reg});'.format(reg=mangle((k.sink, indices))))\n # TODO: make this handle depths other than 3\n w.writeln('out(x,y,{z}) = {reg};'.format(z=indices[0], reg=mangle((k.sink, indices))))\n\n w.unindent()\n w.writeln(\"}\")\n w.unindent()\n w.writeln(\"}\")\n w.unindent()\n w.writeln(\"} // END %s\" % k.name)\n w.writeln(\"\\n\")", "def print_array(x, idx=slice(None), message=None, message_prefix=\"SHIM - \",\n file=sys.stdout):\n return set_subtensor(x[idx],\n print(x[idx],\n message=message,\n message_prefix=message_prefix,\n file=file\n )\n )", "def write_file(self, i, path, fout):\n\n test_file = path + '/' + self.output[i]\n # Write file name\n print(test_file, file=fout, end='\\n\\n')\n\n extension = os.path.splitext(test_file)[1]\n if extension == '.fits' or extension == 'FITS':\n import subprocess\n prog = self.bindir + '/fits2ascii.py -i ' + test_file\n output = subprocess.check_output(prog.split(), shell=False)\n data = output.decode()\n else:\n fin = open(test_file, 'r')\n data = fin.read()\n fin.close()\n #fout.write(data)\n print(data, file=fout)\n print(file=fout, end='\\n')", "def ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, \"\n \"format='%s', itemsize=%s, flags=%s)\" %\n (x, nd.shape, nd.strides, nd.suboffsets, offset,\n nd.format, nd.itemsize, flags))\n sys.stdout.flush()", "def display_cropped_img(i):\n image = PIL.Image.open(testing_img_paths[i])\n image = image.crop(box=(313,99,825,611))\n image = image.resize((256,256))\n display(image)", "def collatz_print(w, i, j, v):\n\tw.write(str(i) + \" \" + 
str(j) + \" \" + str(v) + \"\\n\")", "def print_image(self, pil_image):\n bw_image = pil_image.convert('1', dither=Image.FLOYDSTEINBERG)\n width = bw_image.width\n height = bw_image.height\n img_bytes = list(bw_image.getdata())\n self.print_bitmap(width, height, img_bytes)", "def get_image_summary(img, idx=0):\n\n V = tf.slice(img, (0, 0, 0, idx), (1, -1, -1, 1))\n V -= tf.reduce_min(V)\n V /= tf.reduce_max(V)\n V *= 255\n\n img_w = tf.shape(img)[1]\n img_h = tf.shape(img)[2]\n V = tf.reshape(V, tf.stack((img_w, img_h, 1)))\n V = tf.transpose(V, (2, 0, 1))\n V = tf.reshape(V, tf.stack((-1, img_w, img_h, 1)))\n return V", "def create_displayable_test_output(test_image):\n if hasattr(test_image, \"numpy\"):\n return np.squeeze(test_image.numpy())[:, :, 1:]\n else:\n return np.squeeze(test_image)[:, :, 1:]", "def display_image(X):\n\n\tim = X.reshape(28, 28)\n\ttemp = plt.imshow(im)\n\tplt.show()", "def write(self, data, roi=None):\n if not self.recording:\n return\n\n roi = roi or self.roi\n\n name = 'img{:04d}'.format(self._index)\n try:\n self.db.get_node('/images/' + name).remove()\n except tables.NoSuchNodeError:\n pass\n finally:\n # TODO: Adapt to CArray for compression\n # filters = tables.Filters(complevel=5, complib='zlib')\n arr = self.db.create_array('/images', name, data)\n arr.attrs.timestamp = datetime.strftime(\n datetime.now(), '%Y-%m-%d %H:%M:%S.%f')\n arr.attrs.roi = roi\n arr.flush()\n self.db.flush()\n\n self._index = self._index + 1 if self._index < self.N - 1 else 0", "def printImage(imageObject):\n # TODO\n pass", "def show_ipv(data: np.ndarray):\n import ipyvolume as ipv\n return ipv.quickvolshow(data)", "def outImg(arrX, arrY, num_out, out_path):\n m, n = np.shape(arrX)\n if num_out > m:\n print(\"Invalid argument num_out. Must be <= total number of images, \"+str(m))\n return 0\n\n for i in range(num_out):\n img = np.array(arrX[i])\n img = img.reshape(28,28)\n outfile = str(i) + \"_\" + str(arrY[i]) + \".png\"\n plt.figure()\n plt.imshow(img, cmap = 'binary') \n plt.savefig(outpath + \"/\" + outfile)", "def write_file(self):\n if self.it_num % 5 == 0:\n #plt.imshow(self.grid)\n #plt.savefig(\"output%.4d.png\" % self.it_num, bbox_inches='tight')\n io.savemat(\"MLOutput%.4d\" % self.it_num, { \"Grid\":self.grid})", "def __write_out_row__(self):\n column_pointer = spacing\n\n row_height = np.max([b.shape[0] for b in self.row_bitmaps])\n\n with open(\"active_weather.basic.exp\"+str(self.box_count)+\".box\",\"a\") as f:\n for char,b in zip(self.row_characters,self.row_bitmaps):\n assert isinstance(b, np.ndarray)\n height, width = b.shape\n\n # row first and then column\n additional_height = row_height-height\n\n self.training_page[self.row_pointer+additional_height:self.row_pointer + height+additional_height, column_pointer:column_pointer + width] = b\n a, b, c, d, e = char, column_pointer, self.height - (self.row_pointer + height + additional_height), column_pointer + width, self.height - (self.row_pointer+additional_height)\n f.write(str(a) + \" \" + str(b) + \" \" + str(c+1) + \" \" + str(d-1) + \" \" + str(e) + \" 0\\n\")\n\n column_pointer += width + spacing\n\n\n self.row_pointer += spacing + row_height\n self.column_pointer = spacing\n\n self.row_bitmaps = []\n self.row_characters = []", "def display(array):\n plt.figure()\n plt.imshow(array)\n plt.show()", "def print_image(img):\r\n # On affiche l'image\r\n plt.figure(figsize=(20, 5))\r\n plt.subplot(1, 2, 1)\r\n plt.imshow(img)\r\n # On affiche l'histogramme\r\n plt.subplot(1, 2, 2)\r\n plt.hist(img.flatten(), 
bins=range(256))\r\n plt.show()", "def collatz_print (w, i, j, v) :\n w.write(str(i) + \" \" + str(j) + \" \" + str(v) + \"\\n\")", "def show_image(image):\n print('-' * (len(image) + 4))\n for line in image:\n print('| ', end='')\n for ch in line:\n char = '#' if ch is True else '.'\n print(char, end='')\n print(' |')\n print('-' * (len(image) + 4))", "def write(self, data: np.ndarray) -> None:\n assert data.ndim == 1\n all_data = np.hstack((self.res, data))\n nrows = len(all_data) // self.width\n if nrows > 0:\n d = all_data[0: nrows * self.width].reshape(nrows, self.width)\n w = Window(0, self.rows_written, d.shape[1], d.shape[0])\n self.f.write(d, 1, window=w)\n self.rows_written += nrows\n self.res = all_data[nrows * self.width:]\n else:\n self.res = all_data", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n page = 0xB0\n pix = list(image.getdata())\n step = self.width * 8\n for y in xrange(0, self.pages * step, step):\n\n # move to given page, then reset the column address\n self.command(page, 0x02, 0x10)\n page += 1\n\n buf = []\n for x in xrange(self.width):\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[x + y + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n\n self.data(buf)", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n page = 0xB0\n pix = list(image.getdata())\n step = self.width * 8\n for y in xrange(0, self.pages * step, step):\n\n # move to given page, then reset the column address\n self.command(page, 0x02, 0x10)\n page += 1\n\n buf = []\n for x in xrange(self.width):\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[x + y + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n\n self.data(buf)", "def print_images_out_statistics(self):\n self._print_images_statistics(self._images_out_folder, self._pose_class_names)", "def pixdump( source, start=None, end=None, length=None, width=64, height=None, palette=None ):\n\n for line in pixdump_iter( source, start, end, length, width, height, palette ):\n print( line )", "def show(self, data):\n if isinstance(data, (numpy.ndarray, h5py.Dataset)):\n isAtomic = len(data.shape) == 0\n isCurve = len(data.shape) == 1 and numpy.issubdtype(data.dtype, numpy.number)\n isImage = len(data.shape) == 2 and numpy.issubdtype(data.dtype, numpy.number)\n if isAtomic:\n self.showAsString(data)\n elif isCurve:\n self.show1d(data)\n elif isImage:\n self.show2d(data)\n else:\n self.showAsString(data)\n else:\n self.showAsString(data)", "def img_gen_scat():\n data = pd.DataFrame(data=np.random.rand(5,1), index=range(1,6), columns=['Fred'])\n #m,n = np.shape(data)\n print(data)\n plt.clf()\n plt.scatter(x=data.index.values, y=data.values.ravel(), color='k') # figsize=(10, 6))\n # Options for later from https://matplotlib.org/api/_as_gen/matplotlib.pyplot.bar.html\n # bar_width = 0.35\n # alpha = .3\n fig=plt.gcf()\n fig.set_size_inches(1, 1)\n plt.axis('off')\n fig.tight_layout()\n fig.canvas.draw()\n # grab the pixel buffer and dump it into a numpy array\n pixels = np.array(fig.canvas.renderer._renderer)[:,:,:3]\n print(pixels, data.index.values + data.values.ravel() )\n\n return pixels, np.append(data.index.values , data.values.ravel());", "def printdatarow(dat, iteration):\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' 
%.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))", "def printdatarow(dat, iteration):\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))", "def info_np(img):\n import numpy as np\n\n print ('Dimensions: ' + str(np.shape(img)))\n print ('Min value: ' + str(np.min(img)))\n print ('Avg value: ' + str(np.average(img)))\n print ('Med value: ' + str(np.median(img)))\n print ('Max value: ' + str(np.max(img)))\n print ('Std dev: ' + str(np.std(img)))\n print ('Sum: ' + str(np.sum(img)))", "def _print_image(self, line, size):\n i = 0\n cont = 0\n\n self._write(self.__class__.__imageSize['1x1'])\n buffer = bytearray([int((size[0] / size[1]) / 8), 0, size[1], 0])\n\n self._write(buffer)\n buffer = bytearray()\n\n while i < len(line):\n hex_string = int(line[i:i + 8], 2)\n buffer.append(hex_string)\n i += 8\n cont += 1\n if cont % 4 == 0:\n self._write(buffer)\n buffer = bytearray()\n cont = 0", "def disImg(data=None,colorbar=False):\n size = np.sqrt(len(data[4:]))\n xmm = data[0]\n ymm = data[1]\n pl.matshow(data[4:].reshape(size,size),fignum=False)\n if colorbar == True:\n pl.colorbar()\n pl.xlim(0,size-1)\n pl.ylim(0,size-1)\n pl.xlabel('Pixels')\n pl.ylabel('Pixels')\n pl.grid(color='yellow')", "def show_all(img, overlay=None, axis='z'):\n xlen, ylen, zlen = img.GetSize()\n all_images = []\n all_overlays = []\n if axis == 'z':\n all_images = [img[:, :, z] for z in xrange(zlen)]\n if overlay:\n all_overlays = [overlay[:, :, z] for z in xrange(zlen)]\n elif axis == 'y':\n all_images = [img[:, y, :] for y in xrange(ylen)]\n if overlay:\n all_overlays = [overlay[:, y, :] for y in xrange(ylen)]\n elif axis == 'x':\n all_images = [img[x, :, :] for x in xrange(xlen)]\n if overlay:\n all_overlays = [overlay[x, :, :] for x in xrange(xlen)]\n else:\n raise Exception('invalid axis')\n\n for i, image in enumerate(all_images):\n if overlay:\n show_one(sitk.LabelOverlay(image, all_overlays[i]))\n else:\n show_one(image)\n plt.show()", "def _render(self, mode='human', close=False):\n if close:\n return\n\n outfile = io.StringIO() if mode == 'ansi' else sys.stdout\n\n grid = np.arange(self.nS).reshape(self.shape)\n it = np.nditer(grid, flags=['multi_index'])\n while not it.finished:\n s = it.iterindex\n y, x = it.multi_index\n\n if self.s == s:\n output = \" x \"\n elif s == 0 or s == self.nS - 1:\n output = \" T \"\n else:\n output = \" o \"\n\n if x == 0:\n output = output.lstrip()\n if x == self.shape[1] - 1:\n output = output.rstrip()\n\n outfile.write(output)\n\n if x == self.shape[1] - 1:\n outfile.write(\"\\n\")\n\n it.iternext()", "def write(self, image):\n raise NotImplementedError()", "def data_callback(data_in):\n global data\n global cnt\n global outfile\n # print(data_in[0][0])\n data = np.roll(data, -1, axis=0)\n data[-1,:] = data_in\n outfile.write(\"{},{},{},{},{},{},{},{}\\n\".format(data[0][0], data[0][1], data[0][2], data[0][3], data[0][4], data[0][5], data[0][6], data[0][7]))\n cnt += 1", "def display(self):\n nrow = 2\n ncol = len(self.views) + 1\n rows = [(self.views[0].original, len(self.views)),\n (self.views[0].image, len(self.views) + 1)]\n fig, axes = plt.subplots(nrows=nrow, ncols=ncol,\n figsize=self._figsize(rows),\n squeeze=True)\n originals = [(v.position.id, v.original) 
for v in self.views] + [\n ('combined', np.median(np.stack([v.original for v in self.views]), axis=0))]\n warped = [(v.position.id, v.image) for v in self.views] + [\n ('combined', self.image)]\n for ax, (title, img) in zip(axes.ravel(), originals + warped):\n ax.imshow(img)\n ax.axis('off')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n ax.set(title=title)\n fig.tight_layout()\n fig.canvas.draw()\n img_array = np.array(fig.canvas.renderer._renderer)\n plt.close('all')\n return img_array", "def display(self):\n nrow = 1\n ncol = len(self.views) + 1\n rows = [(self.views[0].image, len(self.views) + 1)]\n fig, axes = plt.subplots(nrows=nrow, ncols=ncol,\n figsize=self._figsize(rows),\n squeeze=True)\n for ax, (title, img) in zip(axes.ravel(),\n [(v.position.id, v.image) for v in self.views] + [\n ('combined', self.image)]):\n ax.imshow(img)\n ax.axis('off')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n ax.set(title=title)\n fig.tight_layout()\n fig.canvas.draw()\n img_array = np.array(fig.canvas.renderer._renderer)\n plt.close('all')\n return img_array", "def dump(self, ipoint, isave):\n\n for key, vals in self.resfi.items():\n outName = '{}/{}_{}_{}.hdf5'.format(self.outDir,\n self.dbName, key, self.num)\n if vals is not None:\n # transform to astropy table to dump in hdf5 file\n tab = Table.from_pandas(vals)\n keyhdf = 'metric_{}_{}_{}'.format(self.num, ipoint, isave)\n tab.write(outName, keyhdf, append=True, compression=True)\n\n # reset the metric after dumping\n for metric in self.metricList:\n self.resfi[metric.name] = pd.DataFrame()", "def _convert_and_print_image(self, im):\n pixLine = \"\"\n imLeft = \"\"\n imRight = \"\"\n switch = 0\n imgSize = [0, 0]\n\n if im.size[0] > 512:\n print (\"WARNING: Image is wider than 512 and could be truncated at print time \")\n if im.size[1] > 255:\n raise ValueError(\"Image Height larger than 255\")\n\n imBorder = self._check_image_size(im.size[0])\n for i in range(imBorder[0]):\n imLeft += \"0\"\n for i in range(imBorder[1]):\n imRight += \"0\"\n\n for y in range(im.size[1]):\n imgSize[1] += 1\n pixLine += imLeft\n imgSize[0] += imBorder[0]\n for x in range(im.size[0]):\n imgSize[0] += 1\n RGB = im.getpixel((x, y))\n imColor = (RGB[0] + RGB[1] + RGB[2])\n imPattern = \"1X0\"\n patternLen = len(imPattern)\n switch = (switch - 1) * (-1)\n for x in range(patternLen):\n if imColor <= (255 * 3 / patternLen * (x + 1)):\n if imPattern[x] == \"X\":\n pixLine += \"%d\" % switch\n else:\n pixLine += imPattern[x]\n break\n elif imColor > (255 * 3 / patternLen * patternLen) and imColor <= (255 * 3):\n pixLine += imPattern[-1]\n break\n pixLine += imRight\n imgSize[0] += imBorder[1]\n\n self._print_image(pixLine, imgSize)", "def export_figure(idx_row, df_slices_info, path_out):\n _, row = idx_row\n img_name = os.path.splitext(os.path.basename(row['path_image']))[0]\n\n try:\n if img_name not in df_slices_info.index:\n logging.debug('missing image in annotation - \"%s\"', img_name)\n return\n\n img = tl_data.io_imread(row['path_image'])\n segm = tl_data.io_imread(row['path_segm'])\n df = pd.read_csv(os.path.join(row['path_centers']), index_col=0)\n centres = df[['X', 'Y']].values\n\n fig = figure_draw_img_centre_segm(None, img, centres, segm)\n\n row_slice = df_slices_info.loc[img_name]\n fig = figure_draw_annot_csv(fig, img, row_slice)\n\n tl_visu.figure_image_adjustment(fig, img.shape)\n fig.savefig(os.path.join(path_out, img_name + '.png'))\n plt.close(fig)\n except Exception:\n logging.exception('failed for: %s', img_name)", 
"def show_one(img):\n dpi = 40\n margin = 0.05\n nda = sitk.GetArrayFromImage(img)\n spacing = img.GetSpacing()\n extent = (0, nda.shape[1] * spacing[1], nda.shape[0] * spacing[0], 0)\n figsize = (5, 5)\n fig = plt.figure(figsize=figsize, dpi=dpi)\n ax = fig.add_axes([margin, margin, 1 - 2 * margin, 1 - 2 * margin])\n\n plt.set_cmap(\"gray\")\n ax.imshow(nda, extent=extent, interpolation=None)", "def print_images(images,output_dir,image_num=0,pair=False,synth_images=None):\n for i in xrange(images.shape[0]):\n to_print = fix_image(images[i])\n\n if pair and synth_images is not None:\n synth_to_print = fix_image(synth_images[i])\n to_print = np.hstack((to_print,synth_to_print))\n\n #What is the name of the image?\n imsave(os.path.join(output_dir,str(image_num + i) + \".png\"), to_print)", "def display(self, index, mask_overlay = False, save = False):\n image_data = self[index].squeeze().numpy()\n x, y = self.location_of(index)\n \n plt.figure(index)\n plt.title(f'Image {index} at ({x}, {y})')\n plt.imshow(image_data, cmap='bone')\n \n if save:\n plt.savefig(f'{index}img.png', dpi = 400)\n \n if self.masked and mask_overlay:\n mask_data = self.get_mask(index).squeeze().numpy()\n plt.imshow(mask_data, cmap='viridis', alpha=0.4)\n \n if save:\n plt.savefig(f'{index}mask.png', dpi = 400)\n \n plt.show()", "def standard_visual(pred_data_path: str, input_data_path: str):\n nb_rows = 1 if pred_dataset == 'test' else 2\n\n prediction = sitk.GetArrayFromImage(sitk.ReadImage(pred_data_path))\n input_images, ground_truth = get_input(patient_id, input_data_path)\n prediction_masked = np.ma.masked_where(prediction == 0, prediction)\n\n for i in range(prediction.shape[0]):\n fig = plt.figure(figsize=(15, 7.5))\n fig.suptitle('{}: Slice {}'.format(patient_id, i))\n\n plt.subplot(nb_rows, 1, 1)\n plt.title('Prediction')\n plt.imshow(input_images[i, ...], 'gray', interpolation=None)\n plt.imshow(prediction_masked[i, ...], 'jet', vmin=0, vmax=3, interpolation='none', alpha=0.4)\n\n if nb_rows == 2:\n plt.subplot(nb_rows, 1, 2)\n plt.title('Ground Truth')\n plt.imshow(input_images[i, ...], 'gray', interpolation=None)\n plt.imshow(ground_truth[i, ...], 'jet', vmin=0, vmax=3, interpolation='none', alpha=0.4)\n\n plt.show(block=True)", "def printdata(self,whichstream_,firsti_,lasti_,firstj_,lastj_,firstk_,lastk_,c_,qo_,a_,qc_,bc_,bx_,vartype_,cones_): # 3\n if not isinstance(whichstream_,streamtype): raise TypeError(\"Argument whichstream has wrong type\")\n res = self.__obj.printdata(whichstream_,firsti_,lasti_,firstj_,lastj_,firstk_,lastk_,c_,qo_,a_,qc_,bc_,bx_,vartype_,cones_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def displayFaceData(X, rows, cols):\n\twidth, height = 32, 32 # This is the shape of original photo (32*32)\n\tpictures_combined = np.zeros((height*rows, width*cols))\n\t\n\trow, col = 0, 0\n\tfor a_picture_index in xrange(rows*cols):\n\t\tif col == cols:\n\t\t\trow += 1\n\t\t\tcol = 0\n\t\ta_picture = ReshapeIntoImage(X[a_picture_index],width)\n\t\tpictures_combined[row*height:(row*height+a_picture.shape[0]), col*width:(col*width+a_picture.shape[1])] = a_picture\n\t\tcol += 1\n\n\tfig = plt.figure(figsize=(10,10))\n\timg = scipy.misc.toimage( pictures_combined )\n\tplt.imshow(img,cmap = cm.Greys_r)\n\tplt.show(block=False)", "def debug_image(self, state_index: int = -1):\n image = self.make_image(state_index, channel_type=\"n\")\n return np.array([np.sum(arr) for arr in image])[3:].reshape(8, 12)", "def print_adni_qc(outputdir, data, title):\n # 
extract filename for title\n title = os.path.basename(title)\n\n maximum = np.max(data)\n plt.imshow(data, cmap=plt.cm.jet, interpolation='nearest', vmin=0.15*maximum, vmax=0.75*maximum)\n plt.colorbar()\n plt.title(os.path.basename(title), fontsize=8)\n plt.xticks([])\n plt.yticks([])\n plt.tight_layout()\n plt.savefig(os.path.join(outputdir, '{}.jpg'.format(title)))\n plt.close()", "def print_images(i, df):\n \n images_folder_path = \"dataset/petfinder-adoption-prediction/train_images/\"\n plt.imshow(cv2.cvtColor(cv2.imread(images_folder_path+df.filename[i]), cv2.COLOR_BGR2RGB),);\n plt.axis(\"off\");\n plt.show()", "def show(self):\n \n \n \n \n \n \n r = 4\n f, axarr = plt.subplots(r, r, figsize=(8,8))\n counter = 0\n for i in range(r):\n for j in range(r):\n temp = self.x[counter,:]\n counter += 1\n img = self.x[counter,:]\n axarr[i][j].imshow(img)\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################", "def print_row(row,writer,x):\n sys.stdout.write(unichr(0x2503))\n for n in xrange(row.shape[0]-1):\n writer(row[n],Width,(x,n))\n sys.stdout.write(unichr(0x2502))\n if row.shape[0] > 0:\n writer(row[-1],Width,(x,row.shape[0]-1))\n sys.stdout.write(unichr(0x2503) + '\\n')", "def test_plot_img_with_auto_cut_coords(display_mode):\n data = np.zeros((20, 20, 20))\n data[3:-3, 3:-3, 3:-3] = 1\n img = Nifti1Image(data, np.eye(4))\n plot_img(img, cut_coords=None, display_mode=display_mode, black_bg=True)\n plt.close()", "def showCortexImg(pV,nV):\n # object arrays of the positive and negative images\n pos_cort_img = np.empty(8, dtype=object)\n neg_cort_img = np.empty(8, dtype=object)\n for t in range(8):\n # cortical mapping functions\n lpos, rpos = cortex.cort_img(pV[:,t,:], L, L_loc, R, R_loc, cort_size, G)\n lneg, rneg = cortex.cort_img(nV[:,t,:], L, L_loc, R, R_loc, cort_size, G)\n pos_cort_img[t] = np.concatenate((np.rot90(lpos),np.rot90(rpos,k=3)),axis=1)\n neg_cort_img[t] = np.concatenate((np.rot90(lneg),np.rot90(rneg,k=3)),axis=1)\n # stack all images into a grid\n posRGcort = np.vstack((pos_cort_img[:4]))\n negRGcort = np.vstack((neg_cort_img[:4]))\n posYBcort = np.vstack((pos_cort_img[4:]))\n negYBcort = np.vstack((neg_cort_img[4:]))\n mergecort = np.concatenate((posRGcort,negRGcort,posYBcort,negYBcort),axis=1)\n return mergecort", "def write_nifti(self, output_path):\n nib.save(self.niftiImage, output_path)\n print('Image saved at: {}'.format(output_path))", "def print_seg_row_col(sp) : \n s, r, c = src_from_rc8x8(sp.peak_y_raw, sp.peak_x_raw)\n print('seg: %d, row: %.1f, col: %.1f' % (s, r, c))", "def imshow(self, depth):\n layer = self.cube[depth]\n img = []\n for i in range(self.height):\n img.append([layer[i][j].value for j in range(self.width)])\n plt.imshow(img, cmap='gray')\n plt.show()", "def display_mask(i):\n ### To display binary masks, comment the folowing line\n # mask = np.argmax(testing_preds[i], axis=-1)\n ### To display probability maps, comment the folowing line\n mask = testing_preds[i,:,:,-1]\n mask = np.expand_dims(mask, axis=-1)\n img = PIL.ImageOps.autocontrast(keras.preprocessing.image.array_to_img(mask))\n display(img)", "def _showdata(self, prec=4):\n print('nh {0:d} nslices {1:d} nbl {2:d} ncp {3:d} nca {4:d} '.format(\n self.nh, self.nslices, self.nbl, self.ncp, self.nca), end=\"\")\n print(\"observables in np arrays with {:d} rows\".format(self.nslices))\n\n if len(self.observables) == 4:\n print('nca', 
self.nca)\n else:\n print()\n np.set_printoptions(precision=prec)\n\n print(self.fp.shape, \"fp (degrees, but stored internally in radians):\\n\",\n self.fp*self.degree, \"\\n\")\n print(self.fa.shape, \"fa:\\n\", self.fa, \"\\n\")\n\n print(self.cp.shape, \"cp (degrees, but stored internally in radians):\\n\",\n self.cp*self.degree, \"\\n\")\n if len(self.observables) == 4:\n print(self.ca.shape, \"ca:\\n\", self.ca, \"\\n\")\n # print(self.info4oif_dict)\n\n print(\"hole centers array shape:\", self.ctrs.shape)\n\n print(len(self.bholes), \"baseline hole indices\\n\", self.bholes)\n print(self.bls.shape, \"baselines:\\n\", self.bls)\n\n print(self.tholes.shape, \"triple hole indices:\\n\", self.tholes)\n print(self.tuv.shape, \"triple uv vectors:\\n\", self.tuv)\n\n print(self.qholes.shape, \"quad hole indices:\\n\", self.qholes)\n print(self.quvw.shape, \"quad uvw vectors:\\n\", self.quvw)", "def show_points_on_img(mask,img):\n labeled, num_objects = ndi.label(mask)\n slices = ndi.find_objects(labeled)\n x, y = [], []\n for dy,dx in slices:\n x_center = (dx.start + dx.stop - 1)/2\n x.append(x_center)\n y_center = (dy.start + dy.stop - 1)/2 \n y.append(y_center)\n plt.figure()\n plt.imshow(img)\n plt.autoscale(False)\n plt.plot(x,y, \"o\")", "def output_tensor(interpreter, i):\n output_details = interpreter.get_output_details()[i]\n output_data = np.squeeze(interpreter.tensor(output_details['index'])())\n if 'quantization' not in output_details:\n return output_data\n scale, zero_point = output_details['quantization']\n if scale == 0:\n return output_data - zero_point\n return scale * (output_data - zero_point)", "def output(self):\n try:\n sys.stdout.write(chr(self.tape.current_cell()%256)) # Wrapping fits it into ascii codes\n except:\n print \"Error -001\"", "def display(self):\n rows = [(self.views[0].display, len(self.views))]\n fig, axes = plt.subplots(1, len(self.views),\n figsize=self._figsize(rows),\n squeeze=True)\n for ax, view in zip(axes.ravel(), self.views):\n ax.imshow(view.display)\n ax.axis('off')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n ax.set(title=view.position.id)\n fig.tight_layout()\n fig.canvas.draw()\n img_array = np.array(fig.canvas.renderer._renderer)\n plt.close('all')\n return img_array" ]
[ "0.71635914", "0.70319664", "0.7031056", "0.6987682", "0.68559015", "0.6734682", "0.6646704", "0.64677995", "0.6236939", "0.60654515", "0.59142405", "0.56288266", "0.5549433", "0.5510461", "0.5490116", "0.5405265", "0.54043865", "0.5386131", "0.5378602", "0.53766", "0.5349916", "0.53345853", "0.53233147", "0.5274436", "0.52617544", "0.5247129", "0.5245189", "0.5230949", "0.5229908", "0.51902175", "0.5186047", "0.5184458", "0.51690644", "0.5155915", "0.51532286", "0.5137381", "0.5127667", "0.5125981", "0.5116036", "0.50922906", "0.505859", "0.50451785", "0.50314415", "0.50077665", "0.5004633", "0.5001875", "0.5001341", "0.4977786", "0.49746695", "0.4973014", "0.49705553", "0.49625605", "0.49597228", "0.49588355", "0.49491706", "0.49464342", "0.49378952", "0.49330574", "0.49228323", "0.49228323", "0.49162817", "0.4910205", "0.49089447", "0.4901419", "0.48925093", "0.48925093", "0.4890746", "0.4884274", "0.4882569", "0.48714066", "0.48637024", "0.4858", "0.48559403", "0.48438388", "0.48401174", "0.48308745", "0.48265335", "0.48249018", "0.48012698", "0.48000145", "0.47978744", "0.47928292", "0.47892663", "0.47880018", "0.47868156", "0.47821486", "0.47748852", "0.477364", "0.47682977", "0.4766021", "0.47547206", "0.47540522", "0.47469637", "0.4744398", "0.47321093", "0.47281054", "0.47271773", "0.47255906", "0.47150487", "0.47132534" ]
0.7089794
1
Create a list of Euler angles suitable for projections. method is either 'S' for Saff algorithm or 'P' for Penczek '94 algorithm 'S' assumes phi1> delta ; symmetry if this is set to pointgroup symmetry (cn or dn) or helical symmetry with pointgroup symmetry (scn or sdn), it will yield angles from the asymmetric unit, not the specified range;
def even_angles(delta = 15.0, theta1=0.0, theta2=90.0, phi1=0.0, phi2=359.99, method = 'S', phiEqpsi = "Minus", symmetry='c1'):
    from math import pi, sqrt, cos, acos, tan, sin
    from utilities import even_angles_cd
    from string import lower, split
    angles = []
    symmetryLower = symmetry.lower()
    symmetry_string = split(symmetry)[0]
    if (symmetry_string[0] == "c"):
        if(phi2 == 359.99):
            angles = even_angles_cd(delta, theta1, theta2, phi1, phi2/int(symmetry_string[1:]), method, phiEqpsi)
            if(int(symmetry_string[1:]) > 1):
                if( int(symmetry_string[1:])%2 == 0):
                    qt = 360.0/int(symmetry_string[1:])
                else:
                    qt = 180.0/int(symmetry_string[1:])
                n = len(angles)
                for i in xrange(n):
                    t = n-i-1
                    if(angles[t][1] == 90.0):
                        if(angles[t][0] >= qt): del angles[t]
        else:
            angles = even_angles_cd(delta, theta1, theta2, phi1, phi2, method, phiEqpsi)
    elif(symmetry_string[0] == "d"):
        if(phi2 == 359.99):
            angles = even_angles_cd(delta, theta1, theta2, phi1, 360.0/2/int(symmetry_string[1:]), method, phiEqpsi)
            if (int(symmetry_string[1:])%2 == 0):
                qt = 360.0/2/int(symmetry_string[1:])
            else:
                qt = 180.0/2/int(symmetry_string[1:])
            n = len(angles)
            for i in xrange(n):
                t = n-i-1
                if(angles[t][1] == 90.0):
                    if(angles[t][0] >= qt): del angles[t]
        else:
            angles = even_angles_cd(delta, theta1, theta2, phi1, phi2, method, phiEqpsi)
    elif(symmetry_string[0] == "s"):
        # if symmetry is "s": deltaphi=delta, theta initial=theta1, theta end=90, deltatheta=theta2
        # for helical, theta1 cannot be 0.0
        if theta1 > 90.0:
            ERROR('theta1 must be less than 90.0 for helical symmetry', 'even_angles', 1)
        if theta1 == 0.0: theta1 = 90.0
        theta_number = int((90.0 - theta1)/theta2)
        # for helical, symmetry = s or scn
        cn = int(symmetry_string[2:])
        for j in xrange(theta_number, -1, -1):
            if( j == 0):
                if (symmetry_string[1] == "c"):
                    if cn%2 == 0:
                        k = int(359.99/cn/delta)
                    else:
                        k = int(359.99/2/cn/delta)
                elif (symmetry_string[1] == "d"):
                    if cn%2 == 0:
                        k = int(359.99/2/cn/delta)
                    else:
                        k = int(359.99/4/cn/delta)
                else:
                    ERROR("For helical structure, we only support scn and sdn symmetry", "even_angles", 1)
            else:
                if (symmetry_string[1] == "c"):
                    k = int(359.99/cn/delta)
                elif (symmetry_string[1] == "d"):
                    k = int(359.99/2/cn/delta)
            for i in xrange(k+1):
                angles.append([i*delta, 90.0-j*theta2, 90.0])
    else:
        # This is very close to the Saff even_angles routine on the asymmetric unit;
        # the only parameters used are symmetry and delta.
        # The formulae are given in the Transform Class Paper.
        # The symmetric unit is defined by three points b, c, a of Fig 2 of the paper:
        #   b is (0,0,1)
        #   c is (sin(thetac),0,cos(thetac))
        #   a is (sin(thetac)cos(Omega),sin(thetac)cos(Omega),cos(thetac))
        #   f is the normalized sum of all 3
        # The possible symmetries are in list_syms.
        # The symmetry determines thetac and Omega.
        # The spherical area is Omega - pi/3; should be equal to 4*pi/(3*#Faces)
        # symmetry = 'tet'; delta = 6;
        scrunch = 0.9    # closeness factor to eliminate oversampling corners
        #nVec = []       # x,y,z triples
        piOver = pi/180.0
        Count = 0        # used to count the number of angles
        if   (symmetryLower[0:3] == "tet"): m = 3.0; fudge = 0.9    # fudge is a factor used to adjust phi steps
        elif (symmetryLower[0:3] == "oct"): m = 4.0; fudge = 0.8
        elif (symmetryLower[0:3] == "ico"): m = 5.0; fudge = 0.95
        else: ERROR("allowable symmetries are cn, dn, tet, oct, icos", "even_angles", 1)

        n = 3.0
        OmegaR = 2.0*pi/m; cosOmega = cos(OmegaR)
        Edges = 2.0*m*n/(2.0*(m+n)-m*n)
        Faces = 2*Edges/n
        Area = 4*pi/Faces/3.0            # also equals 2*pi/3 + Omega
        costhetac = cosOmega/(1-cosOmega)
        deltaRad = delta*pi/180
        NumPoints = int(Area/(deltaRad*deltaRad))
        fheight = 1/sqrt(3)/(tan(OmegaR/2.0))

        z0 = costhetac                   # initialize loop
        z = z0
        phi = 0
        Deltaz = (1-costhetac)/(NumPoints-1)

        #[1, phi, 180.0*acos(z)/pi, 0.]
        anglesLast = [phi, 180.0*acos(z)/pi, 0.]
        angles.append(anglesLast)
        nLast = [sin(acos(z))*cos(phi*piOver), sin(acos(z))*sin(phi*piOver), z]
        nVec = []
        nVec.append(nLast)
        Count += 1

        for k in xrange(1, (NumPoints-1)):
            z = z0 + Deltaz*k            # Is it higher than fhat or lower
            r = sqrt(1-z*z)
            if (z > fheight): phiRmax = OmegaR/2.0
            if (z <= fheight):
                thetaR = acos(z)
                cosStuff = (cos(thetaR)/sin(thetaR))*sqrt(1. - 2*cosOmega)
                phiMax = 180.0*(OmegaR - acos(cosStuff))/pi
            angleJump = fudge*delta/r
            phi = (phi + angleJump)%(phiMax)
            anglesNew = [phi, 180.0*acos(z)/pi, 0.]
            nNew = [sin(acos(z))*cos(phi*piOver), sin(acos(z))*sin(phi*piOver), z]
            diffangleVec = [acos(nNew[0]*nVec[k][0] + nNew[1]*nVec[k][1] + nNew[2]*nVec[k][2]) for k in xrange(Count)]
            diffMin = min(diffangleVec)
            if (diffMin > angleJump*piOver*scrunch):
                Count += 1
                angles.append(anglesNew)
                nVec.append(nNew)
                #[Count, phi, 180*acos(z)/pi, 0.]
            anglesLast = anglesNew
            nLast = nNew

        angles.append([0.0, 0.0, 0.0])
        nLast = [0., 0., 1.]
        nVec.append(nLast)
        if(theta2 == 180.0): angles.append([0.0, 180.0, 0.0])

        angles.reverse()
        if(phiEqpsi == "Minus"):
            for i in xrange(len(angles)): angles[i][2] = (720.0-angles[i][0])%360.0
        #print(Count, NumPoints)

        # look at the distribution:
        # Count = len(angles); piOver = pi/180.0
        # phiVec   = [angles[k][0] for k in range(Count)]
        # thetaVec = [angles[k][1] for k in range(Count)]
        # xVec = [sin(piOver*angles[k][1])*cos(piOver*angles[k][0]) for k in range(Count)]
        # yVec = [sin(piOver*angles[k][1])*sin(piOver*angles[k][0]) for k in range(Count)]
        # zVec = [cos(piOver*angles[k][1]) for k in range(Count)]
        # pylab.plot(yVec, zVec, '.'); pylab.show()

    return angles
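For context, a minimal usage sketch of the routine above. It assumes the SPARX/EMAN2 Python 2 environment, where the `utilities` module (and its `ERROR`/`Util` helpers) is importable; the `delta` and `symmetry` values are illustrative only, not taken from this record.

# Hypothetical call inside a SPARX/EMAN2 (Python 2) session.
from utilities import even_angles

refs = even_angles(delta=15.0, symmetry='c5')   # quasi-even angles over the c5 asymmetric unit
phi, theta, psi = refs[0]                       # each entry is [phi, theta, psi] in degrees
# with the default phiEqpsi="Minus", psi == (720.0 - phi) % 360.0 for every entry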
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def even_angles_cd(delta, theta1=0.0, theta2=90.0, phi1=0.0, phi2=359.99, method = 'P', phiEQpsi='Minus'):\n\tfrom math import pi, sqrt, cos, acos\n\tangles = []\n\tif (method == 'P'):\n\t\ttemp = Util.even_angles(delta, theta1, theta2, phi1, phi2)\n\t\t#\t\t phi, theta, psi\n\t\tfor i in xrange(len(temp)/3): angles.append([temp[3*i],temp[3*i+1],temp[3*i+2]]);\n\telse: #elif (method == 'S'):\n\t\tDeltaz = cos(theta2*pi/180.0)-cos(theta1*pi/180.0)\n\t\ts = delta*pi/180.0\n\t\tNFactor = 3.6/s\n\t\twedgeFactor = abs(Deltaz*(phi2-phi1)/720.0)\n\t\tNumPoints = int(NFactor*NFactor*wedgeFactor)\n\t\tangles.append([phi1, theta1, 0.0])\n\t\tz1 = cos(theta1*pi/180.0); \tphi=phi1 # initialize loop\n\t\tfor k in xrange(1,(NumPoints-1)):\n\t\t\tz=z1 + Deltaz*k/(NumPoints-1)\n\t\t\tr= sqrt(1-z*z)\n\t\t\tphi = phi1+(phi + delta/r -phi1)%(abs(phi2-phi1))\n\t\t\t#[k, phi,180*acos(z)/pi, 0]\n\t\t\tangles.append([phi, 180*acos(z)/pi, 0.0])\n\t\t#angles.append([p2,t2,0]) # This is incorrect, as the last angle is really the border, not the element we need. PAP 01/15/07\n\tif (phiEQpsi == 'Minus'):\n\t\tfor k in xrange(len(angles)): angles[k][2] = (720.0 - angles[k][0])%360.0\n\tif( theta2 == 180.0 ): angles.append( [0.0, 180.0, 0.0] )\n\n\treturn angles", "def angles(self, num: int) -> Iterable[float]:\n if num < 2:\n raise ValueError(\"num >= 2\")\n start = self.dxf.start_angle % 360\n stop = self.dxf.end_angle % 360\n if stop <= start:\n stop += 360\n for angle in linspace(start, stop, num=num, endpoint=True):\n yield angle % 360", "def _euler_90_algorithm(self):\n # define scale factor from min radius and output angle (which is ninety degrees), grab radius from input\n output_angle = np.pi / 2.\n effective_radius = self.radius\n # Euler curvature scaling factor, determined from calculating a 1. radius term and looking at output\n min_radius = effective_radius / 1.87009582269\n a_scale = 2. * min_radius * (output_angle / 2.0)**0.5\n # too many points causes issues on gdsii, splitting over different sizes is probably most suitable way\n if effective_radius < 30.:\n points = 50\n else:\n points = 80\n # Create t array for calculating parametric curve\n end_t = (output_angle / 2.0)**0.5\n all_t = np.linspace(0., end_t, points)\n # Create a list for x values and generate the x components of parametric curve using loop\n xs = list()\n for t in all_t:\n xs.append(a_scale * (t - (1 / 10.) * t**5 + (1 / 216.) * t**9 - (1 / 9360.) * t**13 + (1 / 685440.) * t**17))\n # Do the same for y values\n ys = list()\n for t in all_t:\n ys.append(a_scale * (t**3 * (1 / 3.) - (1 / 42.) * t**7 + (1 / 1320.) * t**11 - (1 / 75600.) * t**15))\n # Combine the xs and ys to perform the mirroring operation\n start_euler_xy = zip(xs, ys)\n # Calculating Mirror curve for X and Y, need x axis angle and end positions\n angle_x = np.pi / 2. 
+ output_angle / 2.\n end_x = start_euler_xy[-1][0]\n end_y = start_euler_xy[-1][1]\n # initialising for loops, looping using checked equations from Mathematica for mirroring around line\n x_mirror = list()\n y_mirror = list()\n for elem in start_euler_xy:\n x_mirror.append(end_x + np.cos(2 * angle_x) * (elem[0] - end_x) + np.sin(2 * angle_x) * (elem[1] - end_y))\n\n for elem in start_euler_xy:\n y_mirror.append(end_y + np.sin(2 * angle_x) * (elem[0] - end_x) - np.cos(2 * angle_x) * (elem[1] - end_y))\n\n # takes output of mirrors, flips them and combines them\n mirror_xy = zip(x_mirror[::-1], y_mirror[::-1])\n\n # Combines initial and mirrored list to generate the euler curve\n euler_full = start_euler_xy + mirror_xy\n return euler_full", "def getAngles(self):\n try:\n return self._angleList\n except AttributeError:\n pass\n forceConstant=self._raw_data[\"ANGLE_FORCE_CONSTANT\"]\n angleEquil=self._raw_data[\"ANGLE_EQUIL_VALUE\"]\n anglePointers = self._raw_data[\"ANGLES_INC_HYDROGEN\"] \\\n +self._raw_data[\"ANGLES_WITHOUT_HYDROGEN\"]\n self._angleList=[]\n forceConstConversionFactor = (units.kilocalorie_per_mole/(units.radian*units.radian)).conversion_factor_to(units.kilojoule_per_mole/(units.radian*units.radian))\n for ii in range(0,len(anglePointers),4):\n if int(anglePointers[ii])<0 or \\\n int(anglePointers[ii+1])<0 or \\\n int(anglePointers[ii+2])<0:\n raise Exception(\"Found negative angle atom pointers %s\"\n % ((anglePointers[ii],\n anglePointers[ii+1],\n anglePointers[ii+2]),))\n iType=int(anglePointers[ii+3])-1\n self._angleList.append((int(anglePointers[ii])//3,\n int(anglePointers[ii+1])//3,\n int(anglePointers[ii+2])//3,\n float(forceConstant[iType])*forceConstConversionFactor,\n float(angleEquil[iType])))\n return self._angleList", "def angle_calc(sides):\n return 360//sides", "def get_angles(sides):\n return [get_angle(sides[1], sides[2], sides[0]),\n get_angle(sides[2], sides[0], sides[1]),\n get_angle(sides[0], sides[1], sides[2])]", "def euler_angles(quatX,quatY,quatZ,quatW):\n\n\troll1 = 2.0 * (quatW * quatX + quatY * quatZ)\n\troll2 = (1.0 - 2.0) * (quatX * quatX + quatY * quatY)\n\n\tyaw1 = 2.0 * (quatW * quatZ + quatX * quatY)\n\tyaw2 = 1.0 - 2.0 * (quatY * quatY + quatZ * quatZ)\n\n\troll = math.atan2(roll1,roll2)\n\tpitch = math.asin(max(-1.0, min(1.0, 2.0 *(quatW * quatY - quatZ * quatX))))\n\tyaw = math.atan2(yaw1,yaw2)\n\n\troll_w = int(((roll + (math.pi)) / (math.pi * 2.0) * 18))\n\tpitch_w = int(pitch + (math.pi/2.0)/math.pi * 18)\n\tyaw_w = int(yaw + (math.pi / (math.pi * 2.0)) * 18)\n\n\teulerAngles = [roll_w,pitch_w,yaw_w]\n\treturn eulerAngles", "def to_angles(self) -> np.ndarray:\n phi = np.arctan2(2.0*(self.w*self.x + self.y*self.z), 1.0 - 2.0*(self.x**2 + self.y**2))\n theta = np.arcsin(2.0*(self.w*self.y - self.z*self.x))\n psi = np.arctan2(2.0*(self.w*self.z + self.x*self.y), 1.0 - 2.0*(self.y**2 + self.z**2))\n return np.array([phi, theta, psi])", "def angles(self) -> list[npt.NDArray[np.float_]]:\n result = []\n a = cast(Segment, self.edges[-1])\n for b in self.edges:\n b = cast(Segment, b)\n result.append(angle(a.vertices[1], a.vertices[0], b.vertices[1]))\n a = b\n\n return result", "def range_finder_angles(self):\n return -90, -75, -60, -45, -30, -20, -15, -10, -5, 0, 5, 10, 15, 20, \\\n 30, 45, 60, 75, 90", "def range_finder_angles(self):\n return -90, -75, -60, -45, -30, -20, -15, -10, -5, 0, 5, 10, 15, 20, \\\n 30, 45, 60, 75, 90", "def to_angles(self) -> np.ndarray:\n phi = np.arctan2(2.0*(self.w*self.x + self.y*self.z), 1.0 - 2.0*(self.x**2 
+ self.y**2))\n theta = np.arcsin(2.0*(self.w*self.y - self.z*self.x))\n psi = np.arctan2(2.0*(self.w*self.z + self.x*self.y), 1.0 - 2.0*(self.y**2 + self.z**2))\n return np.c_[phi, theta, psi]", "def steps_to_angle():\n pass", "def polar_angle(points):\n\n\tpolar_angle = []\n\n\tfor each in points:\n\t\tdy = each[1] - P0[1]\n\t\tdx = each[0] - P0[0]\n\t\tpolar_angle.append(atan2(dy, dx))\n\n\treturn polar_angle", "def get_euler_angles_from_T(T):\n pass", "def get_internal_angles(self):\n\n angles = []\n\n for elx, elz in zip(self.grid['x'], self.grid['z']):\n el_angles = []\n xy = np.vstack((elx, elz))\n for i in range(0, elx.size):\n i1 = (i - 1) % elx.size\n i2 = (i + 1) % elx.size\n\n a = (xy[:, i] - xy[:, i1])\n b = (xy[:, i2] - xy[:, i])\n # note that nodes are ordered counter-clockwise!\n angle = np.pi - np.arctan2(\n a[0] * b[1] - a[1] * b[0],\n a[0] * b[0] + a[1] * b[1]\n )\n el_angles.append(angle * 180 / np.pi)\n angles.append(el_angles)\n return np.array(angles)", "def polar_angle(self, p0, p1=None):\n if p1 == None:\n p1 = anchor\n y_span = p0[1] - p1[1]\n x_span = p0[0] - p1[0]\n return atan2(y_span, x_span)", "def angles(self):\n self._sort_measurements()\n return self._angles", "def archimedean(\n radius_start,\n radius_end,\n step,\n center=None,\n close=False,\n point_start=None,\n angle_start=None,\n arc_res=None):\n\n if radius_start > radius_end:\n sign = 1\n else:\n sign = -1\n\n # the spiral constant\n # evaluated from: step = K * 2 * pi\n K = step / (np.pi * 2)\n\n # use our constant to find angular start and end\n theta_start = radius_start / K\n theta_end = radius_end / K\n\n # if not passed set angular resolution\n if arc_res is None:\n arc_res = constants.default_arc\n\n arc_count = int(np.ceil((\n abs(theta_end - theta_start)) / arc_res))\n\n # given that arcs will share points how many\n # points on the helix do we need\n arc_index, point_count = arc_indexes(arc_count)\n\n assert arc_index.max() == point_count - 1\n\n # create an array of angles\n theta = np.linspace(theta_start, theta_end, point_count)\n\n # use the spiral equation to generate radii\n radii = theta * K\n\n # make sure they match\n assert np.isclose(radii[0], radius_start)\n assert np.isclose(radii[-1], radius_end)\n\n # do offset AFTER radius calculation\n if angle_start is not None:\n theta += (angle_start - theta_start)\n\n # convert polar coordinates to 2D cartesian\n points = np.column_stack(\n (np.cos(theta), np.sin(theta))) * radii.reshape((-1, 1))\n\n if close:\n\n # get indexes of arcs required to close\n close_idx, close_ct = arc_indexes(\n int(np.ceil((np.pi * 2) / arc_res)))\n\n # the additional angles needed to close\n # we are cutting off the first point as its a duplicate\n t_close = np.linspace(theta[-1],\n theta[-1] + np.pi * 2 * sign,\n close_ct)[1:]\n\n # additional points to close the arc\n closer = np.column_stack((\n np.cos(t_close), np.sin(t_close))) * radii[-1]\n assert len(closer) == close_ct - 1\n assert len(points) == point_count\n\n # stack points with closing arc\n points = np.vstack((points, closer))\n # add the additional points to the count\n point_count += close_ct - 1\n # add the additional arc indexes\n\n arc_index = np.vstack((\n arc_index, arc_index[-1][-1] + close_idx))\n\n assert len(points) == point_count\n # max index of arcs should correspond to points\n assert arc_index[-1][-1] == point_count - 1\n\n if center is not None:\n points += center\n\n # convert sequential points into three point arcs\n arcs = points[arc_index]\n\n if constants.strict:\n # 
check all arcs to make sure the correspond\n for a, b in zip(arcs[:-1], arcs[1:]):\n assert np.allclose(a[2], b[0])\n\n if point_start is not None:\n a, b = np.clip(\n (point_start[:2] - center[:2]) / radius_start,\n -1.0, 1.0)\n assert np.isclose(a, np.cos(angle_start), atol=1e-3)\n assert np.isclose(b, np.sin(angle_start), atol=1e-3)\n\n return arcs", "def angles(self):\n penult = self._coordinates[-2]\n last = self._coordinates[-1]\n angles = []\n for c in self._coordinates:\n angle = (math.atan2(penult[0]-last[0], penult[1]-last[1]) -\n math.atan2(c[0]-last[0], c[1]-last[1]))\n angles.append(angle)\n penult, last = last, c\n return sorted(angles)", "def to_angles(self) -> np.ndarray:\n phi = np.arctan2(self.A[1, 2], self.A[2, 2]) # Roll Angle\n theta = -np.sin(self.A[0, 2]) # Pitch Angle\n psi = np.arctan2(self.A[0, 1], self.A[0, 0]) # Yaw Angle\n return np.array([phi, theta, psi])", "def calculate_angles(self, x, y):\n Oimat = inv(self.Omat)\n Mat = self.pixel_size * inv(self.Dmat) * Oimat\n polar_angles = []\n azimuthal_angles = []\n for i in range(len(x)):\n peak = Oimat * (vec(x[i], y[i]) - self.Cvec)\n v = norm(Mat * peak)\n polar_angle = np.arctan(v / self.distance)\n polar_angles.append(polar_angle)\n azimuthal_angles.append(np.arctan2(-peak[1, 0], peak[2, 0]))\n return (np.array(polar_angles) * degrees,\n np.array(azimuthal_angles) * degrees)", "def __generate_LSP_angles__(self):\n self.LSP_ANGLES = np.linspace(0, self._range_lsp_angle, ArrayInfo.len_lsp) - (self._range_lsp_angle / 2)\n self.LSP_MIN_ANGLE = np.min(self.LSP_ANGLES) - 0.5 # Angles outside of this range are discarded\n self.LSP_MAX_ANGLE = np.max(self.LSP_ANGLES) + 0.5 # Angles outside of this range are discarded", "def angle(self, dates, values, angle_type):\n \n print(\"Angels running...\")\n exa_days = []\n exa_idx, extms = self.extrema(values, angle_type)\n for i in range(len(exa_idx)):\n exa_days.append(dates[exa_idx[i]])\n def_dates, def_point, k = self.calAng(exa_days, extms, angle_type)\n print(\"Angles done!\")\n return def_dates, def_point, k", "def polar_decomposition(self):\n return self.polar_unit_vector, self.polar_angle", "def euler2Q(self, (phi, theta, psi)):\n\thalf_phi = 0.5*phi;\n\thalf_theta = 0.5*theta;\n\thalf_psi = 0.5*psi;\n\n return np.asarray([\n (cos(half_phi)*cos(half_theta)*cos(half_psi)) + (sin(half_phi)*sin(half_theta)*sin(half_psi)),\n (sin(half_phi)*cos(half_theta)*cos(half_psi)) - (cos(half_phi)*sin(half_theta)*sin(half_psi)),\n (cos(half_phi)*sin(half_theta)*cos(half_psi)) + (sin(half_phi)*cos(half_theta)*sin(half_psi)),\n (cos(half_phi)*cos(half_theta)*sin(half_psi)) - (sin(half_phi)*sin(half_theta)*cos(half_psi))\n ]);", "def standarize_euler(euler: np.ndarray, in_radian=True) -> np.ndarray:\n if not in_radian:\n euler = np.radians(euler)\n return np.where(\n euler<0, \n (euler+2.0*np.pi)%np.array([2.0*np.pi,np.pi,2.0*np.pi]),\n euler%(2*np.pi)\n )", "def projection_angles(name):\n if name == 'xy':\n return 0, 0, 0\n elif name == 'xz':\n return -np.pi/2, 0, 0\n elif name == 'yz':\n return -np.pi/2, 0, -np.pi/2\n elif name == 'yx':\n return 0, np.pi, np.pi/2\n elif name == 'zx':\n return np.pi/2, np.pi/2, 0\n elif name == 'zy':\n return np.pi, np.pi/2, np.pi\n else:\n raise ValueError('Invalid projection name: {!r}.'.format(name))", "def number_to_angle(number: int, number_sectors: int) -> [np.float, np.float, np.float]:\n angles_phi = np.linspace(-22.5, 22.5, number_sectors)\n angles_theta = np.linspace(67.5, 112.5, number_sectors)\n theta = number // number_sectors\n phi = 
number % number_sectors\n return asCartesian([1, angles_theta[theta], angles_phi[phi]])", "def dihedral_calculator():\n\n\t# Prime with first 3 points\n\tp1 = Vector3((yield None))\n\tp2 = Vector3((yield None))\n\tp3 = Vector3((yield None))\n\n\t# Set up for first angle\n\tlastpoint = p3\n\tlastdisp = p3 - p2\n\tlastnormal = ((p2 - p1) @ lastdisp).normalize()\n\n\tangle = None\n\n\t# For each point starting with the 4th, we can compute a new angle\n\twhile True:\n\n\t\t# Yield the last angle (None the first time), get the next point\n\t\tnextpoint = Vector3((yield angle))\n\n\t\t# Displacement from previous point to current\n\t\tnextdisp = nextpoint - lastpoint\n\n\t\t# Normal vector to plane containing last 3 points\n\t\tnextnormal = (lastdisp @ nextdisp).normalize()\n\n\t\t# This one's complicated... see step 3 in source.\n\t\tx = lastnormal * nextnormal\n\t\ty = (lastnormal @ lastdisp.normalize()) * nextnormal\n\t\tangle = -math.atan2(y, x)\n\n\t\t# Current values used as previous in next loop\n\t\tlastpoint = nextpoint\n\t\tlastdisp = nextdisp\n\t\tlastnormal = nextnormal", "def Euler2Rotation(phi, theta, psi):\n # only call sin and cos once for each angle to speed up rendering\n c_phi = np.cos(phi)\n s_phi = np.sin(phi)\n c_theta = np.cos(theta)\n s_theta = np.sin(theta)\n c_psi = np.cos(psi)\n s_psi = np.sin(psi)\n\n R_roll = np.array([[1, 0, 0],\n [0, c_phi, s_phi],\n [0, -s_phi, c_phi]])\n R_pitch = np.array([[c_theta, 0, -s_theta],\n [0, 1, 0],\n [s_theta, 0, c_theta]])\n R_yaw = np.array([[c_psi, s_psi, 0],\n [-s_psi, c_psi, 0],\n [0, 0, 1]])\n\n R = R_roll @ R_pitch @ R_yaw # inertial to body (Equation 2.4 in book)\n return R.T # transpose to return body to inertial", "def get_degrees(self):\n return np.arange(self.lmax + 1)", "def calculate_angle(start: tuple, end: tuple):\n radians = -math.atan2(end[0] - start[0], end[1] - start[1])\n return math.degrees(radians) % 360", "async def attitude_euler(self):\n\n request = telemetry_pb2.SubscribeAttitudeEulerRequest()\n attitude_euler_stream = self._stub.SubscribeAttitudeEuler(request)\n\n try:\n async for response in attitude_euler_stream:\n \n\n \n yield EulerAngle.translate_from_rpc(response.attitude_euler)\n finally:\n attitude_euler_stream.cancel()", "def angle(self):\n return np.array([f.angle() for f in self])", "def rotationMatrixToEulerAngles(R) :\n sy = np.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])\n singular = sy < 1e-6\n\n if not singular :\n x = np.arctan2(R[2,1] , R[2,2])\n y = np.arctan2(-R[2,0], sy)\n z = np.arctan2(R[1,0], R[0,0])\n else :\n x = np.arctan2(-R[1,2], R[1,1])\n y = np.arctan2(-R[2,0], sy)\n z = 0\n\n return np.array([x, y, z])", "def _rotation_matrix_to_euler_angles(self, R):\n assert (self._is_rotation_matrix(R))\n\n sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])\n singular = sy < 1e-6\n\n if not singular:\n x = math.atan2(R[2, 1], R[2, 2])\n y = math.atan2(-R[2, 0], sy)\n z = math.atan2(R[1, 0], R[0, 0])\n else:\n x = math.atan2(-R[1, 2], R[1, 1])\n y = math.atan2(-R[2, 0], sy)\n z = 0\n\n return np.array([x, y, z])", "def _euler_spun_90(self):\n # specifying variables from shape using .self and the cutting coordinates to fit my old definition\n effective_radius = self.radius\n start_point = cutting_coordinates(self.original_shape[0], self.original_shape[1], self.radius)[0]\n bend_point = self.original_shape[1]\n end_point = cutting_coordinates(self.original_shape[2], self.original_shape[1], self.radius)[0]\n end_cut = cutting_coordinates(self.original_shape[2], self.original_shape[1], 
self.radius)\n\n # Uses Euler curve definition with 90 degree angle for l bend and rotates for input angle\n standard_curve = self._euler_90_algorithm()\n\n # determine which direction the curve is going by comparing the start and end point and input angle\n direction = self._left_or_right()\n input_angle = np.arctan2((bend_point[1] - start_point[1]), (bend_point[0] - start_point[0]))\n\n # calculates the cutting positions around the bend point from the input radius\n first_cut = start_point\n second_cut = end_point\n\n # Rotate the outputs by rotation matrix and offset, flip y depending on left or right\n spun_x = []\n for elems in standard_curve:\n spun_x.append(elems[0] * np.cos(input_angle) + elems[1] * -1. * np.sin(input_angle) * direction + first_cut[0])\n\n spun_y = []\n for elems in standard_curve:\n spun_y.append(elems[0] * np.sin(input_angle) + elems[1] * np.cos(input_angle) * direction + first_cut[1])\n\n # combine the flipped(or not) y's and normal x's\n full_spun = zip(spun_x, spun_y)\n\n # remove final point and replace it with the end point just to avoid the radius scaling issue\n # need another definition because the structure needs to a list of 1 tuple!\n full_shape = full_spun[:-1] + end_cut\n return full_shape", "def cell_angles(lattice, return_unit='radian'):\n assert lattice.shape == (3,3)\n a = lattice[:, 0]\n b = lattice[:, 1]\n c = lattice[:, 2]\n alpha = angle_between_vectors(b, c)\n beta = angle_between_vectors(c, a)\n gamma = angle_between_vectors(a, b)\n\n if return_unit == 'radian':\n return (alpha, beta, gamma)\n elif return_unit == 'degrees':\n return [angle * (180. / np.pi) for angle in (alpha, beta, gamma)]\n else:\n quit(\"Return unit not valid: \", return_unit)", "def refine_angles(self, method='nelder', **opts):\n self.set_idx()\n from lmfit import fit_report, minimize\n p0 = self.define_parameters(**opts)\n self.result = minimize(self.angle_residuals, p0, method=method)\n self.fit_report = fit_report(self.result)\n if self.result.success:\n self.get_parameters(self.result.params)", "def angles(self):\n return self._angles", "def polygon(center, sides, radius=1, rotation=0, translation=None):\n\n\tone_segment = math.pi * 2 / sides\n\tpoints = [\n\t\t(int(round(center[0] + math.sin(one_segment * i + rotation) * radius, 0)),\n\t\t int(round(center[1] + math.cos(one_segment * i + rotation) * radius, 0)))\n\t\tfor i in range(sides)]\n\tif translation:\n\t\tpoints = [[sum(pair) for pair in zip(point, translation)]\n\t\t\t\t for point in points]\n\treturn points", "def test_vectors_angle(self):\n\n # Example 1.3\n vector_p = np.array([1.0, 2.0, 0.0])\n vector_q = np.array([3.0, 1.0, 1.0])\n crystal = crystal_system.Tetragonal(0.5, 1.0)\n angle_ref_deg = 53.300774799510123\n\n angle_rad = vector.angle_rad(crystal, vector_p, vector_q)\n angle_deg = np.degrees(angle_rad)\n self.assertAlmostEqual(angle_ref_deg, angle_deg, 6)\n\n angle_rad = vector.angle_rad(crystal, vector_q, vector_p)\n angle_deg = np.degrees(angle_rad)\n self.assertAlmostEqual(angle_ref_deg, angle_deg, 6)\n\n #self.fail(\"Test if the testcase is working.\")", "def polar(self):\n assert self.is_compact(), \"Not a polytope.\"\n\n verts = [list(v() - self.center()) for v in self.vertex_generator()]\n return Polyhedron(ieqs = [[1] + list(v) for v in verts], \n field = self.field())", "def deg2rad(a):", "def calculate_angles_to_rotate_vector(self, starting_vec, ending_vec, starting_angles=None, search_method=0):\n #Find the starting rotation matrix\n if not starting_angles is None:\n (phi, chi, 
omega) = starting_angles[0:3]\n starting_rot_matrix = numpy_utils.rotation_matrix(phi, chi, omega)\n #Rotate the starting vector\n starting_vec = np.dot(starting_rot_matrix, column(starting_vec)).flatten()\n\n #Find the rotation matrix that satisfies ending_vec = R . starting_vec\n\n #The cross product of q0 X q_over_a gives a rotation axis to use\n rotation_axis = np.cross(starting_vec, ending_vec)\n\n #Now we find the rotation angle about that axis that puts q0 on q_over_a\n angle = np.arccos( np.dot(starting_vec, ending_vec) / (vector_length(starting_vec)*vector_length(ending_vec)))\n\n #Make the rot. matrix\n R = numpy_utils.rotation_matrix_around_vector(rotation_axis, angle)\n\n if not starting_angles is None:\n #The final rotation we want is starting_rot_matrix 1st; R second.\n # So this is the resulting matrix\n R = np.dot(R, starting_rot_matrix)\n\n #The function finds some angles that work\n angles = numpy_utils.angles_from_rotation_matrix(R)\n\n #Position is always allowed\n return (angles)", "def euler_from_quaternion(self, quaternion):\n x = quaternion.x\n y = quaternion.y\n z = quaternion.z\n w = quaternion.w\n\n sinr_cosp = 2 * (w * x + y * z)\n cosr_cosp = 1 - 2 * (x * x + y * y)\n roll = np.arctan2(sinr_cosp, cosr_cosp)\n\n sinp = 2 * (w * y - z * x)\n pitch = np.arcsin(sinp)\n\n siny_cosp = 2 * (w * z + x * y)\n cosy_cosp = 1 - 2 * (y * y + z * z)\n yaw = np.arctan2(siny_cosp, cosy_cosp)\n\n return roll, pitch, yaw", "def angle(self) -> float:\n ...", "def create_spiral(r1, r2, N):\n Pi = 3.141592\n points = []\n finished = [False]\n\n def rad(phi):\n return phi / (2 * Pi)\n\n def ang(rad):\n return 2 * Pi * rad\n\n def coord(phi):\n r = rad(phi)\n return (r * sin(phi), r * cos(phi))\n\n def fullcoord(phi, z):\n c = coord(phi)\n return [c[0], c[1], z]\n\n def dist(phi1, phi2):\n c1 = coord(phi1)\n c2 = coord(phi2)\n d = sqrt((c1[1] - c2[1]) ** 2 + (c1[0] - c2[0]) ** 2)\n return d\n\n def nextphi(phi):\n phi1 = phi\n phi2 = phi + 0.7 * Pi\n mid = phi2\n while abs(dist(phi, mid) - 1) > 0.00001:\n mid = (phi1 + phi2) / 2.\n if dist(phi, mid) > 1:\n phi2 = mid\n else:\n phi1 = mid\n return mid\n\n def prevphi(phi):\n\n phi1 = phi\n phi2 = phi - 0.7 * Pi\n mid = phi2\n\n while abs(dist(phi, mid) - 1) > 0.00001:\n mid = (phi1 + phi2) / 2.\n if dist(phi, mid) > 1:\n phi2 = mid\n else:\n phi1 = mid\n return mid\n\n def add_point(point, points=points, finished=finished):\n if (len(points) == N) or (finished[0] == True):\n points = np.array(points)\n finished[0] = True\n print(\"finished!!!\")\n else:\n points.append(point)\n\n z = 0\n forward = True\n curphi = ang(r1)\n add_point(fullcoord(curphi, z))\n while True:\n if finished[0] == True:\n return np.transpose(points)\n if forward == True:\n curphi = nextphi(curphi)\n add_point(fullcoord(curphi, z))\n if (rad(curphi) > r2):\n forward = False\n z += 1\n add_point(fullcoord(curphi, z))\n else:\n curphi = prevphi(curphi)\n add_point(fullcoord(curphi, z))\n if (rad(curphi) < r1):\n forward = True\n z += 1\n add_point(fullcoord(curphi, z))", "def calculate_angles_to_rotate_vector(self, *args, **kwargs):\n #The parent class does the work\n best_angles = LimitedGoniometer.calculate_angles_to_rotate_vector(self, *args, **kwargs)\n\n if best_angles is None:\n return None\n else:\n (phi, chi, omega) = best_angles\n #Chi needs to be 130 degrees! 
So we take it out\n\n if not np.abs(chi - np.deg2rad(self.chi)) < 0.1/57 and not np.abs(omega - np.deg2rad(self.omega)) < 0.1/57:\n #Chi is not within +-0.1 degree of the fixed chi value degrees!\n #Omega is not within +-0.1 degree of the fixed chi value degrees!\n #print \"Warning! Found angles\", np.rad2deg(best_angles), \" where chi is more than 1 degree off of fixed value.\"\n return None\n else:\n #Okay, we found a decent phi\n return [phi]", "def angle_peaks(self, i, j):\n g1 = norm_vec(self.Gvec(self.xp[i], self.yp[i], self.zp[i]))\n g2 = norm_vec(self.Gvec(self.xp[j], self.yp[j], self.zp[j]))\n return np.around(np.arccos(float(g1.T*g2)) * degrees, 3)", "def _rotation_matrix_to_euler_angles(R):\n sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])\n\n singular = sy < 1e-6\n\n if not singular:\n x = math.atan2(R[2, 1], R[2, 2])\n y = math.atan2(-R[2, 0], sy)\n z = math.atan2(R[1, 0], R[0, 0])\n else:\n x = math.atan2(-R[1, 2], R[1, 1])\n y = math.atan2(-R[2, 0], sy)\n z = 0\n\n return np.array([x, y, z])", "def get_dihedral(p0,p1,p2,p3,unit):\n if unit == 'Ang':\n p0 = p0*0.529177249\n p1 = p1*0.529177249\n p2 = p2*0.529177249\n p3 = p3*0.529177249\n\n b0 = -1.0*(p1 - p0)\n b1 = p2 - p1\n b2 = p3 - p2\n\n # normalize b1 so that it does not influence magnitude of vector\n # rejections that come next\n b1 /= linalg.norm(b1)\n\n # vector rejections\n # v = projection of b0 onto plane perpendicular to b1\n # = b0 minus component that aligns with b1\n # w = projection of b2 onto plane perpendicular to b1\n # = b2 minus component that aligns with b1\n v = b0 - dot(b0, b1)*b1\n w = b2 - dot(b2, b1)*b1\n\n # angle between v and w in a plane is the torsion angle\n # v and w may not be normalized but that's fine since tan is y/x\n x = dot(v, w)\n y = dot(cross(b1, v), w)\n return degrees(arctan2(y, x))\n\n #q1 = subtract(p1,p0) # b - a \n #q2 = subtract(p2,p1) # c - b \n #q3 = subtract(p3,p2) # d - c\n #print(q1,q2)\n\n #q1_x_q2 = cross(q1,q2) \n #q2_x_q3 = cross(q2,q3)\n\n #n1 = q1_x_q2/sqrt(dot(q1_x_q2,q1_x_q2)) \n #n2 = q2_x_q3/sqrt(dot(q2_x_q3,q2_x_q3))\n\n #u1 = n2\n #u3 = q2/(sqrt(dot(q2,q2))) \n #u2 = cross(u3,u1)\n\n #cos_theta = dot(n1,u1)\n #sin_theta = dot(n1,u2)\n ## Calculate theta\n #theta = -atan2(sin_theta,cos_theta)\n ## it is different from atan2 from fortran math.atan2(y,x)\n #theta_deg = degrees(theta)\n #return(theta_deg)", "def Angles(self, degrees=True):\n\n self.__do_essential_memebers_exist__()\n if self.InferElementalDimension() != 2:\n raise ValueError(\"Angles can be computed only for 2D elements\")\n if self.InferSpatialDimension() != 2:\n raise ValueError(\"Angles can be computed only in 2-dimensional plane\")\n\n nodeperelem = self.InferNumberOfNodesPerLinearElement()\n angles = np.zeros((self.nelem, nodeperelem))\n\n norm = lambda x: np.linalg.norm(x,axis=1)\n\n edge_coords = self.points[self.elements[:,:],:]\n if self.element_type == \"tri\":\n AB = edge_coords[:,1,:] - edge_coords[:,0,:]\n AC = edge_coords[:,2,:] - edge_coords[:,0,:]\n BC = edge_coords[:,2,:] - edge_coords[:,1,:]\n\n angles[:,0] = np.einsum(\"ij,ij->i\",AB,AC) / (norm(AB)*norm(AC))\n angles[:,1] = np.einsum(\"ij,ij->i\",AC,BC) / (norm(AC)*norm(BC))\n angles[:,2] = np.einsum(\"ij,ij->i\",BC,-AB)/ (norm(BC)*norm(AB))\n angles = np.arccos(angles)\n\n elif self.element_type == \"quad\":\n AB = edge_coords[:,1,:] - edge_coords[:,0,:]\n BC = edge_coords[:,2,:] - edge_coords[:,1,:]\n CD = edge_coords[:,3,:] - edge_coords[:,2,:]\n DA = edge_coords[:,0,:] - edge_coords[:,3,:]\n\n angles[:,0] = 
np.einsum(\"ij,ij->i\",AB,BC) / (norm(AB)*norm(BC))\n angles[:,1] = np.einsum(\"ij,ij->i\",BC,CD) / (norm(BC)*norm(CD))\n angles[:,2] = np.einsum(\"ij,ij->i\",CD,DA) / (norm(CD)*norm(DA))\n angles[:,3] = np.einsum(\"ij,ij->i\",DA,-AB)/ (norm(DA)*norm(AB))\n angles = np.arccos(angles)\n\n if degrees:\n angles *= 180/np.pi\n\n return angles", "def get_angles(pos, i, d_model):\n angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))\n return pos * angle_rates", "def getAxisAndAngleDegres(self):\n theta = numpy.arccos(self.real)\n angle = 360 * theta / numpy.pi\n xyz = self.pure / numpy.sin(theta)\n return xyz, angle", "def test_to_from_euler(self):\n np.random.seed(0)\n angles_euler = np.pi * np.random.rand(100, 3)\n conventions_euler = [\"xzx\", \"xyx\", \"yxy\", \"yzy\", \"zyz\", \"zxz\"]\n\n # For Tait-Bryan angles the second angle must be between -pi/2 and pi/2\n angles_tb = angles_euler.copy()\n angles_tb[:, 1] -= np.pi / 2\n conventions_tb = [\"xzy\", \"xyz\", \"yxz\", \"yzx\", \"zyx\", \"zxy\"]\n\n axis_types = [\"extrinsic\", \"intrinsic\"]\n\n for convention in conventions_euler:\n for axis_type in axis_types:\n out = rowan.to_euler(\n rowan.from_euler(\n angles_euler[..., 0],\n angles_euler[..., 1],\n angles_euler[..., 2],\n convention,\n axis_type,\n ),\n convention,\n axis_type,\n )\n self.assertTrue(\n np.all(\n np.logical_or(\n np.isclose(out - angles_euler, 0),\n np.isclose(out + angles_euler, 0),\n )\n ),\n msg=\"Failed for convention {}, axis type {}\".format(\n convention, axis_type\n ),\n )\n\n for convention in conventions_tb:\n for axis_type in axis_types:\n out = rowan.to_euler(\n rowan.from_euler(\n angles_tb[..., 0],\n angles_tb[..., 1],\n angles_tb[..., 2],\n convention,\n axis_type,\n ),\n convention,\n axis_type,\n )\n self.assertTrue(\n np.all(\n np.logical_or(\n np.isclose(out - angles_tb, 0),\n np.isclose(out + angles_tb, 0),\n )\n ),\n msg=\"Failed for convention {}, axis type {}\".format(\n convention, axis_type\n ),\n )", "def angles(self,compass=0,vertical=0,roll=0):\n self.matrix = makeMatrix(compass,vertical,roll)", "def convergence_angle(self):\n return np.arctan2(self.radius, self.focal_length)", "def euler_angle_to_rotation(ea, convention='zyx'):\n axis_names_to_vectors = dict([('x', (1, 0, 0)), ('y', (0, 1, 0)), ('z', (0, 0, 1))])\n axis0, axis1, axis2 = convention\n R0 = so3.rotation(axis_names_to_vectors[axis0], ea[0])\n R1 = so3.rotation(axis_names_to_vectors[axis1], ea[1])\n R2 = so3.rotation(axis_names_to_vectors[axis2], ea[2])\n return so3.mul(R0, so3.mul(R1, R2))", "def ellipse_orientation(S):\n return 1/2 * np.arctan2(S[..., 2], S[..., 1])", "def rad_to_deg(angles, to_list=False, right_hand=False):\n angles = np.asarray(angles)\n angles *= 180 / np.pi\n\n if right_hand:\n angles[0] *= -1\n\n if to_list:\n angles = list(angles)\n\n return angles", "def angsep(phi, theta, deg=True):\n ra1, ra2 = phi\n dec1, dec2 = theta\n\n if deg==True:\n ra1, ra2 = np.radians(ra1), np.radians(ra2)\n dec1, dec2 = np.radians(dec1), np.radians(dec2)\n \n sin = np.sin\n cos = np.cos\n return np.arccos( sin(dec1)*sin(dec2)+cos(dec1)*cos(dec2)*cos(ra1-ra2) )", "def calculate_angles(self,chunk):\n import math\n import collections\n\n Angles = collections.namedtuple(\"Angles\", \"ev az\")\n x = float(chunk['x'])\n y = float(chunk['y'])\n z = float(chunk['z'])\n ev = round(90 - math.acos(z/math.sqrt(x*x+y*y+z*z))*180/math.pi)\n az = round(math.atan2(y,x)*180/math.pi)\n\n return(Angles(ev, az))", "def test_polar_angle_special_case(self):\n\n point1 = 
np.array([2, 1])\n point2 = np.array([1, 1])\n pol_angle = convex_hull.polar_angle(point2, point1)\n\n self.assertEqual(pol_angle, -10.)", "def dimensionless_angles():\n return Equivalency([(si.radian, None)], \"dimensionless_angles\")", "def areap_diamond(RA, Dec, radius_RA, radius_Dec):\n\n minRA = RA-radius_RA/2.\n maxRA = RA+radius_RA/2.\n minDec = Dec-radius_Dec/2.\n maxDec = Dec+radius_Dec/2.\n\n poly = [[minRA, Dec], [RA, maxDec], [maxRA, Dec], [RA, minDec]]\n\n return geometry.Polygon(poly)", "def getTiltAnglesFromTiltSeries(self, TiltSeries_):\n from math import sin, cos, pi\n\n if len(TiltSeries_._projIndices) != self._ntilt:\n print(\"Houston: We have a problem!\")\n # initialize alignment in seperate array - easier for optimization\n # sin and cos\n self._tiltAngles = numpy.array(self._ntilt * [0.])\n self._cTilt = numpy.array(self._ntilt * [0.])\n self._sTilt = numpy.array(self._ntilt * [0.])\n for (kk, proj) in enumerate(TiltSeries_._ProjectionList._list):\n the = proj.getTiltAngle()\n self._tiltAngles[kk] = the\n self._sTilt[kk] = sin(the / 180. * pi)\n self._cTilt[kk] = cos(the / 180. * pi)\n return self._tiltAngles", "def test_vectors_angle2(self):\n\n # Example 1.4\n vector_p = np.array([1.0, 2.0, 0.0])\n vector_q = np.array([3.0, 1.0, 1.0])\n crystal = crystal_system.Tetragonal(0.5, 1.0)\n angle_ref_deg = 53.300774799510123\n\n angle_rad = vector.angle2_rad(crystal, vector_p, vector_q)\n angle_deg = np.degrees(angle_rad)\n self.assertAlmostEqual(angle_ref_deg, angle_deg, 6)\n\n angle_rad = vector.angle2_rad(crystal, vector_q, vector_p)\n angle_deg = np.degrees(angle_rad)\n self.assertAlmostEqual(angle_ref_deg, angle_deg, 6)\n\n #self.fail(\"Test if the testcase is working.\")", "def angles(self):\n\n return self._angles", "def calcScatterAngleOld(R, PHI, THETA, sun_rotation):\n \n H_rot = atmo_utils.calcRotationMatrix(sun_rotation)\n\n X_ = R * np.sin(THETA) * np.cos(PHI)\n Y_ = R * np.sin(THETA) * np.sin(PHI)\n Z_ = R * np.cos(THETA)\n \n XYZ_dst = np.vstack((X_.ravel(), Y_.ravel(), Z_.ravel(), np.ones(R.size)))\n XYZ_src_ = np.dot(H_rot, XYZ_dst)\n \n Z_rotated = XYZ_src_[2, :]\n R_rotated = np.sqrt(np.sum(XYZ_src_[:3, :]**2, axis=0))\n \n angle = np.arccos(Z_rotated/(R_rotated+amitibo.eps(R_rotated)))\n \n return angle", "def polarization_ellipse(self):\n self.ellipse = {}\n self.ellipse['d_lin'] = sqrt(self.Q**2 + self.U**2)/self.I\n self.ellipse['d_cir'] = abs(self.V)/self.I\n self.ellipse['d'] = sqrt(self.Q**2 + self.U**2 + self.V**2)/self.I\n if self.Q:\n self.ellipse['theta'] = 0.5*atan(self.U/self.Q)\n else:\n self.ellipse['theta'] = float('NaN')\n self.logger.debug(\"polarization_ellipse: theta = %f\",\n self.ellipse['theta'])\n\n if (self.Q**2 + self.U**2):\n self.ellipse['beta'] = 0.5*atan(self.V/sqrt(self.Q**2 + self.U**2))\n if self.V:\n self.ellipse['eccen'] = tan(self.ellipse['beta'])\n else:\n self.ellipse['eccen'] = 0.\n else:\n self.ellipse['beta'] = pi/4\n self.ellipse['eccen'] = 1.\n self.logger.debug(\"polarization_ellipse: beta = %f\",\n self.ellipse['beta'])\n self.logger.debug(\"polarization_ellipse: eccen = %f\",\n self.ellipse['eccen'])", "def test_float_input_angles(self):\n decomposer = OneQubitEulerDecomposer(\"PSX\")\n input_matrix = np.array(\n [\n [0.70710678, 0.70710678],\n [0.70710678, -0.70710678],\n ],\n dtype=np.float64,\n )\n (theta, phi, lam) = decomposer.angles(input_matrix)\n expected_theta = 1.5707963267948966\n expected_phi = 0.0\n expected_lam = 3.141592653589793\n self.assertAlmostEqual(theta, expected_theta)\n 
self.assertAlmostEqual(phi, expected_phi)\n self.assertAlmostEqual(lam, expected_lam)", "def Q2euler(self, q):\n\n\tphi = mt.atan2(2.0*((q[2]*q[3])+(q[0]*q[1])), (q[0]**2.0)-(q[1]**2.0)-(q[2]**2.0)+(q[3]**2.0));\n\tpsi = mt.atan2(2.0*((q[1]*q[2])+(q[0]*q[3])), (q[0]**2.0)+(q[1]**2.0)-(q[2]**2.0)-(q[3]**2.0));\n try:\n theta = mt.asin(2.0*((q[0]*q[2])-(q[1]*q[3])));\n except ValueError:\n print \"ERRO: norm(Q) = %f\" % np.sqrt(np.sum(q**2))\n theta = 0;\n\n return (phi, theta, psi)", "def parangle(ra, dec, utdate, uttime, site, verbose=False):\n # degrees per radian\n degrad = 180. * u.deg /(np.pi * u.rad)\n\n l_ra = ra.strip()\n l_dec = dec.strip()\n if '-' not in l_dec and l_dec[0] != '+':\n l_dec = '+' + l_dec\n\n # Coordinate object\n coord = SkyCoord(l_ra,l_dec,frame='icrs',unit = (u.hr, u.deg))\n\n # Observation time\n obs_time = Time(utdate + 'T' + uttime, format='isot', scale='utc')\n\n # Location\n location = EarthLocation.of_site(site)\n if verbose:\n print('Site: ', location)\n\n altaz = coord.transform_to(AltAz(obstime=obs_time, location=location))\n if verbose:\n print('Alt/Az: ', altaz.alt.deg, altaz.az.deg)\n\n # Hour angle\n ha = np.arcsin(-np.sin(altaz.az) * np.cos(altaz.alt) / np.cos(coord.dec))\n if verbose:\n print('HA: ', ha)\n\n # Parallactic angle\n parang = -degrad * np.arctan2(-np.sin(ha),\n np.cos(coord.dec) * np.tan(location.lat) - np.sin(coord.dec) * np.cos(ha))\n\n return parang", "def arc(radius = 10, angle = 90, num_pts = 720):\n t = np.linspace(0, angle*np.pi/180, abs(int(num_pts*angle/360))-2)\n x = radius*np.cos(t)\n y = radius*np.sin(t)\n points = np.array((x,y)).T\n start_angle = 90*np.sign(angle)\n end_angle = start_angle + angle\n return points, start_angle, end_angle", "def rad2deg(a):", "def get_euler(self):\n return array([ coord * self.coords for coord in self.coords ])", "def helix(radius,\n height,\n pitch,\n center=None,\n arc_res=None,\n epsilon=1e-8,\n return_angle=False):\n if np.abs(height) < epsilon:\n arcs = np.array([], dtype=np.float64)\n if return_angle:\n return arcs, 0.0\n return arcs\n\n # set a default arc size if not passed\n if arc_res is None:\n arc_res = constants.default_arc\n\n # total angle we're traversing\n angle = (np.pi * 2.0 * height) / pitch\n\n # how many arc sections will result, making sure to ceil\n arc_count = int(np.ceil(angle / arc_res))\n\n arc_index, point_count = arc_indexes(arc_count)\n\n # we're doing 3-point arcs\n theta = np.linspace(0.0, angle, point_count)\n\n # Z is linearly ramping for every point\n z = np.linspace(0.0, height, len(theta))\n\n # convert cylindrical to cartesian\n cartesian = np.column_stack(\n (np.cos(theta), np.sin(theta), z))\n # multiply XY by radius\n cartesian[:, :2] *= radius\n if center is not None:\n cartesian[:, :2] += center\n else:\n center = [0, 0, 0]\n # now arcs are 3 cartesian points\n arcs = cartesian[arc_index]\n\n if return_angle:\n # return the final angle\n helix_end = theta[-1] % (np.pi * 2)\n\n vec = arcs[-1][-1][:2] - center[:2]\n # norm of arc should be close to radius\n assert np.isclose(np.linalg.norm(vec), radius, rtol=1e-3)\n # check to make sure the angle is accurate\n a, b = np.clip(vec / radius, -1.0, 1.0)\n ac, bc = np.cos(helix_end), np.sin(helix_end)\n\n assert np.isclose(a, ac, atol=1e-3)\n assert np.isclose(b, bc, atol=1e-3)\n\n return arcs, helix_end\n\n return arcs", "def spacedvals(self, method='default'):\n p0s = []\n mins = np.array([self.inc_min, self.Peq_min, self.k_min, self.lat_min, self.lon_min, self.rad_min])\n maxs = 
np.array([self.inc_max, self.Peq_max, self.k_max, self.lat_max, self.lon_max, self.rad_max])\n if method == 'random':\n m = np.ones(self.n_spaced ** 6, dtype=bool)\n nbad = m.sum()\n q = np.inf * np.ones((self.n_spaced ** 6, 6))\n while nbad > 0:\n p0s = np.random.rand(self.n_spaced ** 6, 6)\n q[m] = p0s[m] * (maxs - mins) + mins\n lp = np.array([self.lnprior(p) for p in q])\n m = ~np.isfinite(lp)\n nbad = m.sum()\n else:\n for i in range(6):\n p0s.append(np.arange(mins[i] + (maxs[i] - mins[i]) / (2 * self.n_spaced), maxs[i],\n (maxs[i] - mins[i]) / self.n_spaced))\n q = list(product(*p0s))\n np.random.shuffle(q)\n return q", "def angle(z):", "def angles_vectors_degrees(u, v):\n a = angle_smallest_vectors_degrees(u, v)\n return a, 360. - a", "def rotateEuler(self,axis, angle):\n if(axis == 'Z'):\n return np.array([[cos(angle), -sin(angle),0,0],[sin(angle), cos(angle),0,0],[0,0,1,0],[0,0,0,1]])\n if(axis == 'Y'):\n return np.array([[cos(angle),0,sin(angle),0],[0,1,0,0],[-sin(angle),0,cos(angle),0],[0,0,0,1]])\n if(axis == 'X'):\n return np.array([[1,0,0,0],[0,cos(angle), -sin(angle),0],[0,sin(angle), cos(angle),0],[0,0,0,1]])", "def Spiral(self, a, n):\n t = range(0,360*n)\n a = float(a)\n x = []\n y = [] \n for i in t:\n i = self.deg2rad(i)\n x.append(a*i*math.cos(i)) \n y.append(a*i*math.sin(i)) \n return x, y", "def compute_angle(self, direction):\n scaled_cosine = self.w1.dot(direction) # ||direction|| cos(theta)\n scaled_sine = self.w2.dot(direction) # ||direction|| sin(theta)\n return np.arctan2(scaled_sine, scaled_cosine)", "def set_rama_angles(moving_h, angles, direction_forward=True, check_omega=False):\n # print \"angles\", angles\n # STOP()\n result_h = moving_h.deep_copy()\n result_h.reset_atom_i_seqs()\n fixed_omega = False\n phi_psi_atoms = utils.get_phi_psi_atoms(moving_h, omega=True)\n assert len(phi_psi_atoms) == len(angles), \"%d != %d\" % (len(phi_psi_atoms), len(angles))\n if not direction_forward:\n phi_psi_atoms.reverse()\n angles.reverse()\n for ps_atoms, target_angle_pair in zip(phi_psi_atoms, angles):\n phi_psi_pair = ps_atoms[0]\n # print \"phi_psi_pair\", phi_psi_pair\n omega = ps_atoms[2]\n phi_psi_angles = utils.get_pair_angles(phi_psi_pair)\n # print \"ps_atoms, target_angle_pair\", phi_psi_angles, target_angle_pair\n # phi\n if target_angle_pair[0] is not None and phi_psi_angles[0] is not None:\n rotation_angle = -phi_psi_angles[0]+target_angle_pair[0]\n # print \"rot angle\", rotation_angle\n # if not direction_forward:\n # rotation_angle = -rotation_angle\n utils.rotate_atoms_around_bond(\n result_h,\n phi_psi_pair[0][1],\n phi_psi_pair[0][2],\n angle=rotation_angle,\n direction_forward=direction_forward)\n # psi\n if target_angle_pair[1] is not None and phi_psi_angles[1] is not None:\n rotation_angle = -phi_psi_angles[1]+target_angle_pair[1]\n # print \"rot angle\", rotation_angle\n # if not direction_forward:\n # rotation_angle = -rotation_angle\n utils.rotate_atoms_around_bond(\n result_h,\n phi_psi_pair[1][1],\n phi_psi_pair[1][2],\n angle=rotation_angle,\n direction_forward=direction_forward)\n # omega\n if omega is not None and abs(abs(omega)-180) > 10 and check_omega:\n rotation_angle= -omega+180\n # print \"Omega rotation:\", omega, rotation_angle\n utils.rotate_atoms_around_bond(\n result_h,\n phi_psi_pair[0][0],\n phi_psi_pair[0][1],\n angle=rotation_angle,\n direction_forward=direction_forward)\n fixed_omega = True\n # print utils.list_rama_outliers_h(result_h)\n # result_h.write_pdb_file(file_name=\"variant_%s.pdb\" % direction_forward)\n # 
STOP()\n return result_h, fixed_omega", "def analyze_internal_angles(self, return_plot=False):\n angles = self.get_internal_angles().flatten()\n\n print('Minimal angle: {0} degrees'.format(np.min(angles)))\n print('Maximal angle: {0} degrees'.format(np.max(angles)))\n # print out quantiles\n for i in range(10, 100, 10):\n print('Angle percentile {0}%: {1:0.2f} degrees'.format(\n i,\n np.percentile(angles, i),\n ))\n\n if return_plot:\n print('generating plot...')\n fig, ax = plt.subplots(1, 1, figsize=(12 / 2.54, 8 / 2.54))\n ax.hist(angles, int(angles.size / 10))\n ax.set_xlabel('angle [deg]')\n ax.set_ylabel('count')\n fig.tight_layout()\n # fig.savefig('plot_element_angles.jpg', dpi=300)\n return fig, ax", "def toEulerAngle(w, x, y, z):\n # roll (x-axis rotation)\n sinr = +2.0 * (w * x + y * z)\n cosr = +1.0 - 2.0 * (x * x + y * y)\n roll = math.atan2(sinr, cosr)\n # pitch (y-axis rotation)\n sinp = +2.0 * (w * y - z * x)\n if (math.fabs(sinp) >= 1):\n pitch = math.copysign(math.pi / 2, sinp) # use 90 degrees if out of range\n else:\n pitch = math.asin(sinp)\n # yaw (z-axis rotation)\n siny = +2.0 * (w * z + x * y)\n cosy = +1.0 - 2.0 * (y * y + z * z)\n yaw = math.atan2(siny, cosy)\n return roll, pitch, yaw", "def get_NONuniform_polar_angle(xx, yy, rsq, angle_thresh = 3*np.pi/4, rsq_thresh = 0, pysub = 'hcp_999999'):\n\n hsv_angle = []\n hsv_angle = np.ones((len(rsq), 3))\n\n ## calculate polar angle\n polar_angle = np.angle(xx + yy * 1j)\n\n ## set normalized polar angle (0-1), and make nan irrelevant vertices\n hsv_angle[:, 0] = np.nan \n hsv_angle[:, 0][rsq > rsq_thresh] = ((polar_angle + np.pi) / (np.pi * 2.0))[rsq > rsq_thresh]\n\n ## normalize angle threshold for overepresentation\n angle_thresh_norm = (angle_thresh + np.pi) / (np.pi * 2.0)\n\n ## get mid vertex index (diving hemispheres)\n left_index = cortex.db.get_surfinfo(pysub).left.shape[0] \n\n ## set angles within threh interval to 0\n ind_thresh = np.where((hsv_angle[:left_index, 0] > angle_thresh_norm) | (hsv_angle[:left_index, 0] < 1-angle_thresh_norm))[0]\n hsv_angle[:left_index, 0][ind_thresh] = 0\n\n ## now take angles from RH (thus LVF) \n #### ATENÇÃO -> minus sign to flip angles vertically (then order of colors same for both hemispheres) ###\n # also normalize it\n hsv_angle[left_index:, 0] = ((np.angle(-1*xx + yy * 1j) + np.pi) / (np.pi * 2.0))[left_index:]\n\n # set angles within threh interval to 0\n ind_thresh = np.where((hsv_angle[left_index:, 0] > angle_thresh_norm) | (hsv_angle[left_index:, 0] < 1-angle_thresh_norm))[0]\n hsv_angle[left_index:, 0][ind_thresh] = 0\n\n ## make final RGB array\n rgb_angle = np.ones((len(rsq), 3))\n rgb_angle[:] = np.nan\n\n rgb_angle[rsq > rsq_thresh] = colors.hsv_to_rgb(hsv_angle[rsq > rsq_thresh])\n\n return rgb_angle", "def deg_to_rad(angles, to_list=False, right_hand=False):\n angles = np.asarray(angles)\n angles *= np.pi / 180\n\n if right_hand:\n angles[0] *= -1\n\n if to_list:\n angles = list(angles)\n\n return angles", "def calculate_angles_to_rotate_vector(self, *args, **kwargs):\n #The parent class does the work\n best_angles = LimitedGoniometer.calculate_angles_to_rotate_vector(self, *args, **kwargs)\n\n if best_angles is None:\n return None\n else:\n (phi, chi, omega) = best_angles\n #Chi needs to be 45 degrees! So we take it out\n\n if not np.abs(omega - np.deg2rad(self.omega)) < 0.1/57:\n #Chi is not within +-0.1 degree of the fixed chi value degrees!\n #print \"Warning! 
Found angles\", np.rad2deg(best_angles), \" where chi is more than 1 degree off of fixed value.\"\n return None\n else:\n #Okay, we found a decent chi\n return [phi, chi]", "def Rpy(angle=0, units='deg'):\n\n if(units=='deg'):\n angle = angle*pi/180\n\n C = np.cos(angle)\n S = np.sin(angle)\n\n M = np.identity(3)\n\n M[0,0] = +C\n M[0,2] = +S\n M[2,0] = -S\n M[2,2] = +C\n\n return M", "def partial_euler(angle = 90, Rmin = 3, Reff = None, p = 0.2, num_pts = 720):\n # Overhead calculations\n num_pts = abs(int(num_pts * angle/360))\n angle = np.radians(angle)\n sp = np.sqrt(p*angle) # Clothoid-to-normal transition point s value\n s0 = 2*sp + angle*(1-p)/(2*np.sqrt(p*angle/2))\n c = 1 / (2*sp*Rmin) # Scaling factor to enforce Rmin\n print(sp)\n\n # Constructing s and K arrays\n s = np.linspace(0, s0, num_pts)\n if p == 0: K = np.array([[1/Rmin] * len(s)])\n else:\n i1 = np.argmax(s > sp)\n i2 = np.argmax(s >= s0 - sp)\n K = c * np.concatenate([np.multiply(np.ones(i1), 2*s[:i1]),\n np.multiply(np.ones(i2-i1), 2*sp),\n np.multiply(np.ones(num_pts-i2), \n 2*(s0 - s[i2:num_pts]))])\n\n # Integrating to find x and y\n ds = s[1] - s[0]\n phi = cumtrapz(K*ds)\n x, y = np.concatenate([np.array([[0],[0]]), \n np.cumsum([ds*np.cos(phi), ds*np.sin(phi)], axis = 1)],\n axis = 1)\n\n return x, y", "def calculate_and_encrypt_angles(self, positions: List):\n angles = []\n for pos in positions:\n vertex = self.__client_knowledge[pos]\n cmds = vertex.get_commands()\n for cmd in cmds:\n if cmd.name == 'M':\n # Calculate the adaptive angle\n signal_s = sum([self.__client_knowledge[pos].get_outcome() for pos in cmd.domain_s]) % 2\n signal_t = sum([self.__client_knowledge[pos].get_outcome() for pos in cmd.domain_t]) % 2\n adaptive_angle = (-1) ** signal_s * cmd.angle + signal_t * pi\n\n # Encrypt each angle with rotation and flipping method\n encrypted_angle = adaptive_angle + \\\n vertex.get_rotation_encryption_angle() + \\\n vertex.get_flipping_encryption_angle()\n angles.append(encrypted_angle)\n\n else:\n continue\n return angles", "def calculate_angles_to_rotate_vector(self, *args, **kwargs):\n #The parent class does the work\n best_angles = LimitedGoniometer.calculate_angles_to_rotate_vector(self, *args, **kwargs)\n\n if best_angles is None:\n return None\n else:\n (phi, chi, omega) = best_angles\n #Chi needs to be 45 degrees! So we take it out\n\n if not np.abs(chi - np.deg2rad(self.chi)) < 0.1/57:\n #Chi is not within +-0.1 degree of the fixed chi value degrees!\n #print \"Warning! Found angles\", np.rad2deg(best_angles), \" where chi is more than 1 degree off of fixed value.\"\n return None\n else:\n #Okay, we found a decent chi\n return [phi, omega]", "def calculate_angles_to_rotate_vector(self, *args, **kwargs):\n #The parent class does the work\n best_angles = LimitedGoniometer.calculate_angles_to_rotate_vector(self, *args, **kwargs)\n\n if best_angles is None:\n return None\n else:\n (phi, chi, omega) = best_angles\n #Chi needs to be 45 degrees! So we take it out\n\n if not np.abs(chi - np.deg2rad(self.chi)) < 0.1/57:\n #Chi is not within +-0.1 degree of the fixed chi value degrees!\n #print \"Warning! 
Found angles\", np.rad2deg(best_angles), \" where chi is more than 1 degree off of fixed value.\"\n return None\n else:\n #Okay, we found a decent chi\n return [phi, omega]", "def calculate_angles_to_rotate_vector(self, *args, **kwargs):\n #The parent class does the work\n best_angles = LimitedGoniometer.calculate_angles_to_rotate_vector(self, *args, **kwargs)\n\n if best_angles is None:\n return None\n else:\n (phi, chi, omega) = best_angles\n #Chi needs to be 45 degrees! So we take it out\n\n if not np.abs(chi - np.deg2rad(self.chi)) < 0.1/57:\n #Chi is not within +-0.1 degree of the fixed chi value degrees!\n #print \"Warning! Found angles\", np.rad2deg(best_angles), \" where chi is more than 1 degree off of fixed value.\"\n return None\n else:\n #Okay, we found a decent chi\n return [phi, omega]", "def angle(self) -> int:", "def arc_pts(cx, cy, w, h, start_angle, end_angle, res=5):\n sweep_angle = end_angle - start_angle\n \n if abs(sweep_angle) < 0.0001:\n vx = cx + cos(start_angle) * w / 2.0\n vy = cy + sin(start_angle) * h / 2.0\n return [(vx, vy)]\n num_points = abs(sweep_angle * w / 2) / res\n pts_list = []\n step_angle = float(sweep_angle) / num_points \n va = start_angle\n side = 1 if sweep_angle > 0 else -1\n while va * side < end_angle * side or abs(va - end_angle) < 0.0001:\n vx = cx + cos(va) * w / 2.0\n vy = cy + sin(va) * h / 2.0\n pts_list.append((vx, vy))\n va += step_angle\n return pts_list", "def angle(self):\n self.convert_window(\"Angle\", \"degree\", [\"arcminute\", \"arcsecond\", \"circle\", \"degree\", \"gon\", \"gradian\", \"mil(Nato)\", \"mil(Soviet Union)\", \"mil(Sweden)\", \"octant\", \"quadrant\", \"radian\", \"revolution\", \"sextant\", \"sign\", \"turn\"])" ]
[ "0.63690925", "0.59872353", "0.5834576", "0.5605631", "0.5481775", "0.5450778", "0.5426255", "0.5415383", "0.540476", "0.54044795", "0.53929913", "0.5386991", "0.5379755", "0.53729224", "0.5372181", "0.5360525", "0.53372157", "0.5305089", "0.53043467", "0.5274299", "0.52560574", "0.52554476", "0.52454716", "0.5230425", "0.5197291", "0.51921314", "0.51663566", "0.51654804", "0.5160156", "0.5153621", "0.51431763", "0.51267385", "0.5125358", "0.5116118", "0.5112866", "0.51095957", "0.5102082", "0.50819033", "0.50762105", "0.5073405", "0.5071278", "0.506618", "0.506461", "0.5063971", "0.50304055", "0.4990901", "0.49863845", "0.4985209", "0.49834594", "0.4982904", "0.4980946", "0.49752444", "0.49667215", "0.4956328", "0.49425858", "0.49344203", "0.49334255", "0.492267", "0.49139443", "0.4911969", "0.48978043", "0.48961374", "0.4891241", "0.48842686", "0.48778892", "0.48706508", "0.48684815", "0.48670557", "0.48646522", "0.4852726", "0.48477164", "0.48471543", "0.48396003", "0.48373273", "0.4835422", "0.48337176", "0.48318473", "0.4830967", "0.4819334", "0.48130777", "0.4813005", "0.480932", "0.47987768", "0.4795432", "0.47934997", "0.47931692", "0.47916484", "0.47910997", "0.4790089", "0.47888595", "0.47841644", "0.47832394", "0.47809413", "0.47803095", "0.4780175", "0.4780175", "0.4780175", "0.47793388", "0.47666296", "0.47592056" ]
0.616338
1
Create a list of Euler angles suitable for projections. method is either 'S' for Saff algorithm or 'P' for Penczek '94 algorithm 'S' assumes phi1> delta ; phiEQpsi set this to 'Minus', if you want psi=phi;
def even_angles_cd(delta, theta1=0.0, theta2=90.0, phi1=0.0, phi2=359.99, method = 'P', phiEQpsi='Minus'):
    from math import pi, sqrt, cos, acos
    angles = []
    if (method == 'P'):
        temp = Util.even_angles(delta, theta1, theta2, phi1, phi2)
        #       phi, theta, psi
        for i in xrange(len(temp)/3): angles.append([temp[3*i], temp[3*i+1], temp[3*i+2]]);
    else:  #elif (method == 'S'):
        Deltaz = cos(theta2*pi/180.0)-cos(theta1*pi/180.0)
        s = delta*pi/180.0
        NFactor = 3.6/s
        wedgeFactor = abs(Deltaz*(phi2-phi1)/720.0)
        NumPoints = int(NFactor*NFactor*wedgeFactor)
        angles.append([phi1, theta1, 0.0])
        z1 = cos(theta1*pi/180.0);  phi = phi1   # initialize loop
        for k in xrange(1, (NumPoints-1)):
            z = z1 + Deltaz*k/(NumPoints-1)
            r = sqrt(1-z*z)
            phi = phi1+(phi + delta/r - phi1)%(abs(phi2-phi1))
            #[k, phi, 180*acos(z)/pi, 0]
            angles.append([phi, 180*acos(z)/pi, 0.0])
        #angles.append([p2,t2,0])  # This is incorrect, as the last angle is really the border, not the element we need. PAP 01/15/07
    if (phiEQpsi == 'Minus'):
        for k in xrange(len(angles)): angles[k][2] = (720.0 - angles[k][0])%360.0
    if( theta2 == 180.0 ): angles.append( [0.0, 180.0, 0.0] )

    return angles
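The 'P' branch above depends on the compiled `Util.even_angles` binding and Python 2 `xrange`, so as a point of reference here is a self-contained Python 3 sketch of just the 'S' (Saff-style) branch: step uniformly in z = cos(theta) and advance phi by roughly delta/r so neighbouring points stay about delta degrees apart. The function and variable names below are illustrative, not part of the SPARX API.

from math import pi, sqrt, cos, acos

def saff_even_angles(delta, theta1=0.0, theta2=90.0, phi1=0.0, phi2=359.99):
    # Mirrors the 'S' branch above: uniform steps in z = cos(theta), phi advanced by ~delta/r.
    deltaz = cos(theta2*pi/180.0) - cos(theta1*pi/180.0)
    s = delta*pi/180.0
    num_points = int((3.6/s)*(3.6/s)*abs(deltaz*(phi2 - phi1)/720.0))
    angles = [[phi1, theta1, 0.0]]
    z1, phi = cos(theta1*pi/180.0), phi1
    for k in range(1, num_points - 1):
        z = z1 + deltaz*k/(num_points - 1)
        r = sqrt(1.0 - z*z)
        phi = phi1 + (phi + delta/r - phi1) % abs(phi2 - phi1)
        angles.append([phi, 180.0*acos(z)/pi, 0.0])
    return angles

pts = saff_even_angles(15.0)                        # ~15-degree sampling of the upper hemisphere
assert all(0.0 <= theta <= 90.0 for _, theta, _ in pts)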
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def even_angles(delta = 15.0, theta1=0.0, theta2=90.0, phi1=0.0, phi2=359.99, method = 'S', phiEqpsi = \"Minus\", symmetry='c1'):\n\n\tfrom math import pi, sqrt, cos, acos, tan, sin\n\tfrom utilities import even_angles_cd\n\tfrom string import lower,split\n\tangles = []\n\tsymmetryLower = symmetry.lower()\n\tsymmetry_string = split(symmetry)[0]\n\tif (symmetry_string[0] == \"c\"):\n\t\tif(phi2 == 359.99):\n\t\t\tangles = even_angles_cd(delta, theta1, theta2, phi1, phi2/int(symmetry_string[1:]), method, phiEqpsi)\n\t\t\tif(int(symmetry_string[1:]) > 1):\n\t\t\t\tif( int(symmetry_string[1:])%2 ==0):\n\t\t\t\t\tqt = 360.0/int(symmetry_string[1:])\n\t\t\t\telse:\n\t\t\t\t\tqt = 180.0/int(symmetry_string[1:])\n\t\t\t\tn = len(angles)\n\t\t\t\tfor i in xrange(n):\n\t\t\t\t\tt = n-i-1\n\t\t\t\t\tif(angles[t][1] == 90.0):\n\t\t\t\t\t\tif(angles[t][0] >= qt): del angles[t]\n\t\telse:\n\t\t\tangles = even_angles_cd(delta, theta1, theta2, phi1, phi2, method, phiEqpsi)\n\telif(symmetry_string[0] == \"d\"):\n\t\tif(phi2 == 359.99):\n\t\t\tangles = even_angles_cd(delta, theta1, theta2, phi1, 360.0/2/int(symmetry_string[1:]), method, phiEqpsi)\n\t\t\tif (int(symmetry_string[1:])%2 == 0):\n\t\t\t\tqt = 360.0/2/int(symmetry_string[1:])\n\t\t\telse:\n\t\t\t\tqt = 180.0/2/int(symmetry_string[1:])\n\t\t\tn = len(angles)\n\t\t\tfor i in xrange(n):\n\t\t\t\tt = n-i-1\n\t\t\t\tif(angles[t][1] == 90.0):\n\t\t\t\t\tif(angles[t][0] >= qt): del angles[t]\n\t\telse:\n\t\t\tangles = even_angles_cd(delta, theta1, theta2, phi1, phi2, method, phiEqpsi)\n\telif(symmetry_string[0] == \"s\"):\n\t\n\t#if symetry is \"s\", deltphi=delta, theata intial=theta1, theta end=90, delttheta=theta2\n\t\t# for helical, theta1 cannot be 0.0\n\t\tif theta1 > 90.0:\n\t\t\tERROR('theta1 must be less than 90.0 for helical symmetry', 'even_angles', 1)\n\t\tif theta1 == 0.0: theta1 =90.0\n\t\ttheta_number = int((90.0 - theta1)/theta2)\n\t\t#for helical, symmetry = s or scn\n\t\tcn = int(symmetry_string[2:])\n\t\tfor j in xrange(theta_number,-1, -1):\n\n\t\t\tif( j == 0):\n\t\t\t\tif (symmetry_string[1] ==\"c\"):\n\t\t\t\t\tif cn%2 == 0:\n\t\t\t\t\t\tk=int(359.99/cn/delta)\n\t\t\t\t\telse:\n\t\t\t\t\t\tk=int(359.99/2/cn/delta)\n\t\t\t\telif (symmetry_string[1] ==\"d\"):\n\t\t\t\t\tif cn%2 == 0:\n\t\t\t\t\t\tk=int(359.99/2/cn/delta)\n\t\t\t\t\telse:\n\t\t\t\t\t\tk=int(359.99/4/cn/delta)\n\t\t\t\telse:\n\t\t\t\t\tERROR(\"For helical strucutre, we only support scn and sdn symmetry\",\"even_angles\",1)\n\n\t\t\telse:\n\t\t\t\tif (symmetry_string[1] ==\"c\"):\n\t\t\t\t\tk=int(359.99/cn/delta)\n\t\t\t\telif (symmetry_string[1] ==\"d\"):\n\t\t\t\t\tk=int(359.99/2/cn/delta)\n\t\t\t\t\t\t\n\t\t\tfor i in xrange(k+1):\n\t\t\t\t\tangles.append([i*delta,90.0-j*theta2,90.0])\n\n\n\telse : # This is very close to the Saff even_angles routine on the asymmetric unit;\n\t\t# the only parameters used are symmetry and delta\n\t\t# The formulae are given in the Transform Class Paper\n\t\t# The symmetric unit \t\tnVec=[]; # x,y,z triples\n\t\t# is defined by three points b,c, v of Fig 2 of the paper\n\t\t# b is (0,0,1)\n\t\t# c is (sin(thetac),0,cos(thetac))\n\t\t# a is (sin(thetac)cos(Omega),sin(thetac)cos(Omega),cos(thetac))\n\t\t# f is the normalized sum of all 3\n\t\t\n\t\t# The possible symmetries are in list_syms\n\t\t# The symmetry determines thetac and Omega\n\t\t# The spherical area is Omega - pi/3; \n\t\t# should be equal to 4 *pi/(3*# Faces)\n\t\t#\t\t\n\t\t# symmetry ='tet'; delta = 6;\n\n\t\tscrunch = 0.9 # closeness factor to eliminate 
oversampling corners\n\t\t#nVec=[] # x,y,z triples\n\n\t\tpiOver = pi/180.0\n\t\tCount=0 # used to count the number of angles\n\t\t\n\t\tif (symmetryLower[0:3] ==\"tet\"): m=3.0; fudge=0.9 # fudge is a factor used to adjust phi steps\n\t\telif (symmetryLower[0:3] ==\"oct\"): m=4.0; fudge=0.8\n\t\telif (symmetryLower[0:3] ==\"ico\"): m=5.0; fudge=0.95\n\t\telse: ERROR(\"allowable symmetries are cn, dn, tet, oct, icos\",\"even_angles\",1)\n\n\t\tn=3.0\n\t\tOmegaR = 2.0*pi/m; cosOmega= cos(OmegaR)\n\t\tEdges = 2.0*m*n/(2.0*(m+n)-m*n)\n\t\tFaces = 2*Edges/n\n\t\tArea = 4*pi/Faces/3.0; # also equals 2*pi/3 + Omega\n\t\tcosthetac = cosOmega/(1-cosOmega)\n\t\tdeltaRad= delta*pi/180\n\t\tNumPoints = int(Area/(deltaRad*deltaRad))\n\t\tfheight = 1/sqrt(3)/ (tan(OmegaR/2.0))\n\n\t\tz0 = costhetac # initialize loop\t\n\t\tz = z0\n\t\tphi = 0\n\t\tDeltaz = (1-costhetac)/(NumPoints-1)\n\n\t\t#[1, phi,180.0*acos(z)/pi,0.]\n\t\tanglesLast = [phi,180.0*acos(z)/pi,0.]\n\t\tangles.append(anglesLast)\n\t\tnLast= [ sin(acos(z))*cos(phi*piOver) , sin(acos(z))*sin(phi*piOver) , z]\n\t\tnVec = []\n\t\tnVec.append(nLast)\n\n\t\tCount +=1\n\n\t\tfor k in xrange(1,(NumPoints-1)):\n\t\t\tz=z0 + Deltaz*k # Is it higher than fhat or lower\n\t\t\tr= sqrt(1-z*z)\n\t\t\tif (z > fheight): phiRmax= OmegaR/2.0\n\t\t\tif (z<= fheight):\n\t\t\t\tthetaR = acos(z); \n\t\t\t\tcosStuff = (cos(thetaR)/sin(thetaR))*sqrt(1. - 2 *cosOmega);\n\t\t\t\tphiMax = 180.0*( OmegaR - acos(cosStuff))/pi\n\t\t\tangleJump = fudge* delta/r\n\t\t\tphi = (phi + angleJump)%(phiMax)\n\t\t\tanglesNew = [phi,180.0*acos(z)/pi,0.];\n\t\t\tnNew = [ sin(acos(z))*cos(phi*piOver) , sin(acos(z))*sin(phi*piOver) , z]\n\t\t\tdiffangleVec = [acos(nNew[0]*nVec[k][0] + nNew[1]*nVec[k][1] + nNew[2]*nVec[k][2] ) for k in xrange(Count)] \n\t\t\tdiffMin = min(diffangleVec)\n\t\t\tif (diffMin>angleJump*piOver *scrunch):\n\t\t\t\tCount +=1\n\t\t\t\tangles.append(anglesNew)\n\t\t\t\tnVec.append(nNew)\n\t\t\t\t#[Count, phi,180*acos(z)/pi,0.]\n\t\t\tanglesLast = anglesNew\n\t\t\tnLast=nNew\n\n\t\tangles.append( [0.0, 0.0, 0.0] )\n\t\tnLast= [ 0., 0. 
, 1.]\n\t\tnVec.append(nLast)\n\t\tif(theta2 == 180.0): angles.append( [0.0, 180.0, 0.0] )\n\t\t\n\t\tangles.reverse()\n\t\tif(phiEqpsi == \"Minus\"):\n\t\t\tfor i in xrange(len(angles)): angles[i][2] = (720.0-angles[i][0])%360.0\n\t\t#print(Count,NumPoints)\n\t\t\n#\t\tlook at the distribution\n#\t\tCount =len(angles); piOver= pi/180.0;\n#\t\tphiVec = [ angles[k][0] for k in range(Count)] ;\n#\t\tthetaVec = [ angles[k][1] for k in range(Count)] ;\n#\t\txVec = [sin(piOver * angles[k][1]) * cos(piOver * angles[k][0]) for k in range(Count) ]\n#\t\tyVec = [sin(piOver * angles[k][1])* sin(piOver * angles[k][0]) for k in range(Count) ]\n#\t\tzVec = [cos(piOver * angles[k][1]) for k in range(Count) ]\n#\t\tpylab.plot(yVec,zVec,'.'); pylab.show()\n\n\n\treturn angles", "def euler2Q(self, (phi, theta, psi)):\n\thalf_phi = 0.5*phi;\n\thalf_theta = 0.5*theta;\n\thalf_psi = 0.5*psi;\n\n return np.asarray([\n (cos(half_phi)*cos(half_theta)*cos(half_psi)) + (sin(half_phi)*sin(half_theta)*sin(half_psi)),\n (sin(half_phi)*cos(half_theta)*cos(half_psi)) - (cos(half_phi)*sin(half_theta)*sin(half_psi)),\n (cos(half_phi)*sin(half_theta)*cos(half_psi)) + (sin(half_phi)*cos(half_theta)*sin(half_psi)),\n (cos(half_phi)*cos(half_theta)*sin(half_psi)) - (sin(half_phi)*sin(half_theta)*cos(half_psi))\n ]);", "def Euler2Rotation(phi, theta, psi):\n # only call sin and cos once for each angle to speed up rendering\n c_phi = np.cos(phi)\n s_phi = np.sin(phi)\n c_theta = np.cos(theta)\n s_theta = np.sin(theta)\n c_psi = np.cos(psi)\n s_psi = np.sin(psi)\n\n R_roll = np.array([[1, 0, 0],\n [0, c_phi, s_phi],\n [0, -s_phi, c_phi]])\n R_pitch = np.array([[c_theta, 0, -s_theta],\n [0, 1, 0],\n [s_theta, 0, c_theta]])\n R_yaw = np.array([[c_psi, s_psi, 0],\n [-s_psi, c_psi, 0],\n [0, 0, 1]])\n\n R = R_roll @ R_pitch @ R_yaw # inertial to body (Equation 2.4 in book)\n return R.T # transpose to return body to inertial", "def _euler_90_algorithm(self):\n # define scale factor from min radius and output angle (which is ninety degrees), grab radius from input\n output_angle = np.pi / 2.\n effective_radius = self.radius\n # Euler curvature scaling factor, determined from calculating a 1. radius term and looking at output\n min_radius = effective_radius / 1.87009582269\n a_scale = 2. * min_radius * (output_angle / 2.0)**0.5\n # too many points causes issues on gdsii, splitting over different sizes is probably most suitable way\n if effective_radius < 30.:\n points = 50\n else:\n points = 80\n # Create t array for calculating parametric curve\n end_t = (output_angle / 2.0)**0.5\n all_t = np.linspace(0., end_t, points)\n # Create a list for x values and generate the x components of parametric curve using loop\n xs = list()\n for t in all_t:\n xs.append(a_scale * (t - (1 / 10.) * t**5 + (1 / 216.) * t**9 - (1 / 9360.) * t**13 + (1 / 685440.) * t**17))\n # Do the same for y values\n ys = list()\n for t in all_t:\n ys.append(a_scale * (t**3 * (1 / 3.) - (1 / 42.) * t**7 + (1 / 1320.) * t**11 - (1 / 75600.) * t**15))\n # Combine the xs and ys to perform the mirroring operation\n start_euler_xy = zip(xs, ys)\n # Calculating Mirror curve for X and Y, need x axis angle and end positions\n angle_x = np.pi / 2. 
+ output_angle / 2.\n end_x = start_euler_xy[-1][0]\n end_y = start_euler_xy[-1][1]\n # initialising for loops, looping using checked equations from Mathematica for mirroring around line\n x_mirror = list()\n y_mirror = list()\n for elem in start_euler_xy:\n x_mirror.append(end_x + np.cos(2 * angle_x) * (elem[0] - end_x) + np.sin(2 * angle_x) * (elem[1] - end_y))\n\n for elem in start_euler_xy:\n y_mirror.append(end_y + np.sin(2 * angle_x) * (elem[0] - end_x) - np.cos(2 * angle_x) * (elem[1] - end_y))\n\n # takes output of mirrors, flips them and combines them\n mirror_xy = zip(x_mirror[::-1], y_mirror[::-1])\n\n # Combines initial and mirrored list to generate the euler curve\n euler_full = start_euler_xy + mirror_xy\n return euler_full", "def cal_phi(self):\n\n if not self.check_def(['E','px','py','pz']):\n sys.exit('Particle error: Quadri impulsion not define (error for phi routine)')\n\n if(self.px>0):\n self.phi=math.atan(self.py/self.px)\n elif(self.px<0):\n self.phi=math.atan(self.py/self.px)+math.pi\n elif(self.py>0): #remind that p(1)=0\n self.phi=math.pi/2.0\n elif(self.py<0): # remind that p(1)=0\n self.phi=-math.pi/2.0\n else:\n print \"Warning self.phi not properly defined put value to 0\"\n self.phi=0\n \n if(self.phi<0):\n self.phi=self.phi+2*math.pi\n\n return self.phi", "def euler_to_quaternion(psi, theta, phi):\n # Abbreviations for the various angular functions\n cy = np.cos(psi * 0.5)\n sy = np.sin(psi * 0.5)\n cp = np.cos(theta * 0.5)\n sp = np.sin(theta * 0.5)\n cr = np.cos(phi * 0.5)\n sr = np.sin(phi * 0.5)\n\n q = np.zeros(4)\n q[0] = cy * cp * cr + sy * sp * sr\n q[1] = cy * cp * sr - sy * sp * cr\n q[2] = sy * cp * sr + cy * sp * cr\n q[3] = sy * cp * cr - cy * sp * sr\n return q", "def calculate_angles_to_rotate_vector(self, *args, **kwargs):\n #The parent class does the work\n best_angles = LimitedGoniometer.calculate_angles_to_rotate_vector(self, *args, **kwargs)\n\n if best_angles is None:\n return None\n else:\n (phi, chi, omega) = best_angles\n #Chi needs to be 130 degrees! So we take it out\n\n if not np.abs(chi - np.deg2rad(self.chi)) < 0.1/57 and not np.abs(omega - np.deg2rad(self.omega)) < 0.1/57:\n #Chi is not within +-0.1 degree of the fixed chi value degrees!\n #Omega is not within +-0.1 degree of the fixed chi value degrees!\n #print \"Warning! Found angles\", np.rad2deg(best_angles), \" where chi is more than 1 degree off of fixed value.\"\n return None\n else:\n #Okay, we found a decent phi\n return [phi]", "def Q2euler(self, q):\n\n\tphi = mt.atan2(2.0*((q[2]*q[3])+(q[0]*q[1])), (q[0]**2.0)-(q[1]**2.0)-(q[2]**2.0)+(q[3]**2.0));\n\tpsi = mt.atan2(2.0*((q[1]*q[2])+(q[0]*q[3])), (q[0]**2.0)+(q[1]**2.0)-(q[2]**2.0)-(q[3]**2.0));\n try:\n theta = mt.asin(2.0*((q[0]*q[2])-(q[1]*q[3])));\n except ValueError:\n print \"ERRO: norm(Q) = %f\" % np.sqrt(np.sum(q**2))\n theta = 0;\n\n return (phi, theta, psi)", "def calculate_angles_to_rotate_vector(self, starting_vec, ending_vec, starting_angles=None, search_method=0):\n #Find the starting rotation matrix\n if not starting_angles is None:\n (phi, chi, omega) = starting_angles[0:3]\n starting_rot_matrix = numpy_utils.rotation_matrix(phi, chi, omega)\n #Rotate the starting vector\n starting_vec = np.dot(starting_rot_matrix, column(starting_vec)).flatten()\n\n #Find the rotation matrix that satisfies ending_vec = R . 
starting_vec\n\n #The cross product of q0 X q_over_a gives a rotation axis to use\n rotation_axis = np.cross(starting_vec, ending_vec)\n\n #Now we find the rotation angle about that axis that puts q0 on q_over_a\n angle = np.arccos( np.dot(starting_vec, ending_vec) / (vector_length(starting_vec)*vector_length(ending_vec)))\n\n #Make the rot. matrix\n R = numpy_utils.rotation_matrix_around_vector(rotation_axis, angle)\n\n if not starting_angles is None:\n #The final rotation we want is starting_rot_matrix 1st; R second.\n # So this is the resulting matrix\n R = np.dot(R, starting_rot_matrix)\n\n #The function finds some angles that work\n angles = numpy_utils.angles_from_rotation_matrix(R)\n\n #Position is always allowed\n return (angles)", "def refine_angles(self, method='nelder', **opts):\n self.set_idx()\n from lmfit import fit_report, minimize\n p0 = self.define_parameters(**opts)\n self.result = minimize(self.angle_residuals, p0, method=method)\n self.fit_report = fit_report(self.result)\n if self.result.success:\n self.get_parameters(self.result.params)", "def to_angles(self) -> np.ndarray:\n phi = np.arctan2(2.0*(self.w*self.x + self.y*self.z), 1.0 - 2.0*(self.x**2 + self.y**2))\n theta = np.arcsin(2.0*(self.w*self.y - self.z*self.x))\n psi = np.arctan2(2.0*(self.w*self.z + self.x*self.y), 1.0 - 2.0*(self.y**2 + self.z**2))\n return np.array([phi, theta, psi])", "def euler_angles(quatX,quatY,quatZ,quatW):\n\n\troll1 = 2.0 * (quatW * quatX + quatY * quatZ)\n\troll2 = (1.0 - 2.0) * (quatX * quatX + quatY * quatY)\n\n\tyaw1 = 2.0 * (quatW * quatZ + quatX * quatY)\n\tyaw2 = 1.0 - 2.0 * (quatY * quatY + quatZ * quatZ)\n\n\troll = math.atan2(roll1,roll2)\n\tpitch = math.asin(max(-1.0, min(1.0, 2.0 *(quatW * quatY - quatZ * quatX))))\n\tyaw = math.atan2(yaw1,yaw2)\n\n\troll_w = int(((roll + (math.pi)) / (math.pi * 2.0) * 18))\n\tpitch_w = int(pitch + (math.pi/2.0)/math.pi * 18)\n\tyaw_w = int(yaw + (math.pi / (math.pi * 2.0)) * 18)\n\n\teulerAngles = [roll_w,pitch_w,yaw_w]\n\treturn eulerAngles", "def angle_calc(sides):\n return 360//sides", "def rotateEuler(self,axis, angle):\n if(axis == 'Z'):\n return np.array([[cos(angle), -sin(angle),0,0],[sin(angle), cos(angle),0,0],[0,0,1,0],[0,0,0,1]])\n if(axis == 'Y'):\n return np.array([[cos(angle),0,sin(angle),0],[0,1,0,0],[-sin(angle),0,cos(angle),0],[0,0,0,1]])\n if(axis == 'X'):\n return np.array([[1,0,0,0],[0,cos(angle), -sin(angle),0],[0,sin(angle), cos(angle),0],[0,0,0,1]])", "def to_angles(self) -> np.ndarray:\n phi = np.arctan2(2.0*(self.w*self.x + self.y*self.z), 1.0 - 2.0*(self.x**2 + self.y**2))\n theta = np.arcsin(2.0*(self.w*self.y - self.z*self.x))\n psi = np.arctan2(2.0*(self.w*self.z + self.x*self.y), 1.0 - 2.0*(self.y**2 + self.z**2))\n return np.c_[phi, theta, psi]", "def phi_list ( self ) :\n return self.__phi_list", "def steps_to_angle():\n pass", "def set_rama_angles(moving_h, angles, direction_forward=True, check_omega=False):\n # print \"angles\", angles\n # STOP()\n result_h = moving_h.deep_copy()\n result_h.reset_atom_i_seqs()\n fixed_omega = False\n phi_psi_atoms = utils.get_phi_psi_atoms(moving_h, omega=True)\n assert len(phi_psi_atoms) == len(angles), \"%d != %d\" % (len(phi_psi_atoms), len(angles))\n if not direction_forward:\n phi_psi_atoms.reverse()\n angles.reverse()\n for ps_atoms, target_angle_pair in zip(phi_psi_atoms, angles):\n phi_psi_pair = ps_atoms[0]\n # print \"phi_psi_pair\", phi_psi_pair\n omega = ps_atoms[2]\n phi_psi_angles = utils.get_pair_angles(phi_psi_pair)\n # print \"ps_atoms, 
target_angle_pair\", phi_psi_angles, target_angle_pair\n # phi\n if target_angle_pair[0] is not None and phi_psi_angles[0] is not None:\n rotation_angle = -phi_psi_angles[0]+target_angle_pair[0]\n # print \"rot angle\", rotation_angle\n # if not direction_forward:\n # rotation_angle = -rotation_angle\n utils.rotate_atoms_around_bond(\n result_h,\n phi_psi_pair[0][1],\n phi_psi_pair[0][2],\n angle=rotation_angle,\n direction_forward=direction_forward)\n # psi\n if target_angle_pair[1] is not None and phi_psi_angles[1] is not None:\n rotation_angle = -phi_psi_angles[1]+target_angle_pair[1]\n # print \"rot angle\", rotation_angle\n # if not direction_forward:\n # rotation_angle = -rotation_angle\n utils.rotate_atoms_around_bond(\n result_h,\n phi_psi_pair[1][1],\n phi_psi_pair[1][2],\n angle=rotation_angle,\n direction_forward=direction_forward)\n # omega\n if omega is not None and abs(abs(omega)-180) > 10 and check_omega:\n rotation_angle= -omega+180\n # print \"Omega rotation:\", omega, rotation_angle\n utils.rotate_atoms_around_bond(\n result_h,\n phi_psi_pair[0][0],\n phi_psi_pair[0][1],\n angle=rotation_angle,\n direction_forward=direction_forward)\n fixed_omega = True\n # print utils.list_rama_outliers_h(result_h)\n # result_h.write_pdb_file(file_name=\"variant_%s.pdb\" % direction_forward)\n # STOP()\n return result_h, fixed_omega", "def test_vectors_angle(self):\n\n # Example 1.3\n vector_p = np.array([1.0, 2.0, 0.0])\n vector_q = np.array([3.0, 1.0, 1.0])\n crystal = crystal_system.Tetragonal(0.5, 1.0)\n angle_ref_deg = 53.300774799510123\n\n angle_rad = vector.angle_rad(crystal, vector_p, vector_q)\n angle_deg = np.degrees(angle_rad)\n self.assertAlmostEqual(angle_ref_deg, angle_deg, 6)\n\n angle_rad = vector.angle_rad(crystal, vector_q, vector_p)\n angle_deg = np.degrees(angle_rad)\n self.assertAlmostEqual(angle_ref_deg, angle_deg, 6)\n\n #self.fail(\"Test if the testcase is working.\")", "def equations(p):\n [x, y] = p\n list = [x - 5 , y - 5]\n return list", "def calculate_angles_to_rotate_vector(self, *args, **kwargs):\n #The parent class does the work\n best_angles = LimitedGoniometer.calculate_angles_to_rotate_vector(self, *args, **kwargs)\n\n if best_angles is None:\n return None\n else:\n (phi, chi, omega) = best_angles\n #Chi needs to be 45 degrees! So we take it out\n\n if not np.abs(omega - np.deg2rad(self.omega)) < 0.1/57:\n #Chi is not within +-0.1 degree of the fixed chi value degrees!\n #print \"Warning! Found angles\", np.rad2deg(best_angles), \" where chi is more than 1 degree off of fixed value.\"\n return None\n else:\n #Okay, we found a decent chi\n return [phi, chi]", "def polar_angle(points):\n\n\tpolar_angle = []\n\n\tfor each in points:\n\t\tdy = each[1] - P0[1]\n\t\tdx = each[0] - P0[0]\n\t\tpolar_angle.append(atan2(dy, dx))\n\n\treturn polar_angle", "def calculate_angles_to_rotate_vector(self, *args, **kwargs):\n #The parent class does the work\n best_angles = LimitedGoniometer.calculate_angles_to_rotate_vector(self, *args, **kwargs)\n\n if best_angles is None:\n return None\n else:\n (phi, chi, omega) = best_angles\n #Chi needs to be 45 degrees! So we take it out\n\n if not np.abs(chi - np.deg2rad(self.chi)) < 0.1/57:\n #Chi is not within +-0.1 degree of the fixed chi value degrees!\n #print \"Warning! 
Found angles\", np.rad2deg(best_angles), \" where chi is more than 1 degree off of fixed value.\"\n return None\n else:\n #Okay, we found a decent chi\n return [phi, omega]", "def calculate_angles_to_rotate_vector(self, *args, **kwargs):\n #The parent class does the work\n best_angles = LimitedGoniometer.calculate_angles_to_rotate_vector(self, *args, **kwargs)\n\n if best_angles is None:\n return None\n else:\n (phi, chi, omega) = best_angles\n #Chi needs to be 45 degrees! So we take it out\n\n if not np.abs(chi - np.deg2rad(self.chi)) < 0.1/57:\n #Chi is not within +-0.1 degree of the fixed chi value degrees!\n #print \"Warning! Found angles\", np.rad2deg(best_angles), \" where chi is more than 1 degree off of fixed value.\"\n return None\n else:\n #Okay, we found a decent chi\n return [phi, omega]", "def calculate_angles_to_rotate_vector(self, *args, **kwargs):\n #The parent class does the work\n best_angles = LimitedGoniometer.calculate_angles_to_rotate_vector(self, *args, **kwargs)\n\n if best_angles is None:\n return None\n else:\n (phi, chi, omega) = best_angles\n #Chi needs to be 45 degrees! So we take it out\n\n if not np.abs(chi - np.deg2rad(self.chi)) < 0.1/57:\n #Chi is not within +-0.1 degree of the fixed chi value degrees!\n #print \"Warning! Found angles\", np.rad2deg(best_angles), \" where chi is more than 1 degree off of fixed value.\"\n return None\n else:\n #Okay, we found a decent chi\n return [phi, omega]", "def solver(self, alpha):\n if alpha == 0: # skip divided by 0 error\n return [0], [0] # r and phi=0\n\n if alpha == 180:\n return [self.D], [0] # if angle= pi then, tan(pi)=0 so 1/tan=1/0\n\n # initial value for position and angular speed\n y0 = [1/self.D, 1/(self.D*math.tan(math.radians(alpha)))]\n sol = solve_ivp(fun=self._diff_eq, t_span=[0, 10*pi], y0=y0, method='Radau', events=[self._eventRs]) #, self._eventR])#,t_eval=np.linspace(0, t_max, 10000)) #dense_output=False\n\n if sol.t[-1] == 10*pi:\n raise StopIteration(\"solver error, alpha reached computation limit (loop number)\")\n\n phi = np.array(sol.t)\n r = np.abs(1/sol.y[0, :]) # must use this because solver can't be stop before infinity because negative\n\n return r, phi", "def polar_decomposition(self):\n return self.polar_unit_vector, self.polar_angle", "def range_finder_angles(self):\n return -90, -75, -60, -45, -30, -20, -15, -10, -5, 0, 5, 10, 15, 20, \\\n 30, 45, 60, 75, 90", "def range_finder_angles(self):\n return -90, -75, -60, -45, -30, -20, -15, -10, -5, 0, 5, 10, 15, 20, \\\n 30, 45, 60, 75, 90", "def euler_from_quaternion(self, quaternion):\n x = quaternion.x\n y = quaternion.y\n z = quaternion.z\n w = quaternion.w\n\n sinr_cosp = 2 * (w * x + y * z)\n cosr_cosp = 1 - 2 * (x * x + y * y)\n roll = np.arctan2(sinr_cosp, cosr_cosp)\n\n sinp = 2 * (w * y - z * x)\n pitch = np.arcsin(sinp)\n\n siny_cosp = 2 * (w * z + x * y)\n cosy_cosp = 1 - 2 * (y * y + z * z)\n yaw = np.arctan2(siny_cosp, cosy_cosp)\n\n return roll, pitch, yaw", "def get_angles(sides):\n return [get_angle(sides[1], sides[2], sides[0]),\n get_angle(sides[2], sides[0], sides[1]),\n get_angle(sides[0], sides[1], sides[2])]", "def phases_from_superoperator(U):\n if U.type=='oper':\n phi_00 = np.rad2deg(np.angle(U[0, 0])) # expected to equal 0 because of our\n # choice for the energy, not because of rotating frame. 
But not guaranteed including the coupling\n phi_01 = np.rad2deg(np.angle(U[1, 1]))\n phi_10 = np.rad2deg(np.angle(U[3, 3]))\n phi_11 = np.rad2deg(np.angle(U[4, 4]))\n phi_02 = np.rad2deg(np.angle(U[2, 2])) # used only for avgatefid_superoperator_phasecorrected\n phi_20 = np.rad2deg(np.angle(U[6, 6])) # used only for avgatefid_superoperator_phasecorrected\n\n elif U.type=='super':\n phi_00 = 0 # we set it to 0 arbitrarily but it is indeed not knowable\n phi_01 = np.rad2deg(np.angle(U[1, 1])) # actually phi_01-phi_00 etc\n phi_10 = np.rad2deg(np.angle(U[3, 3]))\n phi_11 = np.rad2deg(np.angle(U[4, 4]))\n phi_02 = np.rad2deg(np.angle(U[2, 2]))\n phi_20 = np.rad2deg(np.angle(U[6, 6]))\n\n phi_cond = (phi_11 - phi_01 - phi_10 + phi_00) % 360 # still the right formula independently from phi_00\n\n return phi_00, phi_01, phi_10, phi_11, phi_02, phi_20, phi_cond", "def equations(t, y, args):\n x1 = y[0] # x1 = theta1, angle\n x2 = y[1] # x2 = theta2, angle\n p1 = y[2] # p1 = omega1, angular velocity\n p2 = y[3] # p2 = omega2, angular velocity\n l1, l2, m1, m2, g = args\n x1_eq = p1\n x2_eq = p2\n p1_eq = -((g*(2*m1+m2)*np.sin(x1)+m2*(g*np.sin(x1-2*x2)+2*(l2*p2**2+l1*p1 **\n 2*np.cos(x1-x2))*np.sin(x1-x2)))/(2*l1*(m1+m2-m2*(np.cos(x1-x2))**2)))\n p2_eq = ((l1*(m1+m2)*p1**2+g*(m1+m2)*np.cos(x1)+l2*m2*p2**2 *\n np.cos(x1-x2))*np.sin(x1-x2))/(l2*(m1+m2-m2*(np.cos(x1-x2))**2))\n return [x1_eq, x2_eq, p1_eq, p2_eq]", "def rotateEuler(axis, angle):\n if(axis == 'Z'):\n return np.array([[cos(angle), -sin(angle),0,0],[sin(angle), cos(angle),0,0],[0,0,1,0],[0,0,0,1]])\n if(axis == 'Y'):\n return np.array([[cos(angle),0,sin(angle),0],[0,1,0,0],[-sin(angle),0,cos(angle),0],[0,0,0,1]])\n if(axis == 'X'):\n return np.array([[1,0,0,0],[0,cos(angle), -sin(angle),0],[0,sin(angle), cos(angle),0],[0,0,0,1]])", "def phi_up(self):\n return 0.5 * (self.phi + 10 * (self.phi / 30.0) ** 2) / 180.0 * np.pi", "def polarization_ellipse(self):\n self.ellipse = {}\n self.ellipse['d_lin'] = sqrt(self.Q**2 + self.U**2)/self.I\n self.ellipse['d_cir'] = abs(self.V)/self.I\n self.ellipse['d'] = sqrt(self.Q**2 + self.U**2 + self.V**2)/self.I\n if self.Q:\n self.ellipse['theta'] = 0.5*atan(self.U/self.Q)\n else:\n self.ellipse['theta'] = float('NaN')\n self.logger.debug(\"polarization_ellipse: theta = %f\",\n self.ellipse['theta'])\n\n if (self.Q**2 + self.U**2):\n self.ellipse['beta'] = 0.5*atan(self.V/sqrt(self.Q**2 + self.U**2))\n if self.V:\n self.ellipse['eccen'] = tan(self.ellipse['beta'])\n else:\n self.ellipse['eccen'] = 0.\n else:\n self.ellipse['beta'] = pi/4\n self.ellipse['eccen'] = 1.\n self.logger.debug(\"polarization_ellipse: beta = %f\",\n self.ellipse['beta'])\n self.logger.debug(\"polarization_ellipse: eccen = %f\",\n self.ellipse['eccen'])", "def to_angles(self) -> np.ndarray:\n phi = np.arctan2(self.A[1, 2], self.A[2, 2]) # Roll Angle\n theta = -np.sin(self.A[0, 2]) # Pitch Angle\n psi = np.arctan2(self.A[0, 1], self.A[0, 0]) # Yaw Angle\n return np.array([phi, theta, psi])", "def getAngles(self):\n try:\n return self._angleList\n except AttributeError:\n pass\n forceConstant=self._raw_data[\"ANGLE_FORCE_CONSTANT\"]\n angleEquil=self._raw_data[\"ANGLE_EQUIL_VALUE\"]\n anglePointers = self._raw_data[\"ANGLES_INC_HYDROGEN\"] \\\n +self._raw_data[\"ANGLES_WITHOUT_HYDROGEN\"]\n self._angleList=[]\n forceConstConversionFactor = (units.kilocalorie_per_mole/(units.radian*units.radian)).conversion_factor_to(units.kilojoule_per_mole/(units.radian*units.radian))\n for ii in range(0,len(anglePointers),4):\n if 
int(anglePointers[ii])<0 or \\\n int(anglePointers[ii+1])<0 or \\\n int(anglePointers[ii+2])<0:\n raise Exception(\"Found negative angle atom pointers %s\"\n % ((anglePointers[ii],\n anglePointers[ii+1],\n anglePointers[ii+2]),))\n iType=int(anglePointers[ii+3])-1\n self._angleList.append((int(anglePointers[ii])//3,\n int(anglePointers[ii+1])//3,\n int(anglePointers[ii+2])//3,\n float(forceConstant[iType])*forceConstConversionFactor,\n float(angleEquil[iType])))\n return self._angleList", "def calculate_phi_vals(self) -> None:\n for point in self.points:\n point.phi = - (point.psi + point.theta)", "def eulerphi(n):\n\treturn euler_phi(n)", "def test_float_input_angles_and_phase(self):\n decomposer = OneQubitEulerDecomposer(\"PSX\")\n input_matrix = np.array(\n [\n [0.70710678, 0.70710678],\n [0.70710678, -0.70710678],\n ],\n dtype=np.float64,\n )\n (theta, phi, lam, gamma) = decomposer.angles_and_phase(input_matrix)\n expected_theta = 1.5707963267948966\n expected_phi = 0.0\n expected_lam = 3.141592653589793\n expected_gamma = -0.7853981633974483\n self.assertAlmostEqual(theta, expected_theta)\n self.assertAlmostEqual(phi, expected_phi)\n self.assertAlmostEqual(lam, expected_lam)\n self.assertAlmostEqual(gamma, expected_gamma)", "def calculate_angles_to_rotate_vector(self, *args, **kwargs):\n #The parent class does the work\n best_angles = LimitedGoniometer.calculate_angles_to_rotate_vector(self, *args, **kwargs)\n\n if best_angles is None:\n return None\n else:\n (phi, chi, omega) = best_angles\n \n if not np.abs(chi - np.deg2rad(self.chi)) < 0.5/57:\n # Have some tolerance (1 deg) in chi to help find anything. \n return None\n else:\n #Okay, we found a decent chi\n return [omega]", "def test_float_input_angles(self):\n decomposer = OneQubitEulerDecomposer(\"PSX\")\n input_matrix = np.array(\n [\n [0.70710678, 0.70710678],\n [0.70710678, -0.70710678],\n ],\n dtype=np.float64,\n )\n (theta, phi, lam) = decomposer.angles(input_matrix)\n expected_theta = 1.5707963267948966\n expected_phi = 0.0\n expected_lam = 3.141592653589793\n self.assertAlmostEqual(theta, expected_theta)\n self.assertAlmostEqual(phi, expected_phi)\n self.assertAlmostEqual(lam, expected_lam)", "def calculate_angles_to_rotate_vector(self, starting_vec, ending_vec, starting_angles=None, search_method=0):\n# print \"starting_vec, ending_vec\", starting_vec, ending_vec\n\n # We want to find a rotation matrix R\n # R puts starting_vec onto ending_vec\n # But R has the freedom to rotate all the other axes around ending_vec - all\n # of these are equally valid.\n\n if np.allclose(vector_length(starting_vec), 0) or np.allclose(vector_length(ending_vec), 0):\n return None\n\n #Normalize our vectors\n starting_vec = starting_vec/vector_length(starting_vec)\n ending_vec = ending_vec/vector_length(ending_vec)\n\n #Find an initial rotation matrix.\n # We'll rotate around the cross-product of start x end, staying in the plane defined by these vectors\n rotation_axis = np.cross(starting_vec, ending_vec)\n #TODO: check for too-close vectors to get a valid cross-product\n angle = np.arccos( np.dot(starting_vec, ending_vec) )\n initial_R = numpy_utils.rotation_matrix_around_vector(rotation_axis, angle)\n\n result_vec = np.dot(initial_R, column(starting_vec)).flatten()\n #Check that the matrices match, but not if all are NaN\n #if not np.any(np.isnan(result_vec) and np.isnan(ending_vec)):\n if not np.any(np.isnan(result_vec)):\n assert np.allclose( result_vec, ending_vec), \"initial rotation matrix makes the correct rotation. 
Got %s, expected %s\" % ( result_vec, ending_vec)\n\n\n def optimize(start, stop, step):\n \"\"\"Routine to optimize by brute force\"\"\"\n #Go through every angle\n rot_angle_list = np.arange(start, stop, step)\n fitness_list = []\n best_angles_list = []\n for (i, rot_angle) in enumerate(rot_angle_list):\n (fitness, best_angles) = self._angle_fitness(rot_angle, initial_R, ending_vec, starting_vec)\n fitness_list.append(fitness)\n best_angles_list.append(best_angles)\n #Find the best result\n best_index = np.argmin(fitness_list)\n best_rot_angle = rot_angle_list[best_index]\n best_angles = best_angles_list[best_index]\n return (best_rot_angle, best_angles)\n\n\n def optimize_c_code(start, stop, step):\n \"\"\"Routine to optimize by brute force\"\"\"\n #Go through every angle\n rot_angle_list = np.arange(start, stop, step)\n (best_rot_angle, best_angles) = self._angle_fitness_brute(rot_angle_list, initial_R, ending_vec, starting_vec)\n return (best_rot_angle, best_angles)\n\n args = (initial_R, ending_vec, starting_vec)\n\n if search_method:\n #--- scipy optimize ----\n\n # Get a starting point by brute force \n step = np.deg2rad(2)\n (best_rot_angle, best_angles) = optimize_c_code(-2.2*pi, pi*2.2, step)\n\n # And optimize with that\n if False:\n x0 = best_rot_angle\n res = scipy.optimize.fminbound(self._angle_fitness, 0, 2*pi, args, xtol=4e-3, disp=0, maxfun=100, full_output=0)\n best_rot_angle = res\n else:\n x0 = np.array([ best_rot_angle ])\n res = scipy.optimize.fmin(self._angle_fitness_python, x0, args, xtol=4e-3, ftol=1e-2, disp=0, maxiter=100)\n best_rot_angle = res.reshape( (1) )[0] #avoid error with 0-dimension array\n\n #Call the same function to get the best angles too\n (fitness, best_angles) = self._angle_fitness_python(best_rot_angle, *args)\n\n else:\n #--- semi-brute optimization routine ----\n #for optimize_func in [optimize, optimize_c_code]:\n step = np.deg2rad(2)\n # (best_rot_angle, best_angles) = optimize_c_code(-0.2*pi, pi*2.2, step)\n (best_rot_angle, best_angles) = optimize_c_code(-1.2*pi, pi*1.2, step)\n for x in xrange(4):\n newstep = step/10\n (best_rot_angle, best_angles) = optimize_c_code(best_rot_angle-step, best_rot_angle+step, newstep)\n step = newstep\n\n #Optimized angles\n return best_angles", "def vecRot(data, seq, euler_angles, **kwargs):\n from scipy.spatial.transform import Rotation as R\n r = R.from_euler(seq, euler_angles, **kwargs)\n return r.apply(data)", "def get_euler_angles_from_T(T):\n pass", "def rotationMatrixToEulerAngles(R) :\n sy = np.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])\n singular = sy < 1e-6\n\n if not singular :\n x = np.arctan2(R[2,1] , R[2,2])\n y = np.arctan2(-R[2,0], sy)\n z = np.arctan2(R[1,0], R[0,0])\n else :\n x = np.arctan2(-R[1,2], R[1,1])\n y = np.arctan2(-R[2,0], sy)\n z = 0\n\n return np.array([x, y, z])", "def eulerphi(n):\r\n\treturn euler_phi(n)", "def angle(self) -> float:\n ...", "def angsep(phi, theta, deg=True):\n ra1, ra2 = phi\n dec1, dec2 = theta\n\n if deg==True:\n ra1, ra2 = np.radians(ra1), np.radians(ra2)\n dec1, dec2 = np.radians(dec1), np.radians(dec2)\n \n sin = np.sin\n cos = np.cos\n return np.arccos( sin(dec1)*sin(dec2)+cos(dec1)*cos(dec2)*cos(ra1-ra2) )", "def polarizer(px,py,angle=0):\n M = np.array([[px,0],[0,py]])\n if angle != 0:\n return Jones.rotate(M,angle)\n else:\n return M", "def euler2quat(angles, rot_seq='zyx'):\n cangle = np.cos(0.5*angles)\n sangle = np.sin(0.5*angles)\n rot_seq = rot_seq.lower()\n if rot_seq == 'zyx':\n return np.array([cangle[0]*cangle[1]*cangle[2] + 
sangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*cangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2]])\n elif rot_seq == 'zyz':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*sangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] + cangle[0]*cangle[1]*sangle[2]])\n elif rot_seq == 'zxy':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*sangle[1]*sangle[2],\n cangle[0]*sangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2]])\n elif rot_seq == 'zxz':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*sangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2]])\n elif rot_seq == 'yxz':\n return np.array([cangle[0]*cangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*cangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2]])\n elif rot_seq == 'yxy':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] + cangle[0]*cangle[1]*sangle[2],\n cangle[0]*sangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2]])\n elif rot_seq == 'yzx':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2]])\n elif rot_seq == 'yzy':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n sangle[0]*sangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2]])\n elif rot_seq == 'xyz':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*sangle[1]*sangle[2],\n cangle[0]*sangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*sangle[1]*cangle[2]])\n elif rot_seq == 'xyx':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*sangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2]])\n elif rot_seq == 'xzy':\n return np.array([cangle[0]*cangle[1]*cangle[2] + sangle[0]*sangle[1]*sangle[2],\n sangle[0]*cangle[1]*cangle[2] - cangle[0]*sangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + sangle[0]*cangle[1]*sangle[2]])\n elif rot_seq == 'xzx':\n return np.array([cangle[0]*cangle[1]*cangle[2] - sangle[0]*cangle[1]*sangle[2],\n cangle[0]*cangle[1]*sangle[2] + sangle[0]*cangle[1]*cangle[2],\n cangle[0]*sangle[1]*sangle[2] - sangle[0]*sangle[1]*cangle[2],\n cangle[0]*sangle[1]*cangle[2] + 
sangle[0]*sangle[1]*sangle[2]])\n else:\n return False", "def test_vectors_angle2(self):\n\n # Example 1.4\n vector_p = np.array([1.0, 2.0, 0.0])\n vector_q = np.array([3.0, 1.0, 1.0])\n crystal = crystal_system.Tetragonal(0.5, 1.0)\n angle_ref_deg = 53.300774799510123\n\n angle_rad = vector.angle2_rad(crystal, vector_p, vector_q)\n angle_deg = np.degrees(angle_rad)\n self.assertAlmostEqual(angle_ref_deg, angle_deg, 6)\n\n angle_rad = vector.angle2_rad(crystal, vector_q, vector_p)\n angle_deg = np.degrees(angle_rad)\n self.assertAlmostEqual(angle_ref_deg, angle_deg, 6)\n\n #self.fail(\"Test if the testcase is working.\")", "def calculate_angles(self, x, y):\n Oimat = inv(self.Omat)\n Mat = self.pixel_size * inv(self.Dmat) * Oimat\n polar_angles = []\n azimuthal_angles = []\n for i in range(len(x)):\n peak = Oimat * (vec(x[i], y[i]) - self.Cvec)\n v = norm(Mat * peak)\n polar_angle = np.arctan(v / self.distance)\n polar_angles.append(polar_angle)\n azimuthal_angles.append(np.arctan2(-peak[1, 0], peak[2, 0]))\n return (np.array(polar_angles) * degrees,\n np.array(azimuthal_angles) * degrees)", "def convergence_angle(self):\n return np.arctan2(self.radius, self.focal_length)", "def partial_euler(angle = 90, Rmin = 3, Reff = None, p = 0.2, num_pts = 720):\n # Overhead calculations\n num_pts = abs(int(num_pts * angle/360))\n angle = np.radians(angle)\n sp = np.sqrt(p*angle) # Clothoid-to-normal transition point s value\n s0 = 2*sp + angle*(1-p)/(2*np.sqrt(p*angle/2))\n c = 1 / (2*sp*Rmin) # Scaling factor to enforce Rmin\n print(sp)\n\n # Constructing s and K arrays\n s = np.linspace(0, s0, num_pts)\n if p == 0: K = np.array([[1/Rmin] * len(s)])\n else:\n i1 = np.argmax(s > sp)\n i2 = np.argmax(s >= s0 - sp)\n K = c * np.concatenate([np.multiply(np.ones(i1), 2*s[:i1]),\n np.multiply(np.ones(i2-i1), 2*sp),\n np.multiply(np.ones(num_pts-i2), \n 2*(s0 - s[i2:num_pts]))])\n\n # Integrating to find x and y\n ds = s[1] - s[0]\n phi = cumtrapz(K*ds)\n x, y = np.concatenate([np.array([[0],[0]]), \n np.cumsum([ds*np.cos(phi), ds*np.sin(phi)], axis = 1)],\n axis = 1)\n\n return x, y", "def phi_deg(self):\n return self.phi * 180 / np.pi", "def angle(self):\n return np.array([f.angle() for f in self])", "def spin_husimi_qfunc(density_op, theta, phi, *, method=\"su2\"):\n if method == \"qutip\":\n Q, *_ = spin_q_function(density_op, theta, phi)\n return Q\n elif method == \"vectorised\":\n return my_spin_q_func(density_op, theta, phi)\n elif method == \"su2\":\n st = np.sin(theta / 2)\n ct = np.cos(theta / 2)\n epip = np.exp(1j * phi / 2)\n emip = np.exp(-1j * phi / 2)\n\n A = st * emip\n B = ct * epip\n\n Q = (\n A * A.conj() * density_op[1, 1]\n + A.conj() * B * density_op[1, 0]\n + A * B.conj() * density_op[0, 1]\n + B * B.conj() * density_op[0, 0]\n ) / (2 * np.pi)\n # assert np.count_nonzero(Q.imag) == 0\n assert np.all(np.abs(Q.imag) < 1e-10)\n return Q.real", "def dir_vect(theta):\n return np.array([np.cos(theta),np.sin(theta)])", "def translate_from_rpc(rpcEulerAngle):\n return EulerAngle(\n \n rpcEulerAngle.roll_deg,\n \n \n rpcEulerAngle.pitch_deg,\n \n \n rpcEulerAngle.yaw_deg\n )", "def polar_angle(self, p0, p1=None):\n if p1 == None:\n p1 = anchor\n y_span = p0[1] - p1[1]\n x_span = p0[0] - p1[0]\n return atan2(y_span, x_span)", "def translate_to_rpc(self, rpcEulerAngle):\n\n \n \n \n rpcEulerAngle.roll_deg = self.roll_deg\n \n \n \n \n \n rpcEulerAngle.pitch_deg = self.pitch_deg\n \n \n \n \n \n rpcEulerAngle.yaw_deg = self.yaw_deg", "def propagator(self, t, method='SOS'):\n\n\n # construct 
total liouvillian\n if self.R is None:\n raise TypeError('Redfield tensor is not computed. Please call redfield_tensor()')\n\n\n if method == 'EOM':\n\n U = expm(self.R, t)\n\n # store the Green's function for future use G(t) = -1j * (t>0) * U(t)\n # G = np.zeros((self.dim**2, self.dim**2, len(t)), dtype=complex)\n # for n in range(len(t)):\n # G[:,:, n] = -1j * U[n].toarray()\n\n # G = -1j * np.dstack(U)\n\n U = [_.toarray() for _ in U]\n\n self.U = np.dstack(U)\n\n elif method in ['eseries', 'SOS']:\n\n evals1, U1 = eig(self.R.toarray())\n\n U2 = scipy.linalg.inv(U1)\n\n E = np.exp(evals1[:,np.newaxis] * t[np.newaxis,:])\n # self.U = np.einsum('aj, jk, jb -> abk', U1, E, U2)\n self.U = oe.contract('aj, jk, jb -> abk', U1, E, U2)\n\n self.G = -1j * self.U\n\n return self.U", "def _euler_spun_90(self):\n # specifying variables from shape using .self and the cutting coordinates to fit my old definition\n effective_radius = self.radius\n start_point = cutting_coordinates(self.original_shape[0], self.original_shape[1], self.radius)[0]\n bend_point = self.original_shape[1]\n end_point = cutting_coordinates(self.original_shape[2], self.original_shape[1], self.radius)[0]\n end_cut = cutting_coordinates(self.original_shape[2], self.original_shape[1], self.radius)\n\n # Uses Euler curve definition with 90 degree angle for l bend and rotates for input angle\n standard_curve = self._euler_90_algorithm()\n\n # determine which direction the curve is going by comparing the start and end point and input angle\n direction = self._left_or_right()\n input_angle = np.arctan2((bend_point[1] - start_point[1]), (bend_point[0] - start_point[0]))\n\n # calculates the cutting positions around the bend point from the input radius\n first_cut = start_point\n second_cut = end_point\n\n # Rotate the outputs by rotation matrix and offset, flip y depending on left or right\n spun_x = []\n for elems in standard_curve:\n spun_x.append(elems[0] * np.cos(input_angle) + elems[1] * -1. 
* np.sin(input_angle) * direction + first_cut[0])\n\n spun_y = []\n for elems in standard_curve:\n spun_y.append(elems[0] * np.sin(input_angle) + elems[1] * np.cos(input_angle) * direction + first_cut[1])\n\n # combine the flipped(or not) y's and normal x's\n full_spun = zip(spun_x, spun_y)\n\n # remove final point and replace it with the end point just to avoid the radius scaling issue\n # need another definition because the structure needs to a list of 1 tuple!\n full_shape = full_spun[:-1] + end_cut\n return full_shape", "def standarize_euler(euler: np.ndarray, in_radian=True) -> np.ndarray:\n if not in_radian:\n euler = np.radians(euler)\n return np.where(\n euler<0, \n (euler+2.0*np.pi)%np.array([2.0*np.pi,np.pi,2.0*np.pi]),\n euler%(2*np.pi)\n )", "def angles(self, num: int) -> Iterable[float]:\n if num < 2:\n raise ValueError(\"num >= 2\")\n start = self.dxf.start_angle % 360\n stop = self.dxf.end_angle % 360\n if stop <= start:\n stop += 360\n for angle in linspace(start, stop, num=num, endpoint=True):\n yield angle % 360", "def ModuloCorrection(angle):\n \n \n newAngle = np.array([])\n for i in angle:\n if abs(i) <= np.pi:\n newAngle = np.append(newAngle, i)\n \n else:\n sign = i/np.abs(i) \n halfRotations = int((np.abs(i)) // np.pi)\n remainder = ((np.abs(i)) % np.pi)\n if (halfRotations % 2) == 0:\n newAngle = np.append(newAngle, sign*remainder)\n else:\n newAngle = np.append(newAngle, sign*(remainder - np.pi))\n \n return newAngle", "def angles(self):\n self._sort_measurements()\n return self._angles", "def parangle(ra, dec, utdate, uttime, site, verbose=False):\n # degrees per radian\n degrad = 180. * u.deg /(np.pi * u.rad)\n\n l_ra = ra.strip()\n l_dec = dec.strip()\n if '-' not in l_dec and l_dec[0] != '+':\n l_dec = '+' + l_dec\n\n # Coordinate object\n coord = SkyCoord(l_ra,l_dec,frame='icrs',unit = (u.hr, u.deg))\n\n # Observation time\n obs_time = Time(utdate + 'T' + uttime, format='isot', scale='utc')\n\n # Location\n location = EarthLocation.of_site(site)\n if verbose:\n print('Site: ', location)\n\n altaz = coord.transform_to(AltAz(obstime=obs_time, location=location))\n if verbose:\n print('Alt/Az: ', altaz.alt.deg, altaz.az.deg)\n\n # Hour angle\n ha = np.arcsin(-np.sin(altaz.az) * np.cos(altaz.alt) / np.cos(coord.dec))\n if verbose:\n print('HA: ', ha)\n\n # Parallactic angle\n parang = -degrad * np.arctan2(-np.sin(ha),\n np.cos(coord.dec) * np.tan(location.lat) - np.sin(coord.dec) * np.cos(ha))\n\n return parang", "def cal_eta(self):\n\n if not self.check_def(['E','px','py','pz']):\n sys.exit('Particle error: Quadri impulsion not define (error for eta routine)')\n \n theta=math.acos(self.pz/math.sqrt(self.px**2+self.py**2+self.pz**2))\n self.eta=-math.log(math.tan(theta/2.0))", "def ang_deflection(p=75, f=1e11, p1=database['K+'], p2=database['pi+'],\r\n L_t=79.6, l=2.74, E=1e6,delta_p=1.6e-2):\r\n m1 = p1.mass\r\n m2 = p2.mass\r\n L = L_t - (2*l)\r\n A = (e*E*l)/(p*c)\r\n tau_21 = (2*np.pi*((L*f)/(c))*((1/beta(p, m2))-(1/beta(p, m1))))*(1-(2*delta_p))\r\n# tau_21 = ((np.pi*L*f)/c)*((m2**2-m1**2)/p**2)\r\n return abs(np.arctan(2*A*np.sin(tau_21/2))*1e3)", "def projection_angles(name):\n if name == 'xy':\n return 0, 0, 0\n elif name == 'xz':\n return -np.pi/2, 0, 0\n elif name == 'yz':\n return -np.pi/2, 0, -np.pi/2\n elif name == 'yx':\n return 0, np.pi, np.pi/2\n elif name == 'zx':\n return np.pi/2, np.pi/2, 0\n elif name == 'zy':\n return np.pi, np.pi/2, np.pi\n else:\n raise ValueError('Invalid projection name: {!r}.'.format(name))", "def calcScatterAngleOld(R, 
PHI, THETA, sun_rotation):\n \n H_rot = atmo_utils.calcRotationMatrix(sun_rotation)\n\n X_ = R * np.sin(THETA) * np.cos(PHI)\n Y_ = R * np.sin(THETA) * np.sin(PHI)\n Z_ = R * np.cos(THETA)\n \n XYZ_dst = np.vstack((X_.ravel(), Y_.ravel(), Z_.ravel(), np.ones(R.size)))\n XYZ_src_ = np.dot(H_rot, XYZ_dst)\n \n Z_rotated = XYZ_src_[2, :]\n R_rotated = np.sqrt(np.sum(XYZ_src_[:3, :]**2, axis=0))\n \n angle = np.arccos(Z_rotated/(R_rotated+amitibo.eps(R_rotated)))\n \n return angle", "def Phi(l,m,theta,phi):\n Psilm_th, Psilm_ph=Psi(l,m,theta,phi);\n Philm_th=-Psilm_ph;\n Philm_ph=+Psilm_th;\n return Philm_th, Philm_ph", "def polar(self):\n assert self.is_compact(), \"Not a polytope.\"\n\n verts = [list(v() - self.center()) for v in self.vertex_generator()]\n return Polyhedron(ieqs = [[1] + list(v) for v in verts], \n field = self.field())", "async def attitude_euler(self):\n\n request = telemetry_pb2.SubscribeAttitudeEulerRequest()\n attitude_euler_stream = self._stub.SubscribeAttitudeEuler(request)\n\n try:\n async for response in attitude_euler_stream:\n \n\n \n yield EulerAngle.translate_from_rpc(response.attitude_euler)\n finally:\n attitude_euler_stream.cancel()", "def _rotation_matrix_to_euler_angles(self, R):\n assert (self._is_rotation_matrix(R))\n\n sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])\n singular = sy < 1e-6\n\n if not singular:\n x = math.atan2(R[2, 1], R[2, 2])\n y = math.atan2(-R[2, 0], sy)\n z = math.atan2(R[1, 0], R[0, 0])\n else:\n x = math.atan2(-R[1, 2], R[1, 1])\n y = math.atan2(-R[2, 0], sy)\n z = 0\n\n return np.array([x, y, z])", "def angles(self,compass=0,vertical=0,roll=0):\n self.matrix = makeMatrix(compass,vertical,roll)", "def add_euler_error(df):\n df[\"T_vehicle_attitude_setpoint_0__NF_e_roll\"] = mpd.angle_wrap(\n df[\"T_vehicle_attitude_setpoint_0__F_roll_body\"]\n - df[\"T_vehicle_attitude_0__NF_roll\"]\n )\n df[\"T_vehicle_attitude_setpoint_0__NF_e_pitch\"] = mpd.angle_wrap(\n df[\"T_vehicle_attitude_setpoint_0__F_pitch_body\"]\n - df[\"T_vehicle_attitude_0__NF_pitch\"]\n )\n df[\"T_vehicle_attitude_setpoint_0__NF_e_yaw\"] = mpd.angle_wrap(\n df[\"T_vehicle_attitude_setpoint_0__F_yaw_body\"]\n - df[\"T_vehicle_attitude_0__NF_yaw\"]\n )", "def list_eq_directions(hkl_figure, phi0, n):\n eq_directions = []\n for hkl_eq in cr.equivalent_directions(hkl_figure):\n phi, psi = cr.phi_psi_angles(hkl_eq, phi0, n)\n #if psi <= 90:\n d = {'hkl': hkl_tuple_to_str(hkl_eq),\n 'phi': phi,\n 'psi': psi}\n eq_directions.append(d)\n\n return eq_directions", "def angle(self, dates, values, angle_type):\n \n print(\"Angels running...\")\n exa_days = []\n exa_idx, extms = self.extrema(values, angle_type)\n for i in range(len(exa_idx)):\n exa_days.append(dates[exa_idx[i]])\n def_dates, def_point, k = self.calAng(exa_days, extms, angle_type)\n print(\"Angles done!\")\n return def_dates, def_point, k", "def get_angles(self):\n return self.chi, self.iota, self.eta", "def to_quaternion(self, method: str = 'chiaverini', **kw) -> np.ndarray:\n q = np.array([1., 0., 0., 0.])\n if method.lower()=='hughes':\n q = hughes(self.A)\n if method.lower()=='chiaverini':\n q = chiaverini(self.A)\n if method.lower()=='shepperd':\n q = shepperd(self.A)\n if method.lower()=='itzhack':\n q = itzhack(self.A, version=kw.get('version', 3))\n if method.lower()=='sarabandi':\n q = sarabandi(self.A, eta=kw.get('threshold', 0.0))\n return q/np.linalg.norm(q)", "def run_lpme(self) -> np.array:\n q = self.sphere.n\n signs = []\n for i in range(q):\n a = np.ones(q)\n a = a / np.sqrt(q)\n a_prime = 
np.copy(a)\n a_prime[i] = -a_prime[i]\n\n z_a = a * self.sphere.radius + self.sphere.origin\n z_a_prime = a_prime * self.sphere.radius + self.sphere.origin\n\n if self.oracle.compare(z_a, z_a_prime):\n signs.append(1.0)\n else:\n signs.append(-1.0)\n\n orthants = initialize_orthants(signs)\n\n # number of cycles\n nc = 4\n theta_list = [(orth.start + orth.stop) / 2 for orth in orthants]\n for _ in range(0, nc):\n for j in range(0, q - 1):\n theta_a = orthants[j].start\n theta_b = orthants[j].stop\n while abs(theta_b - theta_a) > self.e:\n theta_c = (theta_a * 3 + theta_b) / 4\n theta_d = (theta_a + theta_b) / 2\n theta_e = (theta_a + theta_b * 3) / 4\n\n theta_list[j] = theta_a\n vec_a = compute_vector(self.sphere, theta_list)\n\n theta_list[j] = theta_b\n vec_b = compute_vector(self.sphere, theta_list)\n\n theta_list[j] = theta_c\n vec_c = compute_vector(self.sphere, theta_list)\n\n theta_list[j] = theta_d\n vec_d = compute_vector(self.sphere, theta_list)\n\n theta_list[j] = theta_e\n vec_e = compute_vector(self.sphere, theta_list)\n\n # compare ac\n cac = self.oracle.compare(vec_a, vec_c)\n ccd = self.oracle.compare(vec_c, vec_d)\n cde = self.oracle.compare(vec_d, vec_e)\n ceb = self.oracle.compare(vec_e, vec_b)\n self.num_queries += 4\n\n if self.check_i:\n context = {\n \"theta_list\": theta_list,\n \"j\": j,\n \"theta_a\": theta_a,\n \"theta_b\": theta_b,\n \"theta_c\": theta_c,\n \"theta_d\": theta_d,\n \"theta_e\": theta_e,\n }\n self.check_inconsistency(cac, ccd, cde, ceb, context)\n\n if cac:\n theta_b = theta_d\n elif ccd:\n theta_b = theta_d\n elif cde:\n theta_a = theta_c\n theta_b = theta_e\n elif ceb:\n theta_a = theta_d\n else:\n theta_a = theta_d\n\n # update theta list\n theta_list[j] = (theta_a + theta_b) / 2\n\n # save theta list\n self.theta_list = theta_list\n return normalize(compute_vector(self.sphere, theta_list) - self.sphere.origin)", "def create_spiral(r1, r2, N):\n Pi = 3.141592\n points = []\n finished = [False]\n\n def rad(phi):\n return phi / (2 * Pi)\n\n def ang(rad):\n return 2 * Pi * rad\n\n def coord(phi):\n r = rad(phi)\n return (r * sin(phi), r * cos(phi))\n\n def fullcoord(phi, z):\n c = coord(phi)\n return [c[0], c[1], z]\n\n def dist(phi1, phi2):\n c1 = coord(phi1)\n c2 = coord(phi2)\n d = sqrt((c1[1] - c2[1]) ** 2 + (c1[0] - c2[0]) ** 2)\n return d\n\n def nextphi(phi):\n phi1 = phi\n phi2 = phi + 0.7 * Pi\n mid = phi2\n while abs(dist(phi, mid) - 1) > 0.00001:\n mid = (phi1 + phi2) / 2.\n if dist(phi, mid) > 1:\n phi2 = mid\n else:\n phi1 = mid\n return mid\n\n def prevphi(phi):\n\n phi1 = phi\n phi2 = phi - 0.7 * Pi\n mid = phi2\n\n while abs(dist(phi, mid) - 1) > 0.00001:\n mid = (phi1 + phi2) / 2.\n if dist(phi, mid) > 1:\n phi2 = mid\n else:\n phi1 = mid\n return mid\n\n def add_point(point, points=points, finished=finished):\n if (len(points) == N) or (finished[0] == True):\n points = np.array(points)\n finished[0] = True\n print(\"finished!!!\")\n else:\n points.append(point)\n\n z = 0\n forward = True\n curphi = ang(r1)\n add_point(fullcoord(curphi, z))\n while True:\n if finished[0] == True:\n return np.transpose(points)\n if forward == True:\n curphi = nextphi(curphi)\n add_point(fullcoord(curphi, z))\n if (rad(curphi) > r2):\n forward = False\n z += 1\n add_point(fullcoord(curphi, z))\n else:\n curphi = prevphi(curphi)\n add_point(fullcoord(curphi, z))\n if (rad(curphi) < r1):\n forward = True\n z += 1\n add_point(fullcoord(curphi, z))", "def euler_to_rodrigues(X_params):\n data_samples = X_params.shape[0]\n pose_euler = 
np.array([X_params[:, i:i+3] for i in range(0, 72, 3)])\n #print(pose_euler[0][0])\n #pose_euler = pose_euler.reshape((24, data_samples, 1, 3))\n #print(pose_euler[0][0])\n print(\"pose_euler shape: \" + str(pose_euler.shape))\n #R = np.array([[eulerAnglesToRotationMatrix(vector) for vector in vectors] for vectors in pose_euler])\n #print(\"R shape: \" + str(R.shape))\n #print(R[0][0])\n #R = R.reshape((data_samples, 24, 3, 3))\n\n #pose_params = np.array([[Rot.from_dcm(rot_mat).as_rotvec() for rot_mat in param_rot_mats] for param_rot_mats in R])\n pose_params = np.array([Rot.from_euler('xyz', vectors, degrees=False).as_rotvec() for vectors in pose_euler])\n print(\"pose_params shape: \" + str(pose_params.shape))\n pose_params = pose_params.reshape((data_samples, 72))\n print(\"pose_params shape: \" + str(pose_params.shape))\n print(\"other params shape: \" + str(X_params[:, 72:85].shape))\n X_params = np.concatenate([pose_params, X_params[:, 72:85]], axis=1)\n print(\"X_params shape: \" + str(X_params.shape))\n\n return X_params", "def calculate_and_encrypt_angles(self, positions: List):\n angles = []\n for pos in positions:\n vertex = self.__client_knowledge[pos]\n cmds = vertex.get_commands()\n for cmd in cmds:\n if cmd.name == 'M':\n # Calculate the adaptive angle\n signal_s = sum([self.__client_knowledge[pos].get_outcome() for pos in cmd.domain_s]) % 2\n signal_t = sum([self.__client_knowledge[pos].get_outcome() for pos in cmd.domain_t]) % 2\n adaptive_angle = (-1) ** signal_s * cmd.angle + signal_t * pi\n\n # Encrypt each angle with rotation and flipping method\n encrypted_angle = adaptive_angle + \\\n vertex.get_rotation_encryption_angle() + \\\n vertex.get_flipping_encryption_angle()\n angles.append(encrypted_angle)\n\n else:\n continue\n return angles", "def euler2rot3D(psi, theta, phi):\n Rphi = np.array([[np.cos(phi), np.sin(phi), 0],\n [-np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]])\n Rtheta = np.array([[np.cos(theta), 0, -np.sin(theta)],\n [0, 1, 0],\n [np.sin(theta), 0, np.cos(theta)]])\n Rpsi = np.array([[np.cos(psi), np.sin(psi), 0],\n [-np.sin(psi), np.cos(psi), 0],\n [0, 0, 1]])\n return np.dot(Rpsi, np.dot(Rtheta, Rphi))", "def angles(self) -> list[npt.NDArray[np.float_]]:\n result = []\n a = cast(Segment, self.edges[-1])\n for b in self.edges:\n b = cast(Segment, b)\n result.append(angle(a.vertices[1], a.vertices[0], b.vertices[1]))\n a = b\n\n return result", "def angles(self):\n return self._angles", "def euler2quaternion(psi, theta, phi):\n if abs(psi) == 0 and abs(theta) == 0 and abs(phi) == 0:\n quaternion = np.array([1., 0., 0., 0.])\n else:\n R = euler2rot3D(psi, theta, phi)\n W = np.array([R[1, 2]-R[2, 1], R[2, 0]-R[0, 2], R[0, 1]-R[1, 0]])\n if W[0] >= 0:\n W /= np.linalg.norm(W)\n else:\n W /= np.linalg.norm(W) * -1\n theta = np.arccos(0.5 * (np.trace(R) - 1))\n CCisTheta = corrCoeff(R, angleAxis2rot3D(W, theta))\n CCisNegTheta = corrCoeff(R, angleAxis2rot3D(W, -theta))\n if CCisNegTheta > CCisTheta:\n theta = -theta\n quaternion = np.array([np.cos(theta/2.), np.sin(theta/2.)*W[0], np.sin(theta/2.)*W[1], np.sin(theta/2.)*W[2]])\n if quaternion[0] < 0:\n quaternion *= -1\n return quaternion", "def phi(self):\n if self._phi is None:\n b = self.stem + self.counter + self.radius + (\n self.stem - 2 * self.radius) - self.overlap\n c = sqrt(self.a ** 2 + b ** 2)\n _phi = atan2(self.a, b)\n b = sqrt(c ** 2 - self.radius ** 2)\n _phi += atan2(self.radius, b)\n self._phi = _phi\n return self._phi", "def rad_to_deg(angles, to_list=False, right_hand=False):\n angles = 
np.asarray(angles)\n angles *= 180 / np.pi\n\n if right_hand:\n angles[0] *= -1\n\n if to_list:\n angles = list(angles)\n\n return angles", "def __generate_LSP_angles__(self):\n self.LSP_ANGLES = np.linspace(0, self._range_lsp_angle, ArrayInfo.len_lsp) - (self._range_lsp_angle / 2)\n self.LSP_MIN_ANGLE = np.min(self.LSP_ANGLES) - 0.5 # Angles outside of this range are discarded\n self.LSP_MAX_ANGLE = np.max(self.LSP_ANGLES) + 0.5 # Angles outside of this range are discarded", "def Rpy(angle=0, units='deg'):\n\n if(units=='deg'):\n angle = angle*pi/180\n\n C = np.cos(angle)\n S = np.sin(angle)\n\n M = np.identity(3)\n\n M[0,0] = +C\n M[0,2] = +S\n M[2,0] = -S\n M[2,2] = +C\n\n return M", "def deg2rad(a):", "def _generate_panel_quaternions(self, et_start: float, et_end: float, step_s: float) -> List[MappsTimedQuaternion]:\n quaternions = []\n ets = range(int(et_start), int(et_end), step_s)\n n = len(ets)\n counter_pct = 0\n for i, et in enumerate(ets):\n if 100 * i / n > counter_pct:\n print(f\"Progress: {counter_pct} %\")\n counter_pct += 10\n JUICE_Y_in_J2000 = spy.spkcpt([0.0, 1.0, 0.0], self.probe, f\"{self.probe}_SPACECRAFT\", et, \"J2000\",\n \"OBSERVER\", \"NONE\", self.probe)[0][0:3]\n JUICE_SUN_in_J2000 = spy.spkpos(\"SUN\", et, \"J2000\", \"LT+S\", self.probe)[0]\n\n new_X, nY = self._find_new_XY_directions(JUICE_Y_in_J2000, JUICE_SUN_in_J2000)\n\n utc_time_string = spy.et2utc(et, \"ISOC\", 0) + \"Z\"\n quaternions.append(MappsTimedQuaternion(utc_time_string, *self._create_quaternion(new_X, nY)))\n\n return quaternions", "def phiprime_phi(phi):\n f = 0.0022927\n phiprime = np.arctan2(np.tan(phi*np.pi/180.),(1.-f)**2.)*180./np.pi\n return phiprime", "def LFEvtsHPS(masses, LFInfo, NSamp):\n mS, me = masses\n mK, mpi = LFInfo\n gN0 = gN(mK, mpi, mS)\n bN0 = bN(mK, mpi, mS)\n BM = BoostMat(bN0, gN0)\n\n EsAngles = []\n RFs = RFHPS(masses, NSamp)\n for ei in RFs:\n pmLF = np.dot(BM, ei[0])\n ppLF = np.dot(BM, ei[1])\n EsAngles.append(np.concatenate([pmLF, ppLF, [1.0]]))\n return EsAngles" ]
[ "0.6162403", "0.60886765", "0.5816294", "0.5785667", "0.5628353", "0.5463198", "0.54578865", "0.5440061", "0.5435098", "0.5419207", "0.53673875", "0.53652376", "0.5361059", "0.5337354", "0.5325969", "0.53146446", "0.52769536", "0.52645385", "0.52579445", "0.52271026", "0.52208894", "0.5220837", "0.5214414", "0.5214414", "0.5214414", "0.5214268", "0.5204542", "0.51765436", "0.5169213", "0.5155186", "0.514523", "0.5137997", "0.5127123", "0.51239145", "0.5115777", "0.51079357", "0.5107769", "0.5105735", "0.507797", "0.50762904", "0.50660086", "0.5061832", "0.50613034", "0.50607336", "0.5051198", "0.5046622", "0.5042468", "0.50284845", "0.5028325", "0.50279915", "0.5025223", "0.50208896", "0.5018237", "0.49926957", "0.49797115", "0.4969872", "0.49669516", "0.49643698", "0.49622706", "0.49463198", "0.49449465", "0.49405953", "0.49327365", "0.49300352", "0.49295625", "0.49288255", "0.4925335", "0.4923028", "0.49224675", "0.49208614", "0.49197155", "0.49189088", "0.49152285", "0.49100795", "0.49079794", "0.49058127", "0.4899866", "0.4894984", "0.4889409", "0.48833078", "0.48770648", "0.4871258", "0.4863327", "0.48552033", "0.48549607", "0.48546225", "0.48534584", "0.4851408", "0.48427314", "0.48395908", "0.48384735", "0.4833279", "0.48293334", "0.48247683", "0.48232692", "0.48187143", "0.48186073", "0.48169687", "0.48135155", "0.48099667" ]
0.6891914
0
Perform PCA on stack file and Get eigen images
def eigen_images_get(stack, eigenstack, mask, num, avg): from utilities import get_image a = Analyzers.get('pca_large') e = EMData() if(avg == 1): s = EMData() nima = EMUtil.get_image_count(stack) for im in xrange(nima): e.read_image(stack,im) e *= mask a.insert_image(e) if( avg==1): if(im==0): s = a else: s += a if(avg == 1): a -= s/nima eigenimg = a.analyze() if(num>= EMUtil.get_image_count(eigenimg)): num=EMUtil.get_image_count(eigenimg) for i in xrange(num): eigenimg.write_image(eigenstack,i)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def images_pca(images_folder, limit=100, k=3):\n my_images = []\n shape = None\n files = os.listdir(images_folder)\n random.shuffle(files)\n files = files[:limit]\n for study_file in files:\n assert study_file.endswith('.pkl'), 'file %s has wrong extension' % study_file\n with open(os.path.join(images_folder, study_file), 'rb') as f:\n study = pickle.load(f)\n for slice_ in study['sax']:\n myframe = random.choice(study['sax'][slice_])\n assert shape is None or shape == myframe['pixel'].shape, 'inconsistent image shapes'\n shape = myframe['pixel'].shape\n my_images.append(myframe['pixel'])\n\n X = np.zeros((len(my_images), my_images[0].size))\n for i, img in enumerate(my_images):\n X[i] = img.reshape(img.size)\n\n V, eig = pca(X)\n V = V.reshape((k, shape[0], shape[1]))\n return V, eig", "def doPCA(self):\n data = [l.points for l in self.preprocessedLandmarks]\n data.append(data[0])\n\n S = np.cov(np.transpose(data))\n\n eigenvalues, eigenvectors = np.linalg.eig(S)\n sorted_values = np.flip(eigenvalues.argsort(), 0)[:self.pcaComponents]\n\n self.eigenvalues = eigenvalues[sorted_values]\n self.eigenvectors = eigenvectors[:, sorted_values]\n # print(self.eigenvalues)\n return self", "def pca(X = Math.array([]), no_dims = 50):\n\n print \"Preprocessing the data using PCA...\"\n (n, d) = X.shape;\n X = X - Math.tile(Math.mean(X, 0), (n, 1));\n (l, M) = Math.linalg.eig(Math.dot(X.T, X));\n Y = Math.dot(X, M[:,0:no_dims]);\n return Y;", "def pca():\n pca = PCA()\n\n data = pca.fit_transform([[22,23,24],[23,84,12],[22,74,54],[22,23,24],[22,84,12],[22,74,54],[22,23,24],[22,84,12],[22,74,54]])\n\n print(data)", "def getPCA(data):\n #covM = np.cov(data.T) #note that np.cov define row as variables, col as observations\n #corM = np.corrcoef(data.T) # we will use correlation matrix instead of cov.\n covM = np.cov(data.T)\n eigvalue,eigvector = np.linalg.eig(covM) # each col of the eigvector matrix corresponds to one eigenvalue. So, each col is the coeff of one component\n pca = np.dot(data,eigvector) # each col is one pca, each row is one obs in that pca. 
\n return eigvalue,eigvector,pca", "def pca(self, X):\n return ImgCompression.svd(self, X)", "def compute_pca(image_set):\n\n # Check for valid input\n assert(image_set[0].dtype == np.uint8)\n\n # Reshape data into single array\n reshaped_data = np.concatenate([image\n for pixels in image_set for image in\n pixels])\n\n # Convert to float and normalize the data between [0, 1]\n reshaped_data = (reshaped_data / 255.0).astype(np.float32)\n\n # Calculate covariance, eigenvalues, and eigenvectors\n # np.cov calculates covariance around the mean, so no need to shift the\n # data\n covariance = np.cov(reshaped_data.T)\n e_vals, e_vecs = np.linalg.eigh(covariance)\n\n # svd can also be used instead\n # U, S, V = np.linalg.svd(mean_data)\n\n pca = np.sqrt(e_vals) * e_vecs\n\n return pca", "def pca(data):\n mean = data.sum(axis=0) / data.shape[0]\n # show_image(mean)\n cv_matrix = np.cov(data.T)\n e_values, e_vectors = la.eig(cv_matrix)\n return e_values, e_vectors.T, mean", "def pca(filename, class_col, sample):\n\n\tX = ml.read_file( filename )\n\n\t# Remove the class label from the dataset so that it doesn't prevent us from training a classifier in the future\n\tif class_col != None:\n\t\ttry:\n\t\t\tclassifier = ml.pd.DataFrame(X.iloc[:, class_col])\n\t\texcept:\n\t\t\tml.sys.exit('Class column out of range.')\n\t\tm = X.shape[1]\n\t\tkeepers = list(range(m))\n\t\tkeepers.pop( class_col )\n\n\t# Determine whether sample is present\n\tX_input = X.iloc[:, keepers]\n\n\t# # Visualize raw data\n\tml.plt.figure()\n\tml.sns.scatterplot(data = X, x = X_input['Petal Length (cm)'], y = X_input['Petal Width (cm)'], color = 'k', alpha = 0.5).set(title = filename + ' raw')\n\n\t# Normalize features by Z-score (so that features' units don't dominate PCs), and apply PCA\n\tX_norm, X_mean, X_std = ml.z_norm(X_input)\n\tY, P, e_scaled = ml.pca_cov( X_norm )\n\n\t# Visualize 2D PC data\n\tml.plt.figure()\n\tml.sns.scatterplot(data = Y, x = Y.iloc[:, 0], y = Y.iloc[:, 1], alpha=0.5, color = 'k').set(title = 'PC 2D Projection')\n\n\t# Visualize PCs with heatmap and cree plot\n\tinfo_retention = ml.scree_plot( e_scaled )\n\tml.pc_heatmap( P, info_retention )\n\n\t# Reconstruct data\n\treconstruct(X_input, X_mean, X_std, Y, P, e_scaled, 2, 3)\n\n\tml.plt.show()", "def pca(frame,columns=[],k=320,frame_type='spark'):\n if frame_type == 'spark':\n # https://stackoverflow.com/questions/33428589/pyspark-and-pca-how-can-i-extract-the-eigenvectors-of-this-pca-how-can-i-calcu/33481471\n from numpy.linalg import eigh\n from pyspark.ml.linalg import Vectors\n from pyspark.ml.feature import VectorAssembler\n from pyspark.ml.feature import StandardScaler\n from pyspark.ml import Pipeline\n\n assembler = VectorAssembler(\n inputCols=columns,\n outputCol=\"features\")\n scaler = StandardScaler(inputCol=assembler.getOutputCol(),\n outputCol=\"scaledFeatures\",\n withStd=False,\n withMean=True)\n pipeline = Pipeline(stages=[assembler,scaler])\n model = pipeline.fit(frame)\n df = model.transform(frame)\n\n def estimateCovariance(df):\n \"\"\"Compute the covariance matrix for a given dataframe.\n\n Note:\n The multi-dimensional covariance array should be calculated using outer products. 
Don't\n forget to normalize the data by first subtracting the mean.\n\n Args:\n df: A Spark dataframe with a column named 'features', which (column) consists of DenseVectors.\n\n Returns:\n np.ndarray: A multi-dimensional array where the number of rows and columns both equal the\n length of the arrays in the input dataframe.\n \"\"\"\n import numpy as np\n m = df.select(df['scaledFeatures']).map(lambda x: x[0]).mean()\n dfZeroMean = df.select(df['scaledFeatures']).map(lambda x: x[0]).map(lambda x: x-m) # subtract the mean\n\n return dfZeroMean.map(lambda x: np.outer(x,x)).sum()/df.count()\n\n cov = estimateCovariance(df)\n col = cov.shape[1]\n eigVals, eigVecs = eigh(cov)\n inds = np.argsort(eigVals)\n eigVecs = eigVecs.T[inds[-1:-(col+1):-1]]\n components = eigVecs[0:k]\n eigVals = eigVals[inds[-1:-(col+1):-1]] # sort eigenvals\n score = df.select(df['scaledFeatures']).map(lambda x: x[0]).map(lambda x: np.dot(x, components.T) )\n\n #Show the Variance explained\n print('Vairance Explained:', sum(eigVals[0:k])/sum(eigVals) )\n\n # Return the `k` principal components, `k` scores, and all eigenvalues\n return components.T, score, eigVals\n elif frame_type in ['h2o','pandas']:\n raise Exception('Not Implemented yet.')", "def _pca(self):\n mean_beam = np.mean(self.beam_images, axis=1, keepdims=False)\n mask = self.mask\n beam_images = self.beam_images[:, :self.n_beam_images]\n\n # Subtract mean_beam from images and apply the mask. Element-wise\n # multiplication and subtraction using numpy broadcasting (as commented\n # out below) requires 3 large matrices in memory at an intermediate\n # point in the computation, namely right after (beam_images -\n # mean_beam_2d) is evaluated and memory for centered_masked_images is\n # allocated.\n # mask_2d = mask[:,np.newaxis]\n # mean_beam_2d = mean_beam[:,np.newaxis]\n # centered_masked_images = mask_2d * (beam_images - mean_beam_2d)\n\n # Instead of that direct approach, use self._center_and_mask_numba() or\n # self._center_and_mask_in_place(). As of this writing the _in_place\n # version is faster, but this may change in the future since the numba\n # version supports parallelization.\n centered_masked_images = self._center_and_mask_in_place(\n beam_images,\n mask,\n mean_beam,\n )\n # centered_masked_images should be C-contiguous already but it's good to\n # make sure.\n centered_masked_images = np.ascontiguousarray(centered_masked_images)\n\n # Compute the masked principal components\n # -1 since last eigenvector isn't necessarily orthogonal to the others.\n n_eigs = min(self.n_beam_images - 1, self.max_principal_components)\n n_eigs = max(n_eigs, 1) # Need at least one.\n # .T means transpose, @ means matrix multiplication.\n cov_mat = centered_masked_images.T @ centered_masked_images\n del centered_masked_images # Free up memory.\n if self.use_sparse_routines:\n variances, principal_components = eigsh(\n cov_mat, k=n_eigs, which='LM')\n else:\n eigvals_param = (\n self.n_beam_images - n_eigs,\n self.n_beam_images - 1)\n # overwrite_a might reduce memory usage\n variances, principal_components = eigh(\n cov_mat, eigvals=eigvals_param, overwrite_a=True)\n del cov_mat # Free up memory.\n\n # Reverse ordering to put largest eigenvectors/eigenvalues first\n principal_components = np.fliplr(principal_components)\n variances = np.flip(variances)\n\n # principal_components isn't always C-contiguous, and when it's not the\n # matrix multiplication below becomes extremely slow. 
It's much faster\n # to make it C-contiguous first so that numpy can use faster matrix\n # multiplication routines behind the scenes.\n principal_components = np.ascontiguousarray(principal_components)\n\n # Construct the un-masked basis vectors.\n centered_images = beam_images - mean_beam[:, np.newaxis]\n # centered_images should be C-contiguous already but it's good to make\n # sure.\n centered_images = np.ascontiguousarray(centered_images)\n principal_components = centered_images @ principal_components\n del centered_images # Free up memory.\n\n # As of this writing, self._normalize_vectorized() is faster than using\n # self._normalize_numba() despite the fact that the latter is uses numba\n # and allows for parallelization. That may change in the future though.\n principal_components = self._normalize_vectorized(\n principal_components,\n mask,\n )\n\n return mean_beam, principal_components, variances", "def principle_component_analysis(data_frame, dim=2):\n pca = PCA(n_components=dim)\n sc = StandardScaler()\n y = data_frame.loc[:, [\"Label\"]].values\n x = pd.DataFrame(data_frame[\"Vector\"].tolist())\n x = sc.fit_transform(x)\n principlecomponents = pca.fit_transform(x)\n principalDf = pd.DataFrame(data=principlecomponents)\n data_frame[\"Vector\"] = principalDf.values.tolist()", "def PCA_subtraction(im, ref_lib, num_PCA_modes):\n print('Performing PCA background subtraction using {} modes'.format(num_PCA_modes))\n #concatenate input image into 1-D array\n im_x = im.shape[1]\n im_y = im.shape[0]\n \n im = im.ravel()\n\n num_PCA_modes = np.array(num_PCA_modes)\n \n # reads list of reference frames into data matrix by first concatenating the 2-D .fits images\n # into 1-D arrays and then row stacking these images into a 2-D np.array\n try:\n ref_frames = np.stack([fits.getdata(ref_lib[i]).ravel() for i in range(len(ref_lib))], axis=0)\n except:\n ref_frames = np.stack([ref_lib[i].ravel() for i in range(len(ref_lib))], axis=0)\n\n # subtracts the mean of each reference frame from each reference frame \n ref_frames_mean_sub = ref_frames - np.nanmean(ref_frames, axis=1)[:, None]\n ref_frames_mean_sub[np.where(np.isnan(ref_frames_mean_sub))] = 0\n \n # import pdb; pdb.set_trace()\n # creates covariance matrix from mean subtracted reference frames \n covar_psfs = np.cov(ref_frames_mean_sub)\n tot_basis = covar_psfs.shape[0]\n \n num_PCA_modes = np.clip(num_PCA_modes - 1, 0, tot_basis-1) # clip values, for output consistency we'll keep duplicates\n max_basis = np.max(num_PCA_modes) + 1 # maximum number of eigenvectors/KL basis we actually need to use/calculate\n \n # calculates eigenvalues and eigenvectors of the covariance matrix, but only the ones we need (up to max basis)\n evals, evecs = la.eigh(covar_psfs, eigvals=(tot_basis-max_basis, tot_basis-1))\n \n evals = np.copy(evals[::-1])\n evecs = np.copy(evecs[:,::-1], order='F') \n \n # calculates the PCA basis vectors\n basis_vecs = np.dot(ref_frames_mean_sub.T, evecs)\n basis_vecs = basis_vecs * (1. 
/ np.sqrt(evals * (np.size(im) - 1)))[None, :] #multiply a value for each row\n \n #subtract off the mean of the input frame\n im_mean_sub = im - np.nanmean(im)\n \n # duplicate science image by the max_basis to do simultaneous calculation for different number of PCA modes\n im_mean_sub_rows = np.tile(im_mean_sub, (max_basis, 1))\n im_rows_selected = np.tile(im_mean_sub, (np.size(num_PCA_modes), 1)) # this is the output image which has less rows\n \n # bad pixel mask\n # do it first for the image we're just doing computations on but don't care about the output\n im_nanpix = np.where(np.isnan(im_mean_sub_rows))\n im_mean_sub_rows[im_nanpix] = 0\n # now do it for the output image\n im_nanpix = np.where(np.isnan(im_rows_selected))\n im_rows_selected[im_nanpix] = 0\n \n inner_products = np.dot(im_mean_sub_rows, np.require(basis_vecs, requirements=['F']))\n # select the KLIP modes we want for each level of KLIP by multiplying by lower diagonal matrix\n lower_tri = np.tril(np.ones([max_basis, max_basis]))\n inner_products = inner_products * lower_tri\n \n # make a model background for each number of basis vectors we actually output\n model = np.dot(inner_products[num_PCA_modes,:], basis_vecs.T)\n \n # subtract model from input frame for each number of PCA modes chosen\n PCA_sub_images = (im_rows_selected - model).reshape(np.size(num_PCA_modes), im_y, im_x)\n\n #Adding back in the mean to the model so that the model can be subtracted from the original image later. \n if type(num_PCA_modes) is np.int64:\n return PCA_sub_images[0], model.reshape(im_y, im_x)+np.nanmean(im)\n elif type(num_PCA_modes) is np.ndarray:\n return PCA_sub_images, model.reshape(np.size(num_PCA_modes), im_y, im_x)+np.nanmean(im)\n \n else:\n print('Unsupported datatype for variable: num_PCA_modes. 
Variable must be either int or 1-D np.ndarray')", "def get_pca_images(self):\n mean_vector, principal_components, variances = self.pca()\n shape = self.image_shape + (principal_components.shape[1],)\n principal_component_images = principal_components.reshape(shape)\n principal_component_images = np.moveaxis(\n principal_component_images, -1, 0)\n mean_beam = mean_vector.reshape(self.image_shape)\n mask = self.mask.reshape(self.image_shape)\n return mean_beam, mask, principal_component_images, variances", "def pca_detector(data):\n #- 'vol_shape' is the shape of volumes\n vol_shape = data.shape[:-1]\n #- 'n_vols' is the number of volumes\n n_vols = data.shape[-1]\n #- N is the number of voxels in a volume\n N = np.prod(vol_shape)\n\n #- Reshape to 2D array that is voxels by volumes (N x n_vols)\n # transpose to n_vols x N\n X = data.reshape((N, n_vols)).T\n\n \"\"\"\n The first part of the code will use PCA to get component matrix U\n and scalar projections matrix C\n \"\"\"\n\n #- Calculate unscaled covariance matrix for X\n unscaled_cov = X.dot(X.T)\n\n #- Use SVD to return U, S, VT matrices from unscaled covariance\n U, S, VT = npl.svd(unscaled_cov)\n\n #- Calculate the scalar projections for projecting X onto the vectors in U.\n #- Put the result into a new array C.\n C = U.T.dot(X)\n # set nans to 0\n C[np.isnan(C)] = 0\n #- Transpose C\n #- Reshape C to have the 4D shape of the original data volumes.\n C_vols = C.T.reshape((vol_shape + (n_vols,)))\n\n \"\"\"\n The second part of the code determines which voxels are inside the brain\n and which are outside the brain and creates a mask (boolean matrix)\n \"\"\"\n\n #get the mean voxel intensity of entire 4D object\n mean_voxel = np.mean(data)\n #get the mean volume (3D) across time series (axis 3)\n mean_volume = np.mean(data, axis=3)\n #boolean mask set to all voxels above .5 in the first volume\n #(.125 is the SPM criterion but .5 seems like a better threshold)\n mask = mean_volume > (.5 * mean_voxel) #threshold can be adjusted!\n out_mask = ~mask\n\n \"\"\"\n The third part of code finds the root mean square of U from step 1, then uses the\n mask from step 2 to determine which components explain data outside the brain\n Selects these \"bad components\" with high \"outsideness\"\n \"\"\"\n\n #Apply mask to C matrix to get all voxels outside of brain\n outside = C_vols[out_mask]\n #Get RMS of the voxels outside, reflecting \"outsideness\" of this scan\n RMS_out = np.sqrt(np.mean((outside ** 2), axis=0))\n\n #Apply mask to C matrix to get all voxels inside brain\n inside = C_vols[mask]\n #Get RMS of the voxels inside, reflecting \"insideness\" of this scan\n RMS_in = np.sqrt(np.mean((inside ** 2), axis=0))\n\n #The closer this ratio is to 1, the worse the volume\n RMS_ratio = RMS_out / RMS_in\n\n \"\"\"\n The fourth part of the code uses the \"bad components\" to generate a new\n \"bad data set\" and then puts this dataset through the outlier detector\n \"\"\"\n\n #Create a boolean mask for the 10% worst PCs (meaning highest RMS ratio)\n PC_bad = np.percentile(RMS_ratio, 90)\n PC_bad_mask = RMS_ratio > PC_bad\n\n U_bad = U[:, PC_bad_mask]\n C_bad = C[PC_bad_mask]\n\n #generates data set based on the bad PCs and (U and C matrices)\n X_bad = U_bad.dot(C_bad).T.reshape((vol_shape + (n_vols,)))\n\n # calculate outliers using iqr_detector\n _, outliers = mah_detector(X_bad)\n\n return X_bad, outliers", "def pca(image):\n # Reshape image.\n reshaped_image = np.reshape(image, (224 * 224, 3))\n # Find the covariance.\n cov = 
np.cov(reshaped_image, rowvar=0)\n # Eigenvalues and vectors.\n eigvals, eigvecs = np.linalg.eigh(cov)\n\n # Pick random gaussian values.\n a = np.random.normal(0, 0.1, size=(3,))\n\n scaled = eigvals * a\n delta = np.dot(eigvecs, scaled.T)\n return np.add(delta, scaled)", "def get_pca():\n from sklearn.decomposition import PCA\n return PCA()", "def load_pca(self, filepath):\n with open(filepath, 'rb') as f:\n image_shape = np.load(f)\n mean_beam = np.load(f)\n principal_components = np.load(f)\n variances = np.load(f)\n mask = np.load(f)\n\n image_shape = tuple(image_shape)\n if not self._initialised:\n self._init(np.empty(image_shape))\n elif self.image_shape != image_shape:\n msg = 'image shape does not match'\n raise ValueError(msg)\n self.set_mask(mask)\n self.pca_results = mean_beam, principal_components, variances", "def main(desc_key, fxyz, peratom, scale, pca_d, keep_raw=False, output=None, prefix='ASAP'):\n\n if output is None:\n output = prefix + \"-pca-d\" + str(pca_d) + '.xyz'\n peratom = bool(peratom)\n\n # read the xyz file\n frames = ase.io.read(fxyz, ':')\n n_frames = len(frames)\n print('load xyz file: ', fxyz, ', a total of ', str(n_frames), 'frames')\n\n # extract the descriptors from the file\n desc = []\n if n_frames == 1 and not peratom:\n raise RuntimeError('Per-config PCA not possible on a single frame')\n\n # retrieve the SOAP vectors --- both of these throw a ValueError if any are missing or are of wrong shape\n if peratom:\n desc = np.concatenate([a.get_array(desc_key) for a in frames])\n else:\n desc = np.row_stack([a.info[desc_key] for a in frames])\n\n # scale & center\n if scale:\n from sklearn.preprocessing import StandardScaler\n scaler = StandardScaler()\n print('DEBUG: {}'.format(desc.shape))\n print(scaler.fit(desc))\n desc = scaler.transform(desc) # normalizing the features\n\n # fit PCA\n proj, pvec = pca(desc, pca_d)\n # could do with sklearn as well\n # from sklearn.decomposition import PCA\n # pca_sklearn = PCA(n_components=4) # can set svd_solver\n # proj = pca_sklearn.fit_transform(desc)\n # pvec = pca_sklearn.components_\n\n # add coords to info/arrays\n if peratom:\n running_index = 0\n for at in frames:\n n_atoms = len(at)\n at.arrays['pca_coord'] = proj[running_index:running_index + n_atoms, :].copy()\n running_index += n_atoms\n\n if not keep_raw:\n for at in frames:\n del at.arrays[desc_key]\n else:\n for i, at in enumerate(frames):\n at.info['pca_coord'] = proj[i]\n\n if not keep_raw:\n for at in frames:\n del at.info[desc_key]\n\n # save\n ase.io.write(output, frames, write_results=False)", "def pca(tiles: np.ndarray, n_components: int = 5) -> np.ndarray:\n ntiles = len(tiles)\n npix = tiles[0].shape[0]\n tiles = (tiles).reshape(ntiles, npix ** 2)\n # ensure data is mean-centred\n for idx in range(ntiles):\n tiles[idx] -= np.mean(tiles[idx])\n # compute principle components\n pca = PCA(n_components=n_components, whiten=True).fit(tiles)\n # reconstruct independent signals based on orthogonal components\n components = pca.transform(tiles)\n cleaned_tiles = pca.inverse_transform(components)\n return cleaned_tiles.reshape(ntiles, npix, npix)", "def run_pca(data_file, rs, n_components, outfile1, outfile2):\n print('running PCA with n_components={}'.format(n_components))\n day_batcher = DayBatcher(data_file, skiprow=1, delimiter=' ')\n mat = day_batcher.next_batch()\n rst = []\n while mat is not None:\n if mat.shape[1] == 13:\n # use compact10d\n datadict = {'features': mat[:, 3:],\n 'red': mat[:, 2],\n 'user': mat[:, 1],\n 'day': mat[:, 0]}\n 
else:\n # use all_fixed\n datadict = {'features': mat[:, 14:],\n 'red': mat[:, 13],\n 'user': mat[:, 1],\n 'day': mat[:, 0]}\n batch = scale(datadict['features'])\n pca = PCA(n_components=n_components, random_state=rs)\n pca.fit(batch)\n data_reduced = np.dot(batch, pca.components_.T) # pca transform\n data_original = np.dot(data_reduced, pca.components_) # inverse_transform\n pointloss = np.mean(np.square(batch - data_original), axis=1)\n loss = np.mean(pointloss)\n for d, u, t, l, in zip(datadict['day'].tolist(),\n datadict['user'].tolist(),\n datadict['red'].tolist(),\n pointloss.flatten().tolist()):\n rst.append((u, d, l, t))\n mat = day_batcher.next_batch()\n train_rst, test_rst = split_train_test(rst)\n save_rst(train_rst, outfile1)\n save_rst(test_rst, outfile2)\n eval_cr(test_rst, 'pca')", "def runPCA(data, reducedDimensions, showScree):\n print(\"-->Running PCA.\")\n latent = gp.pca(data['features'], reducedDimensions, showScree, savePlots)\n plot(latent, data['colours'], reducedDimensions, \"Iris Dataset\", \"PCA\")", "def get3dPCA(data):\n\n return PCA(n_components = 3).fit_transform(data)", "def PCA (numpy_cloud ):\r\n\r\n # abort, if there are no points\r\n if (numpy_cloud.shape[0] == 0):\r\n #print (\"In normals.py, in PCA: The input array is empty. Returning a null vector and high sigma\")\r\n return np.array ((0, 0, 0)), 1.0, np.array ((0, 0, 0))\r\n\r\n # we only need three colums [X, Y, Z, I] -> [X, Y, Z]\r\n numpy_cloud = numpy_cloud[:, :3].copy () # copying takes roughly 0.000558 seconds per 1000 points\r\n cloud_size = numpy_cloud.shape[0]\r\n\r\n # get covariance matrix\r\n a_transposed_a, mass_center = build_covariance_matrix (numpy_cloud )\r\n\r\n # get normal vector and smallest eigenvalue\r\n normal_vector, smallest_eigenvalue = eigenvalue_decomposition (a_transposed_a )\r\n\r\n # the noise is based on the smallest eigenvalue and normalized by number of points in cloud\r\n noise = smallest_eigenvalue\r\n if (cloud_size <= 3 or noise < 1 * 10 ** -10):\r\n sigma = noise # no noise with 3 points\r\n else:\r\n sigma = sqrt(noise/(cloud_size - 3) )\r\n\r\n return normal_vector, sigma, mass_center", "def sparsepca(X, n_comp):\n n_samples, n_features = X.shape\n # center the data. Note we only do the global centering. i.e. earch column\n # is centered to zero mean. Though the data should already 'locally'\n # centered since each row is normalized to z score. 
\n X = X - X.mean(axis = 0)\n estimator = decomposition.SparsePCA(n_components=n_comp, alpha=0.8, max_iter = 100, n_jobs = 20, verbose = 1, tol = 1e-2)\n t0 = time()\n estimator.fit(X)\n train_time = (time() - t0)\n print \"done in %0.3fs\" % train_time\n components_ = estimator.components_\n X_projected = estormator.transform(X)\n \n return components_, X_projected", "def apply_PCA(data, ncomp):\n import sklearn.decomposition as dc\n \n pca = dc.PCA(n_components=ncomp, whiten=False, svd_solver='full')\n cps = pca.fit_transform(data)\n svl = pca.singular_values_\n return cps,pca,svl", "def PCA(data, n=2):\n U, S, Vt = np.linalg.svd(data, full_matrices=False)\n s = np.diag(S)\n newdata = np.dot(U[:, :n], np.dot(s[:n, :n], Vt[:n,:]))\n return newdata", "def do_PCA_and_save(activations_dir, save_dir, seed=None):\n if seed is None:\n seed = set_seed()\n\n layers = ['layer_1', 'layer_2', 'layer_3', 'layer_4', 'layer_5', 'layer_6',\n 'layer_7', 'layer_8']\n\n # Number of Principal Components\n n_components = 100\n\n if not op.exists(save_dir):\n os.makedirs(save_dir)\n\n for layer in tqdm(layers):\n regex = activations_dir + '/*' + layer + '.npy'\n activations_file_list = sorted(glob.glob(regex))\n feature_dim = np.load(activations_file_list[0])\n x = np.zeros((len(activations_file_list), feature_dim.shape[0]))\n for i, activation_file in enumerate(activations_file_list):\n temp = np.load(activation_file)\n x[i, :] = temp\n x_train = x[:1000, :]\n x_test = x[1000:, :]\n\n x_test = StandardScaler().fit_transform(x_test)\n x_train = StandardScaler().fit_transform(x_train)\n ipca = PCA(n_components=n_components, random_state=seed)\n ipca.fit(x_train)\n\n x_train = ipca.transform(x_train)\n x_test = ipca.transform(x_test)\n train_save_path = op.join(save_dir, \"train_\" + layer)\n test_save_path = op.join(save_dir, \"test_\" + layer)\n np.save(train_save_path, x_train)\n np.save(test_save_path, x_test)", "def performPCA(dataSet, numShapesInDataset, numPointsInShapes, num_components):\n\tdataMat = np.array(dataSet).reshape((numShapesInDataset, numPointsInShapes*2))\n\t\n\t\"\"\"Creating the covariance matrix\"\"\"\n\tcovarMat = np.cov(dataMat.T)\n\t\t\n\t\"\"\"Generating the eigen vectors and eigen values\"\"\"\n\teigVals, eigVecs = np.linalg.eig(covarMat)\n\n\t\"\"\"Taking the first num_components eigen vectors and values, and the center of the space.\"\"\"\n\tprincipleComponents = np.real(eigVecs[:, 0:num_components])\n\tprincipleValues = np.real(eigVals[0:num_components])\n\tmeanShape = dataMat.mean(0).reshape((numPointsInShapes * 2, 1))\n\treturn principleComponents, principleValues, meanShape", "def pca(self):\n if not self._initialised and not self.cache_valid:\n msg = \"No reference images added or previously computed PCA basis loaded\"\n raise RuntimeError(msg)\n if not self.cache_valid:\n self.pca_results = self._pca()\n self.cache_valid = True\n return self.pca_results", "def PCA(X, dims_rescaled_data=21):\n # pca = decomposition.PCA(n_components=3)\n # x_std = StandardScaler().fit_transform(X)\n # a = pca.fit_transform(x_std)\n\n R = np.cov(X, rowvar=False)\n evals, evecs = scipy.linalg.eigh(R)\n idx = np.argsort(evals)[::-1]\n evecs = evecs[:,idx]\n\n evals = evals[idx]\n evecs = evecs[:, :dims_rescaled_data]\n\n newX = np.dot(evecs.T, X.T).T\n\n return newX #, evals, evecs", "def pca(X, k):\n n, dim = X.shape\n\n # Center the data\n X_mean = np.mean(X, axis = 0)\n X = X - X_mean\n # Get the covariance matrix\n covariance_matrix = np.dot(X.T, X) / (n - 1)\n eigval, eigvec = 
eigs(covariance_matrix, k)\n return np.array(eigvec), np.array(eigval)", "def extract_features(img):\n # load models\n model = FeatureExtractor(CFG)\n model.load_model()\n feature_extractor = model.feature_extractor()\n\n # extract features \n print(type(img))\n extracted_features = feature_extractor.predict([img])\n\n # reduce dimension\n pca_model = joblib.load(PCA_MODEL_DIRECTORY)\n reduced_img = pca_model.transform(extracted_features)\n return reduced_img", "def pca(x):\n\t\n\tx = (x - x.mean(axis = 0)) # Subtract the mean of column i from column i, in order to center the matrix.\n\t\n\tnum_observations, num_dimensions = x.shape\n\t\n\t# Often, we have a large number of dimensions (say, 10,000) but a relatively small number of observations (say, 75). In this case, instead of directly computing the eigenvectors of x^T x (a 10,000 x 10,000 matrix), it's more efficient to compute the eigenvectors of x x^T and translate these into the eigenvectors of x^T x by using the transpose trick. \n\t# The transpose trick says that if v is an eigenvector of M^T M, then Mv is an eigenvector of MM^T.\n\t# We arbitrarily select \"100\" as the switching threshold. Another approach is to switch by comparing num_observations and num_dimensions.\n\tif num_dimensions > 100:\n\t\teigenvalues, eigenvectors = linalg.eigh(dot(x, x.T))\n\t\tv = (dot(x.T, eigenvectors).T)[::-1] # Unscaled, but the relative order is still correct.\n\t\ts = sqrt(eigenvalues)[::-1] # Unscaled, but the relative order is still correct.\n\telse:\n\t\tu, s, v = linalg.svd(x, full_matrices = False)\n\t\t\n\treturn v, s", "def PCA(X, k):\n cov = np.matmul(np.matrix.transpose(X), X)\n w, v = np.linalg.eig(cov)\n k_largest = np.argsort(w)[::-1][:k]\n v = np.matrix.transpose(v)\n U = v[k_largest]\n S = w[k_largest]\n return U, S", "def doPCA(df, grouping_variable, features_to_analyse, plot_save_dir=None, PCs_to_keep=10):\n \n data = df[features_to_analyse]\n \n # Normalise the data before PCA\n zscores = data.apply(zscore, axis=0)\n \n # Drop features with NaN values after normalising\n colnames_before = list(zscores.columns)\n zscores.dropna(axis=1, inplace=True)\n colnames_after = list(zscores.columns)\n nan_cols = [col for col in colnames_before if col not in colnames_after]\n if len(nan_cols) > 0:\n print(\"Dropped %d features with NaN values after normalization:\\n%s\" %\\\n (len(nan_cols), nan_cols))\n\n print(\"\\nPerforming Principal Components Analysis (PCA)...\")\n \n # Fit the PCA model with the normalised data\n pca = PCA()\n pca.fit(zscores)\n \n # Project data (zscores) onto PCs\n projected = pca.transform(zscores) # A matrix is produced\n # NB: Could also have used pca.fit_transform()\n\n # Plot summary data from PCA: explained variance (most important features)\n important_feats, fig = pcainfo(pca, zscores, PC=1, n_feats2print=10) \n \n if plot_save_dir:\n # Save plot of PCA explained variance\n PCAplotroot = Path(plot_save_dir) / 'PCA'\n PCAplotroot.mkdir(exist_ok=True, parents=True)\n PCAplotpath = PCAplotroot / ('control_variation_in_' + \n grouping_variable + \n '_PCA_explained.eps')\n savefig(PCAplotpath, tight_layout=True, tellme=True, saveFormat='eps')\n plt.pause(2); plt.close()\n else:\n PCAplotpath=None\n plt.show(); plt.pause(2); plt.close()\n \n # Store the results for first few PCs in dataframe\n projected_df = pd.DataFrame(projected[:,:PCs_to_keep],\n columns=['PC' + str(n+1) for n in range(PCs_to_keep)]) \n \n # Add concatenate projected PC results to metadata\n projected_df.set_index(df.index, inplace=True) # 
Do not lose video snippet index position\n \n df = pd.concat([df, projected_df], axis=1)\n\n # Plot PCA - Variation in control data with respect to a given variable (eg. date_recording_yyyymmdd)\n \n # 2-PC\n if plot_save_dir:\n PCAplotpath = Path(str(PCAplotpath).replace('_PCA_explained', \n '_PCA_2_components'))\n title = \"2-Component PCA: Control variation in\\n\\\n '{0}'\".format(grouping_variable) + \" (Top256 features)\"\n plotPCA(df, grouping_variable, var_subset=None, savepath=PCAplotpath, \n title=title, n_component_axes=2)\n plt.pause(2); plt.close()\n \n # 3-PC\n if plot_save_dir:\n PCAplotpath = Path(str(PCAplotpath).replace('_PCA_2_components', \n '_PCA_3_components'))\n title = \"3-Component PCA: Control variation in\\n\\\n '{0}'\".format(grouping_variable) + \" (Top256 features)\"\n plotPCA(df, grouping_variable, var_subset=None, savepath=PCAplotpath, \n title=title, n_component_axes=3, rotate=False)\n plt.pause(2)\n \n return df", "def analyse_pca(cluster, three_dim=True):\n # create data array and name array:\n A = cluster.data_matrix\n names = cluster.row_header\n\n # assign colours to samples:\n colorconvert = {'F':'go', 'S':'co', 1:'ro', 2:'go', 3:'ko', 4:'bo', 5:'co', 6:'mo', 7:'yo', 8:'r<', 9:'g<', 10:'k<', 11:'b<', 12:'c<', 13:'m<', 14:'y<', 15:'rs', 16:'gs', 17:'ks', 18:'bs', 19:'cs', 20:'ms', 21:'ys' }\n colourlist = []\n for name in names:\n phase = re.search(\"(F|S)\", name)\n if phase is not None:\n #print phase.groups()[0]\n colourlist.append(colorconvert[phase.groups()[0]])\n else:\n colourlist.append('ko')\n #print names, \"\\n\", colourlist\n\n ############# PCA using numpy SVD decomposition ##################################\n print \"#\" * 30\n print \"SVA analysis\"\n U, s, Vt = numpy.linalg.svd(A, full_matrices=True)\n V = Vt.T\n\n # sort the PCs by descending order of the singular values (i.e. by the\n # proportion of total variance they explain)\n ind = numpy.argsort(s)[::-1]\n U = U[:, ind]\n s = s[ind]\n V = V[:, ind]\n S = numpy.diag(s)\n\n sumval = sum([ i ** 2 for i in s ])\n\n # if we use all of the PCs we can reconstruct the noisy signal perfectly\n\n # Mhat = numpy.dot(U, numpy.dot(S, V.T))\n # if we use only the first 2 PCs the reconstruction is less accurate\n # Mhat2 = numpy.dot(U[:, :2], numpy.dot(S[:2, :2], V[:,:2].T))\n\n # To remove the variance of the 1st PC, which is primarily associated with experimenter:\n matrix_reduced = numpy.dot(U[:,1:], numpy.dot(S[1:,1:], V[:,1:].T))\n #for checking decomposition is occurring properly:\n #print numpy.shape(U)\n #print numpy.shape(S)\n #print numpy.shape(Vt)\n #print numpy.shape(matrix_reduced)\n\n #print \"#\" * 30\n #print \"SVD eigenvectors/loadings:\"\n #print header[:var_num] , \"\\n\"\n #print U # need to work out appropriate way to calculate loadings!\n #print \"#\" * 30\n #print \"checking distance of loadings (eigen vectors)\"\n #for col in loadings[:,:]:\n # print col\n # print numpy.sqrt(sum([ a ** 2 for a in col ]))\n\n print \"PCA explained variance:\"\n print [ (z ** 2 / sumval) for z in s ]\n\n # * if M is considered to be an (observations, features) matrix, the PCs\n # themselves would correspond to the rows of S^(1/2)*V.T. 
if M is\n # (features, observations) then the PCs would be the columns of\n # U*S^(1/2).\n\n #q_scores = numpy.dot(numpy.sqrt(S), V.T)\n q_scores = numpy.dot(U, numpy.sqrt(S))\n\n pp = PdfPages(cluster.exportPath[0:-4] + '.PCA.pdf')\n if three_dim: # plot a three dimensional graph:\n fig = plt.figure(1)\n ax = fig.add_subplot(111, projection='3d')\n for idx in range(len(colourlist)):\n xs = q_scores[idx,0]\n ys = q_scores[idx,1]\n zs = q_scores[idx,2]\n name = re.search('[FS][LP][0-9]+',names[idx]).group(0)\n ax.scatter(xs, ys, zs, c=colourlist[idx][0], marker='o')\n ax.text(xs, ys, zs, name)\n\n ax.set_xlabel(\"PC1 (%.2f%%)\" % (100.0 * (s[0]**2)/sumval))\n ax.set_ylabel(\"PC2 (%.2f%%)\" % (100.0 * (s[1]**2)/sumval))\n ax.set_zlabel(\"PC3 (%.2f%%)\" % (100.0 * (s[2]**2)/sumval))\n\n plt.savefig(pp, format='pdf')\n plt.show()\n else: # plot two 2D graphs instead:\n for idx in range(len(colourlist)):\n fig = plt.figure(1)\n\n sub1 = fig.add_subplot(2,1,1)\n sub1.plot(q_scores[idx,0], q_scores[idx,1], colourlist[idx])\n plt.xlabel( \"PC1 (%.2f%%)\" % (100.0 * (s[0]**2)/sumval) )\n plt.ylabel( \"PC2 (%.2f%%)\" % (100.0 * (s[1]**2)/sumval) )\n sub1.annotate( names[idx], xy=(q_scores[idx,0], q_scores[idx,1]),xytext=(-15,10), xycoords='data', textcoords='offset points' )\n\n sub2 = fig.add_subplot(2,1,2)\n sub2.plot(q_scores[idx,0], q_scores[idx,2], colourlist[idx])\n plt.xlabel( \"PC1 (%.2f%%)\" % (100.0 * (s[0]**2)/sumval) )\n plt.ylabel( \"PC3 (%.2f%%)\" % (100.0 * (s[2]**2)/sumval) )\n sub2.annotate( names[idx], xy=(q_scores[idx,0],q_scores[idx,2]),xytext=(-15,10), xycoords='data', textcoords='offset points' )\n\n plt.savefig(pp, format='pdf')\n plt.show()\n\n plt.close()\n return matrix_reduced", "def _load_protein_matrices(yaml_file, protein_name):\n prj = ProteinSeries(yaml_file)\n prt = Protein(prj, protein_name)\n\n key_mapping, assignment_matrix = create_assignment_matrix(prt.fixed_assignments)\n tics_mapping , tics_array = create_tics_array(prt.fixed_assignments, prt.kmeans_mdl,\n prt.tica_data)\n\n return prj, prt, key_mapping, assignment_matrix, tics_mapping, tics_array", "def _apply_pca(self, X):\n newX = np.reshape(X, (-1, X.shape[2]))\n pca = sklearnPCA(n_components=self.num_components, whiten=True)\n newX = pca.fit_transform(newX)\n newX = np.reshape(newX, (X.shape[0], X.shape[1], self.num_components))\n return newX", "def deep_learning_pca(data, param, mean_subtract=True, display=True, indices=True, pool='None', pool_size=2, \\\n labels=[], method='none', random_sampling=0, matrixA='single', decomp=['svd','svd','svd']): \n layer_size = len(param)\n\n # turning data shape into (image depth, total number of images, image height, image width)\n # for example (3, 10000, 32, 32)\n layer_in = np.zeros((data.shape[3], data.shape[0], data.shape[1], data.shape[2]))\n for i in range(data.shape[3]):\n layer_in[i,:,:,:] = data[:,:,:,i]\n \n U_stack = []\n means_stack = []\n \n for layer_i in range(layer_size):\n ### input shape: k_pre x N x 100 x 100\n ### output shape: k_pre x k_post x N x num_sub1\n # setup parameters\n subsize, stride, k_post = param[layer_i]\n k_pre = layer_in.shape[0]\n N = layer_in.shape[1]\n img_size = layer_in.shape[2]\n\n if random_sampling == 0:\n row_sub = int((img_size - subsize) / stride + 1)\n else:\n row_sub = random_sampling\n\n num_sub = row_sub*row_sub\n\n if display:\n print('\\n----------- Layer {} -----------'.format(layer_i)) \n print('image size: {:3d}'.format(img_size))\n print('subimage size: {:3d}'.format(subsize))\n print('stride: 
{:3d}'.format(stride))\n print('k: {:3d}'.format(k_post))\n print('number of subimages per row:', row_sub)\n print('\\nlayer {} in: {}'.format(layer_i, layer_in.shape))\n\n layer_out = np.zeros((k_pre, k_post, N, num_sub))\n\n # if indices is true then add 2 more rows\n indices_row = indices*2\n\n U = np.zeros((k_pre, subsize*subsize + indices_row, k_post))\n means = np.zeros((k_pre))\n\n\n for i1 in range(k_pre):\n ### input: N, size, size\n ### output: N, k, num_sub\n\n # img_sub_combined: subsize*subsize+indices_row,num_sub*N\n # 841, 16 * N\n img_sub_combined = take_subimages_combined(layer_in[i1,:,:,:], subsize, stride, indices=indices, random_sampling=random_sampling)\n img_sub_combined = img_sub_combined.reshape((subsize*subsize+indices_row,num_sub*N))\n\n # img_sub_stack : N,subsize*subsize+indices_row,num_sub\n # N, 841, 16\n img_sub_stack = take_subimages_stack(layer_in[i1,:,:,:], subsize, stride, indices=indices, random_sampling=random_sampling)\n \n # Subtract Mean\n if mean_subtract:\n img_mean = np.mean(img_sub_combined)\n else:\n img_mean = 0\n\n # new matrix with means subtracted\n img_sub_combined_mean_subtract = img_sub_combined - img_mean\n\n # append mean to means array\n means[i1] = img_mean\n\n # calculate U matrix from combined\n U[i1, :, :], _, _ = svd(img_sub_combined_mean_subtract, k_post, labels=labels, method=method)\n \n # initialize output\n Ut_subimages = np.zeros((N, k_post, num_sub + indices_row))\n\n # U transpose x subimage matrix\n for data_i in range(N):\n Ut_subimages[data_i,:,:] = np.matmul(U[i1, :, :].transpose(), (img_sub_stack[data_i,:,:] - img_mean))\n \n # swap axes - after swap: (k_in, k, N, num_sub)\n layer_out[i1,:,:,:] = np.swapaxes(np.copy(Ut_subimages), 0, 1)\n\n if mean_subtract:\n means_stack.append(means)\n\n # Append U to U_stack\n U_stack.append(U)\n\n if display:\n print('layer {} out: {}'.format(layer_i, layer_out.shape))\n print('U shape:', U.shape)\n\n ### change for the next layer\n # calculate the next image size\n img_size = row_sub\n\n # k_pre = layer_out.shape[0]\n # k_post = layer_out.shape[1] \n\n layer_in = np.copy(layer_out.reshape((k_pre*k_post, N, img_size, img_size)))\n\n if random_sampling == 0:\n if pool.lower() != 'none':\n img_size = math.ceil(img_size/pool_size)\n layer_in_max_pool = np.zeros((k_pre*k_post, N, img_size, img_size))\n for i in range(layer_in.shape[0]):\n for j in range(layer_in.shape[1]):\n layer_in_max_pool[i,j,:,:] = pool_2d(layer_in[i,j,:,:], pool=pool, pool_size=pool_size)\n layer_in = np.copy(layer_in_max_pool)\n\n if matrixA == 'combine':\n if layer_i == 0:\n A = _make_matrixA(layer_out, k_pre, k_post, num_sub, N)\n else:\n A_ = _make_matrixA(layer_out, k_pre, k_post, num_sub, N)\n A = np.append(A, A_, axis=0)\n # take random sampling (if not 0) only at the first layer\n random_sampling = 0 \n \n num_sub = layer_out.shape[3]\n \n # construct matrix A from the last output\n if matrixA != 'combine':\n A = _make_matrixA(layer_out, k_pre, k_post, num_sub, N)\n\n if display:\n print('\\nA shape:', A.shape)\n \n return A, U_stack, means_stack", "def get_features_from_pca(feat_num, feature):\n\n if feature == 'HoG':\n vocab = np.load('vocab_hog.npy')\n elif feature == 'SIFT':\n vocab = np.load('vocab_sift.npy')\n\n # Your code here. 
You should also change the return value.\n\n def _get_PCA_vectors(feat_num, vocab):\n\n mean = vocab.mean(axis=0, keepdims=True)\n vocab_normalized = vocab - np.multiply(np.ones([vocab.shape[0], mean.shape[0]]),\n mean)\n #TEST: mean unit test\n #mean = vocab_normalized.mean(axis=0, keepdims=True)\n\n cov_matrix = np.cov(np.transpose(vocab_normalized))\n sigma, V = np.linalg.eig(cov_matrix)\n order_sigma = np.argsort(sigma)\n\n PCA_vectors = []\n i = 1\n for f in range(len(order_sigma)):\n eigen_vector = V[:, order_sigma[i]]\n if all(True for _ in np.isreal(eigen_vector)):\n PCA_vectors.append(np.real(eigen_vector))\n i += 1\n if len(PCA_vectors) == feat_num:\n break\n\n return np.array(PCA_vectors)\n\n #MAIN\n PCA_vectors = _get_PCA_vectors(feat_num, vocab)\n\n d = np.dot(vocab, np.transpose(PCA_vectors))\n\n return np.dot(vocab, np.transpose(PCA_vectors))\n #return np.zeros((vocab.shape[0],2))", "def fitPCAimg(coef=None, data = None, maxcomp = None):\n img = data[:,2:].T\n size = np.sqrt(img.shape[0])\n pca = PCA(n_components=20)\n imgBasis = pca.fit_transform(img)\n nimg = img.shape[1]", "def pca_reduction(X, ncomp=20):\n print('Performing dimensionality reduction ...')\n\n # PCA fitting\n pca = PCA(n_components=ncomp)\n weights = pca.fit_transform(X)\n basis = pca.components_\n\n # # Plot cumsum(explained_variance) versus component\n # plt.semilogy(pca.explained_variance_ratio_*100, 's')\n # plt.ylabel('Explained Variance Ratio (%)', size=20)\n # plt.xticks(size=20)\n # plt.xlabel('Component', size=20)\n # plt.yticks(size=20)\n # plt.show()\n\n print('Explained variance ratio : '+str(round(np.cumsum(pca.explained_variance_ratio_)[-1]*100, 2))+' %.')\n\n # pickle.dump(pca, '/../Data/GPmodel/pca_'+str(ncomp))\n\n # Some plots on PCA\n # plot_pca(basis, weights)\n\n return pca, weights", "def getPCA(matrix):\n eVal, eVec = np.linalg.eigh(matrix)\n indices = eVal.argsort()[::-1] # arguments for sorting eVal desc\n eVal, eVec = eVal[indices], eVec[:, indices]\n eVal = np.diagflat(eVal)\n return eVal, eVec", "def parcellate_PCA(matrix, mat_type, path_pref, rot='quartimax', eigval_thr=1):\n if rot == 'quartimax':\n rotation = 0.0\n elif rot == 'varimax':\n rotation = 1.0\n else:\n raise Exception('This factor rotation type is not handled')\n # To have more than just a reference of matrix in mat\n mat = matrix + 0\n # Get the eigenvalues and eigenvectors of the\n # mat = cov(2D_connectivity_matrix)\n # gamma_eigval, omega_eigvec = np.linalg.eig(mat)\n u, gamma_eigval, omega = np.linalg.svd(mat, full_matrices=True)\n # SVD third output is the transposed of the eigen vectors\n omega_eigvec = omega.T\n if mat_type == \"covariance\":\n comp_thr = eigval_thr * np.mean(gamma_eigval)\n elif mat_type == \"correlation\":\n comp_thr = eigval_thr\n else:\n raise Exception('This factor rotation type is not handled')\n\n # Sort the Gamma_eigval in decreasing order of magnitude, and sort\n # the order of the eigenvectors accordingly\n indsort = np.argsort(gamma_eigval)[::-1]\n\n # The SSQ_loadings is equal to the eigenvalues of the SM (cov(data))\n # They correspond to the values in the 'Extraction Sum of Squared\n # loadings' in SPSS\n gamma_eigval_sort = gamma_eigval[indsort]\n omega_eigvec_sort = omega_eigvec[:,indsort]\n\n # We keep only the components which have an eigenvalue above comp_thr\n keep = np.where(gamma_eigval_sort > comp_thr)\n ind = 0\n while gamma_eigval_sort[ind] > comp_thr:\n ind += 1\n gamma_eigval_sort = gamma_eigval_sort[:ind]\n omega_eigvec_sort = omega_eigvec_sort[:,:ind]\n\n 
SSQ_loadings = gamma_eigval_sort\n # The matrix of factor laodings (like in SPSS)\n Lambda = omega_eigvec_sort.dot(np.diag(np.sqrt(np.abs(gamma_eigval_sort))))\n print(pd.DataFrame(Lambda))\n # SPSS: The rescaled loadings matrix\n Lambda_rescaled = np.dot(np.sqrt(np.diag(np.diag(cov))), Lambda)\n\n # SPSS: communalities\n h = [np.sum(gamma_eigval*(omega_eigvec[i]**2)) for i in range(len(omega_eigvec))]\n\n lambda_rot = rotate_components(Lambda, q = 1000, gamma=rotation)\n print(pd.DataFrame(lambda_rot))\n # Get sum of squared loadings\n SSQ_loadings_rot = np.sum(lambda_rot**2, axis=0)\n print(pd.DataFrame(SSQ_loadings_rot))\n # Sort the SSQ_loadings_rot in descending order to prepare for the\n # power fitting\n SSQ_loadings_rot_sorted = np.sort(SSQ_loadings_rot)\n SSQ_loadings_rot_sorted_descending = SSQ_loadings_rot_sorted[::-1]\n\n # --------------------------------------------------------------------------\n # (5) Fit a power law to the sorted SSQ_Loadings_rot to Estimate\n # the number of relevant factors Npc using the fitpower function in\n # do_PCA_utilities.py (only the first 50 SSQ_Loadings are considered).\n # Returns the number of components to consider: Npc\n # --------------------------------------------------------------------------\n npc = fit_power(SSQ_loadings_rot_sorted_descending)\n print('\\n Power fitting of the eigenvalues associated with the rotated')\n print('loadings estimated the presence of ' + str(npc) + ' clusters \\n')\n\n\n # --------------------------------------------------------------------------\n # (6) Rotate Lambda_Npc = Lambda[:,Npc]\n # Returns the final Factor loadings, defining the clusters\n # --------------------------------------------------------------------------\n lambda_npc = Lambda[:, 0:npc]\n\n return (lambda_rot, npc)\n # return (lambda_npc, npc)", "def project_to_eigenvectors(X, vecs):\n\n return (X-np.mean(X, axis=0)).dot(np.transpose(vecs)) #PCA assumes that the data is centered, so we need to do that before doing the calculations", "def pca(cube, angle_list, cube_ref=None, scale_list=None, ncomp=1, ncomp2=1,\n svd_mode='lapack', scaling=None, adimsdi='double', mask_center_px=None,\n source_xy=None, delta_rot=1, fwhm=4, imlib='opencv',\n interpolation='lanczos4', collapse='median', check_mem=True,\n full_output=False, verbose=True, debug=False):\n if not cube.ndim > 2:\n raise TypeError('Input array is not a 3d or 4d array')\n\n if check_mem:\n input_bytes = cube.nbytes\n if cube_ref is not None:\n input_bytes += cube_ref.nbytes\n if not check_enough_memory(input_bytes, 1.5, False):\n msgerr = 'Input cubes are larger than available system memory. '\n msgerr += 'Set check_mem=False to override this memory check or '\n msgerr += 'use the incremental PCA (for ADI)'\n raise RuntimeError(msgerr)\n\n start_time = time_ini(verbose)\n\n angle_list = check_pa_vector(angle_list)\n #***************************************************************************\n # ADI + mSDI. 
Shape of cube: (n_channels, n_adi_frames, y, x)\n #***************************************************************************\n if cube.ndim == 4:\n if adimsdi == 'double':\n res_pca = _adimsdi_doublepca(cube, angle_list, scale_list, ncomp,\n ncomp2, scaling, mask_center_px, debug,\n svd_mode, imlib, interpolation,\n collapse, verbose, start_time,\n full_output)\n residuals_cube_channels, residuals_cube_channels_, frame = res_pca\n elif adimsdi == 'single':\n res_pca = _adimsdi_singlepca(cube, angle_list, scale_list, ncomp,\n scaling, mask_center_px, debug,\n svd_mode, imlib, interpolation,\n collapse, verbose, start_time,\n full_output)\n cube_allfr_residuals, cube_adi_residuals, frame = res_pca\n else:\n raise ValueError('`Adimsdi` mode not recognized')\n # **************************************************************************\n # ADI+RDI\n # **************************************************************************\n elif cube.ndim == 3 and cube_ref is not None:\n res_pca = _adi_rdi_pca(cube, cube_ref, angle_list, ncomp, scaling,\n mask_center_px, debug, svd_mode, imlib,\n interpolation, collapse, verbose, full_output,\n start_time)\n pcs, recon, residuals_cube, residuals_cube_, frame = res_pca\n # **************************************************************************\n # ADI. Shape of cube: (n_adi_frames, y, x)\n # **************************************************************************\n elif cube.ndim == 3 and cube_ref is None:\n res_pca = _adi_pca(cube, angle_list, ncomp, source_xy, delta_rot, fwhm,\n scaling, mask_center_px, debug, svd_mode, imlib,\n interpolation, collapse, verbose, start_time, True)\n\n if source_xy is not None:\n recon_cube, residuals_cube, residuals_cube_, frame = res_pca\n else:\n pcs, recon, residuals_cube, residuals_cube_, frame = res_pca\n\n else:\n msg = 'Only ADI, ADI+RDI and ADI+mSDI observing techniques are '\n msg += 'supported'\n raise RuntimeError(msg)\n\n if cube.ndim == 3:\n if full_output:\n if source_xy is not None:\n return recon_cube, residuals_cube, residuals_cube_, frame\n else:\n return pcs, recon, residuals_cube, residuals_cube_, frame\n else:\n return frame\n elif cube.ndim == 4:\n if full_output:\n if adimsdi == 'double':\n return residuals_cube_channels, residuals_cube_channels_, frame\n elif adimsdi == 'single':\n return cube_allfr_residuals, cube_adi_residuals, frame\n else:\n return frame", "def emulator(pca, gp_model, params):\n # Weights prediction\n pred_weights = gp_predict(gp_model, params)\n\n # Inverse PCA (pred_weights * basis + mean)\n reconstructed = pca.inverse_transform(pred_weights)\n return reconstructed", "def apply_pca(X: numpy.ndarray, pca: sklearn.decomposition.PCA):\n output = pca.transform(X)\n return output", "def pca(self):\n self.pca_mean = self.X.mean(axis=1)\n X_meanC = self.X - self.pca_mean[:, None]\n (self.pca_U, self.pca_S, self.pca_V) = np.linalg.svd(X_meanC, full_matrices=False)\n self.pc_weights = np.dot(np.diag(self.pca_S), self.pca_V)\n self.pc_stdevs = np.std(self.pc_weights, axis=1)", "def plot_PCA(fig_name):\n dir = \"log/peps mini\"\n pattern = r'(internal|access|lock)\\\\\\d{1,2}.csv$'\n pattern_valid = r'(3|6|9|12).csv$'\n utils.construct_set(dir, pattern, pattern_valid)\n X, y = utils.load_all()\n utils.plot_PCA(X, y)\n plt.title(fig_name)\n if not os.path.exists(dir_fig):\n os.makedirs(dir_fig)\n plt.savefig(dir_fig + '/' + fig_name + '.png')", "def load_images_from_folder(folder, n_cases,patch_size, mask_path, mask_type, mask_name,normalize=False, imrotate=False):\n\n# # Initialize 
the arrays:\n# if imrotate: # number of images is 4 * n_im\n# bigy = np.empty((n_im * 4, 64, 64))\n# bigx = np.empty((n_im * 4, 64, 64, 2))\n# else:\n# bigy = np.empty((n_im, 64, 64))\n# bigx = np.empty((n_im, 64, 64, 2))\n\n# im = 0 # image counter\n bigy = []\n filenames = os.listdir(folder)\n\n for filename in filenames[n_cases[0]:n_cases[1]]:\n if not filename.startswith('.'):\n temp = loadmat(os.path.join(folder, filename))['res']\n print temp.shape\n # Clean the STONE sense recon data\n row, col = temp.shape\n temp = np.reshape(temp, (row, col, -1))\n #valid_mask = (np.abs(np.squeeze(temp[int(row/2), int(col/2), :])) != 0)\n #final_images = temp[:,:,valid_mask]\n final_images = temp\n \n# # Resize images\n #final_images = np.abs(final_images)\n final_images_resized = np.zeros((patch_size,patch_size,final_images.shape[2]))\n for i in range(final_images.shape[2]):\n final_images_resized[:,:,i] = cv2.resize(final_images[:,:,i], (patch_size,patch_size))\n \n# # Only take a small part of the data\n# final_images = final_images[140:180,140:180,:]\n \n# # Convert to abs values\n# final_images = np.abs(final_images)\n# \n# # Normalize based on single patient case\n# final_images = (final_images - np.mean(final_images)) / np.std(final_images)\n \n# bigy_temp = cv2.imread(os.path.join(folder, filename),\n# cv2.IMREAD_GRAYSCALE)\n \n \n bigy.append(final_images_resized)\n \n bigy = np.asarray(bigy)\n cases, row, col, imgs = bigy.shape\n bigy = np.transpose(np.reshape(np.transpose(bigy, (1,2,3,0)), (row, col, -1)), (2,0,1))\n \n # convert to k-space\n imgs, row, col = bigy.shape\n bigx = np.empty((imgs, row, col, 2))\n mask = read_mask(mask_path=mask_path,mask_type=mask_type,mask_name=mask_name,patch_size=patch_size,show_image=False)\n for i in range(imgs):\n bigx[i, :, :, :] = create_x(np.squeeze(bigy[i,:,:]),mask)\n \n # convert bigx from complex to abs values\n bigy = np.abs(bigy)\n \n# im += 1\n# if imrotate:\n# for angle in [90, 180, 270]:\n# bigy_rot = im_rotate(bigy_temp, angle)\n# bigx_rot = create_x(bigy_rot, normalize)\n# bigy[im, :, :] = bigy_rot\n# bigx[im, :, :, :] = bigx_rot\n# im += 1\n\n# if imrotate:\n# if im > (n_im * 4 - 1): # how many images to load\n# break\n# else:\n# if im > (n_im - 1): # how many images to load\n# break\n\n# if normalize:\n# bigx = (bigx - np.amin(bigx)) / (np.amax(bigx) - np.amin(bigx))\n\n return bigx, bigy", "def dimensionality_reduction(train_frame,valid_frame=None,test_frame=None,columns=[],n_comp=320,random_seed=420,decompositions_to_run=['PCA','TSVD','ICA','GRP','SRP'],frame_type='spark',test_does_have_y=False,only_return_decompositions=False,id_col='ID', column_name=None):\n if frame_type == 'spark':\n from pyspark.ml.feature import PCA\n from pyspark.ml.linalg import Vectors\n from pyspark.ml.feature import VectorAssembler\n # from pyspark.ml.feature import VectorDisassembler\n from pyspark.ml.feature import StandardScaler\n from pyspark.ml import Pipeline\n\n train_df, valid_df, test_df = None,None,None\n train_df = train_frame\n if valid_frame:\n valid_df = valid_frame\n if test_frame:\n test_df = test_frame\n\n assembler = VectorAssembler(\n inputCols=columns,\n outputCol=\"features\")\n scaler = StandardScaler(inputCol=assembler.getOutputCol(),\n outputCol=\"scaledFeatures\",\n withStd=False,\n withMean=True)\n pca = PCA(k=n_comp, inputCol=scaler.getOutputCol(), outputCol=\"pcaFeatures\")\n pipeline = Pipeline(stages=[assembler,scaler, pca])\n\n #define a function for extracting pca vector column into their own columns\n def 
extract_vectors(row):\n \"\"\"\n Takes a vector and extracts it into many columns from the vector.\n pcaFeatures is the vector being extracted in this function.\n Vector values will be named _2, _3, ...\n \"\"\"\n # tuple(x for x in row if x not in ['pcaFeatures'])+\n return tuple(float(x) for x in row.pcaFeatures.values)\n\n #define a function for extracting pca vector column into their own columns\n def extract_vectors_with_id_col(row):\n \"\"\"\n Takes a vector and extracts it into many columns from the vector.\n pcaFeatures is the vector being extracted in this function.\n Vector values will be named _2, _3, ...\n \"\"\"\n # tuple(x for x in row if x not in ['pcaFeatures'])+\n return (row[id_col],)+tuple(float(x) for x in row.pcaFeatures.values)\n\n def rename_columns(dataframe,new_prefix='pca_',old_colomn_starting_index=2,new_column_starting_index=1):\n \"\"\"\n Takes a spark df and renames all columns to something like pca_1\n from the previously named columns.\n \"\"\"\n old_column_index = old_colomn_starting_index\n new_column_index = new_column_starting_index\n for i in range(0,n_comp):\n if column_name:\n dataframe = dataframe.withColumnRenamed('_'+str(old_colomn_starting_index),column_name+'_'+new_prefix+str(new_column_starting_index))\n else:\n dataframe = dataframe.withColumnRenamed('_'+str(old_colomn_starting_index),new_prefix+str(new_column_starting_index))\n old_colomn_starting_index+=1\n new_column_starting_index+=1\n return dataframe\n\n #Do PCA tranformation for training data\n model_train = pipeline.fit(train_frame)\n result_train = model_train.transform(train_frame)\n extracted_pca_train = result_train.rdd.map(extract_vectors_with_id_col).toDF([id_col])\n extracted_pca_train = rename_columns(extracted_pca_train)\n\n #Do PCA tranformation for validation data if it was given\n extracted_pca_valid = None\n model_valid = None #Will need this to fit test if it doesn't have y values\n if valid_frame:\n model_valid = pipeline.fit(valid_frame)\n result_valid = model_train.transform(valid_frame)\n extracted_pca_valid = result_valid.rdd.map(extract_vectors_with_id_col).toDF([id_col])\n extracted_pca_valid = rename_columns(extracted_pca_valid)\n\n #Do PCA tranformation for test data if it was given\n extracted_pca_test = None\n if test_frame:\n model_test = pipeline.fit(test_frame)\n result_test = model_test.transform(test_frame)\n extracted_pca_test = result_test.rdd.map(extract_vectors_with_id_col).toDF([id_col])\n extracted_pca_test = rename_columns(extracted_pca_test)\n ###\n ### SVD ###\n ###\n # https://stackoverflow.com/questions/33428589/pyspark-and-pca-how-can-i-extract-the-eigenvectors-of-this-pca-how-can-i-calcu/33500704#33500704\n # https://github.com/apache/spark/blob/master/examples/src/main/python/mllib/svd_example.py\n # https://blog.dominodatalab.com/pca-on-very-large-neuroimaging-datasets-using-pyspark/\n from pyspark.mllib.linalg.distributed import RowMatrix\n from pyspark.mllib.linalg.distributed import IndexedRow, IndexedRowMatrix\n from pyspark.mllib.linalg import DenseVector\n\n def extract_svd_vectors_with_id_col(row):\n \"\"\"\n Takes a vector and extracts it into many columns from the vector.\n pcaFeatures is the vector being extracted in this function.\n Vector values will be named _2, _3, ...\n \"\"\"\n # tuple(x for x in row if x not in ['pcaFeatures'])+\n return (row[id_col],)+tuple(float(x) for x in row.svdFeatures.values)\n\n if 'SVD' in decompositions_to_run:\n #Train first\n mat = IndexedRowMatrix(result_train.rdd.map(lambda row: 
IndexedRow(row[id_col],DenseVector(row['pcaFeatures']))))\n svd = mat.computeSVD(n_comp, computeU=True)\n U = svd.U # The U factor is a RowMatrix.\n s = svd.s # The singular values are stored in a local dense vector.\n V = svd.V\n # Print vectors for testing\n# collected = U.rows.collect()\n# print(\"U factor is:\")\n# for vector in collected:\n# print(vector)\n# print(\"Singular values are: %s\" % s)\n# print(\"V factor is:\\n%s\" % V)\n extracted_svd_train = U.rows.map(lambda x: (x, )).toDF().rdd.map(lambda x: (x['_1'][0],x['_1'][1] )).toDF([id_col,'svdFeatures']).rdd.map(extract_svd_vectors_with_id_col).toDF([id_col])\n extracted_svd_train = rename_columns(extracted_svd_train,new_prefix='svd_')\n if valid_frame:\n mat = IndexedRowMatrix(result_valid.rdd.map(lambda row: IndexedRow(row[id_col],DenseVector(row['pcaFeatures']))))\n svd = mat.computeSVD(n_comp, computeU=True)\n U = svd.U # The U factor is a RowMatrix.\n s = svd.s # The singular values are stored in a local dense vector.\n V = svd.V # The V factor is a local dense matrix.\n extracted_svd_valid = U.rows.map(lambda x: (x, )).toDF().rdd.map(lambda x: (x['_1'][0],x['_1'][1] )).toDF([id_col,'svdFeatures']).rdd.map(extract_svd_vectors_with_id_col).toDF([id_col])\n extracted_svd_valid = rename_columns(extracted_svd_valid,new_prefix='svd_')\n if test_frame:\n mat = IndexedRowMatrix(result_valid.rdd.map(lambda row: IndexedRow(row[id_col],DenseVector(row['pcaFeatures']))))\n svd = mat.computeSVD(n_comp, computeU=True)\n U = svd.U # The U factor is a RowMatrix.\n s = svd.s # The singular values are stored in a local dense vector.\n V = svd.V # The V factor is a local dense matrix.\n extracted_svd_test = U.rows.map(lambda x: (x, )).toDF().rdd.map(lambda x: (x['_1'][0],x['_1'][1] )).toDF([id_col,'svdFeatures']).rdd.map(extract_svd_vectors_with_id_col).toDF([id_col])\n extracted_svd_test = rename_columns(extracted_svd_test,new_prefix='svd_')\n\n if only_return_decompositions:\n train_df = train_df.select(id_col)\n if valid_df:\n train_df = valid_df.select(id_col)\n if test_df:\n test_df = test_df.select(id_col)\n if 'PCA' in decompositions_to_run:\n train_df = extracted_pca_train.join(train_df,id_col,'inner')\n if valid_df:\n valid_df = extracted_pca_valid.join(valid_df,id_col,'inner')\n if test_df:\n test_df = extracted_pca_test.join(test_df,id_col,'inner')\n if 'SVD' in decompositions_to_run:\n train_df = extracted_svd_train.join(train_df,id_col,'inner')\n if valid_df:\n valid_df = extracted_svd_valid.join(valid_df,id_col,'inner')\n if test_df:\n test_df = extracted_svd_test.join(test_df,id_col,'inner')\n # return the right number of frames\n if valid_frame:\n if test_frame:\n return train_df.drop('features','scaledFeatures','pcaFeatures','svdFeatures'),valid_df.drop('features','scaledFeatures','pcaFeatures','svdFeatures'),test_df.drop('features','scaledFeatures','pcaFeatures','svdFeatures')\n else:\n return train_df.drop('features','scaledFeatures','pcaFeatures','svdFeatures'),valid_df.drop('features','scaledFeatures','pcaFeatures','svdFeatures')\n else:\n if test_frame:\n return train_df.drop('features','scaledFeatures','pcaFeatures','svdFeatures'),test_df.drop('features','scaledFeatures','pcaFeatures','svdFeatures')\n else:\n return train_df.drop('features','scaledFeatures','pcaFeatures','svdFeatures')\n\n elif frame_type in ['h2o','pandas']:\n from sklearn.random_projection import GaussianRandomProjection\n from sklearn.random_projection import SparseRandomProjection\n from sklearn.decomposition import PCA, FastICA\n from 
sklearn.decomposition import TruncatedSVD\n import pandas as pd\n\n train_df, test_df, valid_df = None, None, None\n if frame_type == 'h2o':\n # convert to pandas\n train_df = train_frame.as_data_frame()\n if valid_frame:\n valid_df = valid_frame.as_data_frame()\n test_df = test_frame.as_data_frame()\n elif frame_type == 'pandas':\n train_df = training_frame\n if valid_frame:\n valid_df = valid_frame\n test_df = test_frame\n\n train_df = train_df[columns]\n if valid_frame:\n valid_df = valid_df[columns]\n test_df = test_df[columns]\n\n\n tsvd_results_train, tsvd_results_valid, tsvd_results_test = None, None, None\n if 'TSVD' in decompositions_to_run:\n tsvd = TruncatedSVD(n_components=n_comp, random_state=random_seed)\n tsvd_results_train = tsvd.fit_transform(train_df)\n tsvd_results_valid, tsvd_results_test = None, None\n if valid_frame:\n tsvd2 = TruncatedSVD(n_components=n_comp, random_state=random_seed)\n tsvd_results_valid = tsvd2.fit_transform(valid_df)\n if test_frame:\n if test_does_have_y:\n tsvd3 = TruncatedSVD(n_components=n_comp, random_state=random_seed)\n tsvd_results_test = tsvd3.fit_transform(test_df)\n else:\n tsvd_results_test = tsvd2.transform(test_df)\n else:\n if test_frame:\n if test_does_have_y:\n tsvd3 = TruncatedSVD(n_components=n_comp, random_state=random_seed)\n tsvd_results_test = tsvd3.fit_transform(test_df)\n else:\n tsvd_results_test = tsvd.transform(test_df)\n\n #PCA\n pca_results_train, pca_results_valid, pca_results_test = None, None, None\n if 'PCA' in decompositions_to_run:\n pca = PCA(n_components=n_comp, random_state=random_seed)\n pca_results_train = pca.fit_transform(train_df)\n if valid_frame:\n pca2 = PCA(n_components=n_comp, random_state=random_seed)\n pca_results_valid = pca2.fit_transform(valid_df)\n if test_frame:\n if test_does_have_y:\n pca3 = PCA(n_components=n_comp, random_state=random_seed)\n pca_results_test = pca3.fit_transform(test_df)\n else:\n pca_results_test = pca2.transform(test_df)\n else:\n if test_frame:\n if test_does_have_y:\n pca3 = PCA(n_components=n_comp, random_state=random_seed)\n pca_results_test = pca3.fit_transform(test_df)\n else:\n pca_results_test = pca.transform(test_df)\n\n # ICA\n ica_results_train, ica_results_valid, ica_results_test = None, None, None\n if 'ICA' in decompositions_to_run:\n ica = FastICA(n_components=n_comp, random_state=random_seed)\n ica_results_train = ica.fit_transform(train_df)\n if valid_frame:\n ica2 = FastICA(n_components=n_comp, random_state=random_seed)\n ica_results_valid = ica2.fit_transform(valid_df)\n if test_frame:\n if test_does_have_y:\n ica3 = FastICA(n_components=n_comp, random_state=random_seed)\n ica_results_test = ica3.fit_transform(test_df)\n else:\n ica_results_test = ica2.transform(test_df)\n else:\n if test_frame:\n if test_does_have_y:\n ica3 = FastICA(n_components=n_comp, random_state=random_seed)\n ica_results_test = ica3.fit_transform(test_df)\n else:\n ica_results_test = ica.transform(test_df)\n\n\n # GRP\n grp_results_train, grp_results_valid, grp_results_test = None, None, None\n if 'GRP' in decompositions_to_run:\n grp = GaussianRandomProjection(n_components=n_comp,eps=0.1, random_state=random_seed)\n grp_results_train = grp.fit_transform(train_df)\n if valid_frame:\n grp2 = GaussianRandomProjection(n_components=n_comp,eps=0.1, random_state=random_seed)\n grp_results_valid = grp2.fit_transform(valid_df)\n if test_frame:\n if test_does_have_y:\n grp3 = GaussianRandomProjection(n_components=n_comp,eps=0.1, random_state=random_seed)\n grp_results_test = 
grp3.fit_transform(test_df)\n else:\n grp_results_test = grp2.transform(test_df)\n else:\n if test_frame:\n if test_does_have_y:\n grp3 = GaussianRandomProjection(n_components=n_comp,eps=0.1, random_state=random_seed)\n grp_results_test = grp3.fit_transform(test_df)\n else:\n grp_results_test = grp.transform(test_df)\n\n # SRP\n srp_results_train, srp_results_valid, srp_results_test = None, None, None\n if 'SRP' in decompositions_to_run:\n srp = SparseRandomProjection(n_components=n_comp, dense_output=True, random_state=random_seed)\n srp_results_train = srp.fit_transform(train_df)\n if valid_frame:\n srp2 = SparseRandomProjection(n_components=n_comp, dense_output=True, random_state=random_seed)\n srp_results_valid = srp2.fit_transform(valid_df)\n if test_frame:\n if test_does_have_y:\n srp3 = SparseRandomProjection(n_components=n_comp, dense_output=True, random_state=random_seed)\n srp_results_test = srp3.fit_transform(test_df)\n else:\n srp_results_test = srp2.transform(test_df)\n else:\n if test_frame:\n if test_does_have_y:\n srp3 = SparseRandomProjection(n_components=n_comp, dense_output=True, random_state=random_seed)\n srp_results_test = srp3.fit_transform(test_df)\n else:\n srp_results_test = srp.transform(test_df)\n\n if only_return_decompositions:\n train_df = pd.DataFrame()\n if valid_frame:\n valid_df = pd.DataFrame()\n if test_frame:\n test_df = pd.DataFrame()\n for i in range(1, n_comp + 1):\n if 'PCA' in decompositions_to_run:\n train_df['pca_' + str(i)] = pca_results_train[:, i - 1]\n if valid_frame:\n valid_df['pca_' + str(i)] = pca_results_valid[:, i - 1]\n if test_frame:\n test_df['pca_' + str(i)] = pca_results_test[:, i - 1]\n\n if 'ICA' in decompositions_to_run:\n train_df['ica_' + str(i)] = ica_results_train[:, i - 1]\n if valid_frame:\n valid_df['pca_' + str(i)] = ica_results_valid[:, i - 1]\n if test_frame:\n test_df['ica_' + str(i)] = ica_results_test[:, i - 1]\n\n if 'TSVD' in decompositions_to_run:\n train_df['tsvd_' + str(i)] = tsvd_results_train[:, i - 1]\n if valid_frame:\n valid_df['pca_' + str(i)] = tsvd_results_valid[:, i - 1]\n if test_frame:\n test_df['tsvd_' + str(i)] = tsvd_results_test[:, i - 1]\n\n if 'GRP' in decompositions_to_run:\n train_df['grp_' + str(i)] = grp_results_train[:, i - 1]\n if valid_frame:\n valid_df['pca_' + str(i)] = grp_results_valid[:, i - 1]\n if test_frame:\n test_df['grp_' + str(i)] = grp_results_test[:, i - 1]\n\n if 'SRP' in decompositions_to_run:\n train_df['srp_' + str(i)] = srp_results_train[:, i - 1]\n if valid_frame:\n valid_df['pca_' + str(i)] = srp_results_valid[:, i - 1]\n if test_frame:\n test_df['srp_' + str(i)] = srp_results_test[:, i - 1]\n\n if frame_type == 'pandas':\n if valid_frame:\n if test_frame:\n return (train_df, valid_df, test_df)\n else:\n return (train_df, valid_df)\n else:\n if test_frame:\n return (train_df, test_df)\n else:\n return (train_df)\n elif frame_type == 'h2o':\n # convert back to h2o\n import h2o\n print('Converting to H2OFrame ...')\n # convert train back to h2o\n training_frame = h2o.H2OFrame(train_df)\n training_frame.columns = list(train_df)\n # conserve memory\n del train_df\n testing_frame = None\n if test_frame:\n # convert test back to h2o\n testing_frame = h2o.H2OFrame(test_df)\n testing_frame.columns = list(test_df)\n # conserve memory\n del test_df\n validation_frame = None\n if valid_frame:\n # convert test back to h2o\n validation_frame = h2o.H2OFrame(valid_df)\n validation_frame.columns = list(valid_df)\n # conserve memory\n del valid_df\n\n print('Done.')\n\n if 
valid_frame:\n if test_frame:\n return training_frame, validation_frame, testing_frame\n else:\n return training_frame, validation_frame\n else:\n if test_frame:\n return training_frame, testing_frame\n else:\n return training_frame", "def chainercv_preprocess(image):\n image = skio.imread(image)\n image = image.transpose(2, 0, 1)\n return [image]", "def test():\r\n \r\n data = 'f_canny'\r\n dest = os.path.join(r\"D:\\Data\\test1\",data)\r\n \r\n if not os.path.exists(dest):\r\n os.makedirs(dest)\r\n \r\n # Loading the trained StandardScaler\r\n pickle_in2 = open(r'C:\\PythonCodes\\MM803\\code\\Outputs\\New\\f_canny_SS.sav',\"rb\")\r\n SS = pickle.load(pickle_in2)\r\n \r\n #pickle_in3 = open(r'C:\\PythonCodes\\MM803\\code\\Outputs\\New\\f_hed_IPCA.sav',\"rb\")\r\n #IPCA = pickle.load(pickle_in3)\r\n \r\n # insert at 1, 0 is the script path (or '' in REPL)\r\n sys.path.insert(1, r\"C:\\PythonCodes\\MM803\\code\\feature_enhancement\\hed\")\r\n import feature_enhancement.feature_enhancement_feature_only as features\r\n\r\n base = r\"C:\\PythonCodes\\MM803\\TestImages\\cropped\"\r\n testset = ['t1','t7','t10', 't16','t18','t27']\r\n for folder in testset:\r\n imagepath = os.path.join(base,folder)\r\n dest = os.path.join(base,\"canny_\"+folder)\r\n labels = pd.DataFrame(columns = [\"filename\",\"classname\"])\r\n col1 = []\r\n col2 = []\r\n if not os.path.exists(dest):\r\n os.makedirs(dest)\r\n imagelist = os.listdir(imagepath)\r\n print((imagelist))\r\n for images in imagelist:\r\n imagename = os.path.join(imagepath,images)\r\n \r\n filename = images[:-4]\r\n \r\n # Reading and processing image\r\n image = cv2.imread(imagename,cv2.COLOR_BGR2GRAY)\r\n \r\n gray_img = cv2.imread(imagename,0)\r\n blurred = cv2.GaussianBlur(gray_img,(3,3),0)\r\n canny_edge = features.canny(image,blurred,alpha = 1, beta=0.6)\r\n \r\n cv2.imwrite(os.path.join(dest,filename+\"_canny.png\"),canny_edge)\r\n \r\n col1.append(imagename)\r\n col2.append(filename)\r\n feature_matrix_test = create_feature_matrix1(dest, 200,200)\r\n labels = pd.DataFrame({'filename': col1, 'label': col2}) \r\n \r\n labels.to_csv(r\"C:\\PythonCodes\\MM803\\code\\Mid_Outputs\\test_\" + folder+\"_f_canny_labels.csv\", sep='\\t', encoding='utf-8', index = False, header = False)\r\n labels_df = pd.read_csv(r\"C:\\PythonCodes\\MM803\\code\\Mid_Outputs\\test_\" + folder+\"_f_canny_labels.csv\", sep = '\\t', header=None)\r\n y_test_new = labels_df[1]\r\n \r\n SS.transform(np.asarray(feature_matrix_test))\r\n ## X_test_new = IPCA.transform(feature_matrix_test)\r\n X_test_new = feature_matrix_test\r\n \r\n for name, clf in classifiers:\r\n print(\"\\nClassification by \", name, \" on test set \", folder)\r\n print(\"_______________________________________\")\r\n pickle_in1 = open(r'C:\\PythonCodes\\MM803\\code\\Outputs\\New\\f_canny_'+name+'.sav',\"rb\")\r\n inc = pickle.load(pickle_in1)\r\n \r\n for i in range(len(imagelist)):\r\n print('Score on Test Images:', imagelist[i][:-4],\" \", inc.score(X_test_new[i:i+1], y_test_new[i:i+1]))\r\n \r\n print(\"Predicted Classes\",inc.predict(X_test_new))\r\n print(\"Actual Classes \\n\\n\", y_test_new)", "def eigen_decomposition(X, features):\n # Center to average\n Xctr = X - X.mean(0)\n # covariance matrix\n Xcov = np.cov(Xctr.T)\n\n # Compute eigenvalues and eigenvectors\n eigen_values, eigen_vectors = sp.linalg.eigh(Xcov)\n\n # Sort the eigenvalues and the eigenvectors descending\n sortedindex = np.argsort(eigen_values)[::-1]\n eigen_values = eigen_values[sortedindex]\n eigen_vectors = eigen_vectors[:, 
sortedindex]\n\n ###########\n y_pos = np.arange(len(features))\n weight = eigen_vectors[0]\n\n figure, axis = plt.subplots(2, 1)\n\n axis[0].bar(features, eigen_vectors[0])\n plt.setp(axis[0], title=\"First and Second Component's Eigenvectors \", ylabel='Weight')\n axis[0].set_xticks(features, features)\n axis[1].bar(features, eigen_vectors[1])\n axis[1].set_xticks(features, features)\n plt.setp(axis[1], ylabel='Weight')\n # axis[0].bar(y_pos, weight, align='center', alpha=0.5)\n # axis[0].xticks(y_pos, features)\n # axis[0].ylabel('Weight')\n # axis[0].title('Features')\n #\n # axis[1].bar(y_pos, weight, align='center', alpha=0.5)\n # axis[1].xticks(y_pos, features)\n # axis[1].ylabel('Weight')\n # axis[1].title('Features')\n\n plt.show()\n # return eigen_values, eigen_vectors", "def load_pascal(data_dir, split='train'):\n # Wrote this function\n # idx = 0\n # if idx >20:\n # idx+=1\n # break\n \"\"\"\n print(\"Begin Load Images ------------------------------------\")\n images = []\n # images_dict -> key: img_file_idx, value: rgb image ndarray (256*256*3)\n images_dict = {}\n # count\n for infile in glob.glob(\"./VOCdevkit/VOC2007/JPEGImages/*.jpg\"):\n # reshape the images to 256*256*3\n file, ext = os.path.splitext(infile)\n file_idx = file[-6:]\n\n try:\n im = Image.open(infile)\n resized_img = im.resize((256, 256), Image.ANTIALIAS)\n resized_arr = np.array(resized_img)\n images_dict[file_idx] = resized_arr.astype(np.float32)\n except IOError:\n print(\"Error\")\n\n save_obj(images_dict,\"images_dict\")\n \"\"\"\n # label_mat: 2d array, each annotation file is one label_col, multiple label_col mean multiple annotation files\n label_mat = []\n weight_mat = []\n image_mat = []\n\n images_dict = load_obj(\"images_dict\")\n print(\"Return Load Images ------------------------------------\")\n\n # for filename in os.listdir(\"./VOCdevkit/VOC2007/ImageSets/Main/\"):\n for filename in enumerate(CLASS_NAMES):\n\n with open(\"./VOCdevkit/VOC2007/ImageSets/Main/\"+filename[1] +\"_\"+split+\".txt\") as fp:\n print(fp)\n image_mat = []\n label_col = []\n weight_col = []\n line = fp.readline()\n cnt = 1\n while line:\n\n label_idx = line.strip()[:-3]\n try:\n # print(\"Line {}: {}\".format(label_idx, type(label_idx)))\n # Be aware!! 
'000005 ' is different from '000005', there is a space in the first string!!!\n # label_idx = '000005 ' label_idx[:-1]='000005'\n image_mat.append(images_dict[label_idx])\n except IOError:\n print(\"Error Line {}: {}\".format(label_idx, type(label_idx)))\n\n label_flag = int(line.strip()[-2:])\n\n if label_flag is 0 or label_flag is -1:\n label_col.append(np.int32(0))\n else:\n label_col.append(np.int32(1))\n\n if label_flag is 1 or label_flag is -1:\n weight_col.append(np.int32(1))\n else:\n weight_col.append(np.int32(0))\n\n line = fp.readline()\n cnt += 1\n np_label_col = np.asarray(label_col)\n label_mat.append(np_label_col)\n # print(np.shape(label_mat))\n np_weight_col = np.asarray(weight_col)\n weight_mat.append(np_weight_col)\n\n # print('image_mat {}: label_mat {}'.format(np.shape(image_mat), np.shape(label_mat)))\n np_image_mat = np.asarray(image_mat)\n np_label_mat = np.asarray(label_mat)\n np_weight_mat = np.asarray(weight_mat)\n # print('np_image_mat {}: np_label_mat {}'.format(np.shape(np_image_mat), np.shape(np_label_mat)))\n np_trans_label_mat = np_label_mat.transpose()\n np_trans_weight_mat = np_weight_mat.transpose()\n # print(np.shape(np_label_mat))\n # print(np.shape(np_weight_mat))\n print('np_trans_label_mat {}: np_trans_weight_mat {}'.format(np.shape(np_trans_label_mat), np.shape(np_trans_weight_mat)))\n print(\"Return Load Weights and Labels ------------------------------------\")\n return np_image_mat, np_trans_label_mat, np_trans_weight_mat", "def pca(X_train, X_test, n):\n\n print \"Extracting %d principle components from %d features\" % \\\n (n, X_train.shape[1])\n t0 = time()\n pca = RandomizedPCA(n_components=n, whiten=True, random_state=47).fit(X_train)\n print \"done in %0.3fs\" % (time() - t0)\n \n print \"Transforming the input data\"\n t0 = time()\n X_train_pca = pca.transform(X_train)\n X_test_pca = pca.transform(X_test)\n print \"done in %0.3fs\" % (time() - t0)\n\n return X_train_pca, X_test_pca", "def get_pca_principal_component_images(self):\n return self.get_pca_images()[2]", "def reduce_dimension(positives, negatives, to_return=True, fv_len=10,\n new_pca=True):\n\n features = dict() \n \n # namapovani na numpy matice pro PCA\n X = np.vstack((np.vstack(positives), np.vstack(negatives)))\n Y = np.vstack((np.vstack([1]*len(positives)), np.vstack([-1]*len(negatives)))) \n \n print \"Data shape: \", X.shape, Y.shape, len(positives[0])\n \n # ulozeni puvodnich dat do souboru\n #dr.save_obj(parentname + \"/\" + childname + \"/raw_data.pklz\")\n \n # PCA\n if new_pca or pca is None:\n pca = PCA(n_components=fv_len) # vytvori PCA\n #pca = DEC(n_components=fv_len) # vytvori PCA\n pca.fit(X, Y)\n \n reduced = pca.transform(X) # redukuje dimenzi vektoru priznaku\n \n # znovu namapuje na zavedenou strukturu\n features = list(reduced)\n \n # ulozeni PCA\n #dataset.save_obj(pca, self.PCA_path+\"/PCA_\"+self.descriptor_type+\".pkl\")\n\n if to_return: return pca, features", "def run_pca(df, cols=None): \n import numpy as np\n import pandas as pd\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n from sklearn.preprocessing import StandardScaler\n from sklearn.decomposition import PCA\n import mpld3\n\n # Define and markers to use for different categories\n groups_dict = {(u'D', 0):('Germany, unregulated', 'g', 'o'),\n (u'N', 0):('Norway, unregulated', 'b', 'o'),\n (u'D', 1):('Germany, regulated', 'g', '^'),\n (u'N', 1):('Norway, regulated', 'b', '^')}\n \n # Extract cols of interest\n cats = df[['country', 'regulated']]\n\n if cols:\n df 
= df[cols].astype(float)\n\n # Standardise the feature data\n feat_std = StandardScaler().fit_transform(df)\n\n # Setup PCA. Initially, choose to keep ALL components\n pca = PCA()\n\n # Fit model\n pca.fit(feat_std)\n\n # Get explained variances (in %)\n var_exp = 100*pca.explained_variance_ratio_\n cum_exp = np.cumsum(var_exp)\n\n # Get eigenvalues\n cov_mat = np.cov(feat_std.T)\n eig_vals, eig_vecs = np.linalg.eig(cov_mat)\n\n # Get number of EVs > 1 (Kaiser-Guttman criterion)\n # and print summary\n n_kgc = (eig_vals > 1).sum()\n print 'Variance explained by first %s PCs (%%):\\n' % n_kgc\n print var_exp[:n_kgc]\n print '\\nTotal: %.2f%%' % var_exp[:n_kgc].sum()\n \n # Plot\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(16, 6))\n \n # Explained variance\n axes[0].bar(range(1, len(var_exp)+1), var_exp, \n align='center', label='Individual components')\n axes[0].plot(range(1, len(cum_exp)+1), cum_exp, \n 'r-o', label='Cumulative')\n axes[0].set_xlabel('Principal component')\n axes[0].set_ylabel('Variance explained (%)')\n axes[0].legend(loc='center right')\n \n # Eigenvalues\n axes[1].plot(range(1, len(eig_vals)+1), np.sort(eig_vals)[::-1], \n 'r-o', label='Eigenvalues')\n axes[1].axhline(1, c='k', ls='-', label='Kaiser-Guttman threshold')\n axes[1].set_xlabel('Principal component')\n axes[1].set_ylabel('Eigenvalue')\n axes[1].legend(loc='upper right') \n \n # PC loadings\n loads = pd.DataFrame(data=pca.components_, \n columns=df.columns,\n index=range(1, pca.components_.shape[0]+1)).T\n\n # Project into 2 and 3 components\n fig = plt.figure(figsize=(16, 6))\n \n # Plot 2 components\n ax = fig.add_subplot(1, 2, 1)\n \n # Refit the PCA, this time specifying 2 components\n # and transforming the result\n feat_reduced = PCA(n_components=2).fit_transform(feat_std)\n \n # Build df \n data = pd.DataFrame({'PC1':feat_reduced[:, 0],\n 'PC2':feat_reduced[:, 1],\n 'country':cats['country'],\n 'regulated':cats['regulated']}) \n\n groups = data.groupby(['country', 'regulated'])\n \n # Plot\n for name, group in groups:\n ax.scatter(group['PC1'], group['PC2'], s=60,\n label=groups_dict[name][0],\n c=groups_dict[name][1],\n marker=groups_dict[name][2])\n \n ax.set_xlabel('First principal component')\n ax.set_ylabel('Second principal component')\n ax.set_title('First two PCA directions')\n \n # Plot 3 components\n ax = fig.add_subplot(1, 2, 2, projection='3d', \n elev=-150, azim=135)\n\n # Refit the PCA, this time specifying 3 components\n # and transforming the result\n feat_reduced = PCA(n_components=3).fit_transform(feat_std)\n\n # Build df with colours\n data = pd.DataFrame({'PC1':feat_reduced[:, 0],\n 'PC2':feat_reduced[:, 1],\n 'PC3':feat_reduced[:, 2],\n 'country':cats['country'],\n 'regulated':cats['regulated']}) \n \n groups = data.groupby(['country', 'regulated'])\n \n # Plot\n for name, group in groups:\n ax.scatter(group['PC1'], group['PC2'], group['PC3'],\n label=groups_dict[name][0],\n c=groups_dict[name][1],\n marker=groups_dict[name][2],\n s=60)\n \n ax.set_title('First three PCA directions')\n ax.set_xlabel('First principal component')\n ax.set_ylabel('Second principal component')\n ax.set_zlabel('Third principal component')\n ax.legend(bbox_to_anchor=(0.15, -0.1), frameon=True)\n plt.show()\n\n return loads", "def princomp(A):\n # computing eigenvalues and eigenvectors of covariance matrix\n M = (A-np.mean(A.T,axis=1)).T # subtract the mean (along columns)\n [latent,coeff] = np.linalg.eig(np.cov(M)) # attention:not always sorted\n score = np.dot(coeff.T,M) # projection of the 
data in the new space\n return coeff,score,latent", "def eval_pos_affine():\n root_dir = \"/home/sdb/wangshentao/myspace/thesis/data/VisDrone2019-MOT-test-dev/\"\n seq_dir = root_dir + \"sequences/\"\n annotations_dir = root_dir + 'annotations/'\n affine_dir = root_dir + \"affine_orig/\"\n all_iou = []\n seqs_sample = '''\n uav0000249_00001_v\n uav0000249_02688_v\n '''\n seqs_str = seqs_sample\n seqs = [seq.strip() for seq in seqs_str.split()]\n for seq in seqs:\n image_file = os.listdir(os.path.join(seq_dir, seq))[0]\n image = cv2.imread(os.path.join(seq_dir, seq, image_file))\n orig_h, orig_w = image.shape[:2]\n\n with open(os.path.join(affine_dir, seq+'.pickle'), 'rb') as fin:\n affine_dict = pickle.load(fin)\n\n bbox, frame_id = get_frame_bbox(annotations_dir, seq + '.txt')\n predict_bbox = []\n for i in range(len(bbox)):\n # convert to std resolution\n bbox[i][:, 0] = bbox[i][:, 0]\n bbox[i][:, 1] = bbox[i][:, 1]\n bbox[i][:, 2] = bbox[i][:, 2]\n bbox[i][:, 3] = bbox[i][:, 3]\n\n # for j in range(bbox[i].shape[0]):\n # bbox[i][j] = tlwh_to_tlbr(bbox[i][j])\n for idx in range(len(bbox)):\n kalman_filter = KalmanFilter()\n trace_bbox = bbox[idx]\n trace_predict_bbox = []\n mean, covariance = kalman_filter.initiate(tlwh_to_xyah(trace_bbox[0]))\n for i in range(1, trace_bbox.shape[0]):\n # i-1 to i M\n frame_name = \"{:07d}.jpg\".format(int(frame_id[idx][i-1]))\n M = affine_dict[frame_name]\n bbox_infer = tlwh(mean)\n bbox_infer = tlwh_to_tlbr(bbox_infer)\n bbox_expand = np.ones((3, 4))\n bbox_expand[:2, 0] = bbox_infer[:2]\n bbox_expand[:2, 1] = bbox_infer[2:]\n # tr\n bbox_expand[:2, 2] = bbox_infer[2], bbox_infer[1]\n # bl\n bbox_expand[:2, 3] = bbox_infer[0], bbox_infer[3]\n bbox_expand = np.dot(M, bbox_expand)\n for t in range(bbox_expand.shape[1]):\n bbox_expand[:2, t] /= bbox_expand[2, t]\n # bbox_infer[:2] = bbox_expand[:2, 0]\n # bbox_infer[2:] = bbox_expand[:2, 1]\n # get the out bounding bbox\n bbox_infer[0] = min(bbox_expand[0, :])\n bbox_infer[1] = min(bbox_expand[1, :])\n bbox_infer[2] = max(bbox_expand[0, :])\n bbox_infer[3] = max(bbox_expand[1, :])\n bbox_infer = tlbr_to_tlwh(bbox_infer)\n # print(bbox_infer)\n trace_predict_bbox.append(bbox_infer)\n # move = mean[:4] - tlwh_to_xyah(bbox_infer)\n # if np.sum(np.square(move)[:2]) > 32*32:\n # print(move)\n # print(idx, frame_name)\n # print(mean)\n mean[:4] = tlwh_to_xyah(bbox_infer)\n # print(mean)\n mean, covariance = kalman_filter.predict(mean, covariance)\n mean, covariance = kalman_filter.update(mean, covariance, tlwh_to_xyah(trace_bbox[i]))\n\n trace_predict_bbox = np.array(trace_predict_bbox)\n for i in range(trace_predict_bbox.shape[0]):\n trace_predict_bbox[i] = tlwh_to_tlbr(trace_predict_bbox[i])\n for i in range(trace_bbox.shape[0]):\n trace_bbox[i] = tlwh_to_tlbr(trace_bbox[i])\n\n predict_bbox.append(trace_predict_bbox)\n bbox[idx] = bbox[idx][1:]\n frame_id[idx] = frame_id[idx][1:]\n assert bbox[idx].shape[0] == predict_bbox[idx].shape[0]\n iou = []\n for i in range(len(bbox)):\n trace_iou = []\n trace_bbox = bbox[i]\n trace_predict_bbx = predict_bbox[i]\n for j in range(trace_bbox.shape[0]):\n iou_val = bbox_ious(np.ascontiguousarray(trace_bbox[j][np.newaxis, :], dtype=np.float),\n np.ascontiguousarray(trace_predict_bbx[j][np.newaxis, :], dtype=np.float))\n trace_iou.append(iou_val)\n iou.append(np.array(trace_iou))\n iou = [int(np.mean(i) * 100) for i in iou]\n all_iou += iou\n bins = np.zeros(101)\n for i in all_iou:\n bins[i] += 1\n plt.bar(np.arange(101), bins)\n plt.ylabel('num')\n 
plt.xlabel('iou(*100)')\n plt.show()", "def read_images(path, sz=None, cr=None):\n c = 0\n X,y = [], []\n for dirname, dirnames, filenames in os.walk(path):\n for subdirname in dirnames:\n subject_path = os.path.join(dirname, subdirname)\n for filename in os.listdir(subject_path):\n\n if filename.endswith('.jpg'):\n try:\n im = cv2.imread(os.path.join(subject_path, filename), cv2.IMREAD_GRAYSCALE)\n #print os.path.join(subject_path, filename)\n # crop the image on the face\n if (cr is not None):\n rect, img = detect(im)\n if len(rect) == 0:\n return [None,None]\n im = img[rect[0][1]:rect[0][3], rect[0][0]:rect[0][2]]\n \n #im = Image.fromarray(img)\n # resize to given size (if given)\n if (sz is not None):\n #print im, sz\n im = cv2.resize(im, sz)\n cv2.imwrite('../data_pictures/prova'+str(c)+'.jpg',im)\n X.append(np.asarray(im, dtype=np.uint8))\n y.append(c)\n except IOError, (errno, strerror):\n print \"I/O error({0}): {1}\".format(errno, strerror)\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise\n\n\n c = c+1\n return [X,y]", "def my_pca(data_matrix, k):\n cov_matrix = np.cov(data_matrix.transpose())\n \n eigenvalues, eigenvectors = np.linalg.eig(cov_matrix)\n eigenvalues.sort()\n # sorts the eigenvalues in ascending order\n decending_eigenvalues = eigenvalues[-k:][::-1]\n # choose the highest k values and change the order to decending\n \n evalues, evectors = np.linalg.eig(cov_matrix)\n \n index_list = []\n for i in decending_eigenvalues:\n indexes = np.where(i == evalues)[0][0]\n index_list.append(indexes)\n \n \n evector_list = []\n for i in index_list:\n evector_list.append(evectors[i])\n \n evector_array = np.array(evector_list)\n \n reduced_matrix = np.dot(data_matrix, evector_array.transpose())\n \n return pd.DataFrame(reduced_matrix)", "def pca_2(emb) :\n pcaer = skd.PCA(n_components=2)\n pca = pcaer.fit_transform(emb)\n \n return pca", "def pca_pubdev_4167_OOM():\n h2o.remove_all()\n transform_types = [\"NONE\", \"STANDARDIZE\", \"NORMALIZE\", \"DEMEAN\", \"DESCALE\"] # make sure we check all tranforms\n transformN = transform_types[randint(0, len(transform_types)-1)]\n print(\"transform used on dataset is {0}.\\n\".format(transformN))\n\n training_data = h2o.import_file(path=pyunit_utils.locate(\"/Users/wendycwong/gitBackup/SDatasets/pubdev_4167_Avkash/m120K.tar\")) # Nidhi: import may not work\n\n gramSVDPCA = H2OPCA(k=training_data.ncols, transform=transformN)\n gramSVDPCA.train(x=list(range(0, training_data.ncols)), training_frame=training_data)\n\n powerSVDPCA = H2OPCA(k=training_data.ncols, transform=transformN, pca_method=\"Power\")\n powerSVDPCA.train(x=list(range(0, training_data.ncols)), training_frame=training_data)\n\n # compare singular values and stuff between power and GramSVD methods\n print(\"@@@@@@ Comparing eigenvalues between GramSVD and Power...\\n\")\n pyunit_utils.assert_H2OTwoDimTable_equal(gramSVDPCA._model_json[\"output\"][\"importance\"],\n powerSVDPCA._model_json[\"output\"][\"importance\"],\n [\"Standard deviation\", \"Cumulative Proportion\", \"Cumulative Proportion\"],\n tolerance=1e-5, check_all=False)\n print(\"@@@@@@ Comparing eigenvectors between GramSVD and Power...\\n\")\n # compare singular vectors\n pyunit_utils.assert_H2OTwoDimTable_equal(gramSVDPCA._model_json[\"output\"][\"eigenvectors\"],\n powerSVDPCA._model_json[\"output\"][\"eigenvectors\"],\n powerSVDPCA._model_json[\"output\"][\"names\"], tolerance=1e-1,\n check_sign=True)", "def process(data):\n\n images = data.collection.images()\n descriptors, 
descriptor_colors, random_colors = load_descriptors(data.descriptor, images)\n\n if data.pca.config.feature_mode == FeatureMode.Colors:\n pc_projections, pcs = process_features(random_colors, data.pca.config.neutral_factor)\n elif data.pca.config.feature_mode == FeatureMode.Descriptors:\n pc_projections, pcs = process_features(descriptors, data.pca.config.neutral_factor)\n else:\n pc_projections, pcs = process_combined_features(descriptors, descriptor_colors, random_colors,\n data.pca.config.descriptor_weight,\n data.pca.config.neutral_factor)\n\n data.pca.save(images, pc_projections, pcs)", "def get_data(path):\n all_images_as_array=[]\n label=[]\n for filename in os.listdir(path):\n try:\n if re.match(r'positive',filename):\n label.append(1)\n else:\n label.append(0)\n img=cv2.imread(path + filename)\n (b, g, r)=cv2.split(img)\n img=cv2.merge([r,g,b])\n np_array = np.asarray(img)\n l,b,c = np_array.shape\n np_array = np_array.reshape(l*b*c,)\n all_images_as_array.append(np_array)\n except:\n continue\n return np.array(all_images_as_array), np.array(label)", "def pca_transform(X, n_components=None):\n return PCA(n_components=n_components).fit_transform(X)", "def ICA(self, ep):\n #%% Artifact Correction with ICA\n \n from mne.preprocessing import ICA\n # ICA parameters:\n n_components = 20 # if float, select n_components by explained variance of PCA\n method = 'fastica'\n decim = 3 # need sufficient statistics, not all time points -> saves time\n\n # Set state of the random number generator - ICA is a\n # non-deterministic algorithm, but we want to have the same decomposition\n # and the same order of components each time\n\n picks_eeg = mne.pick_types(ep.info, meg=False, eeg=True, \n eog=False, stim=False, exclude='bads')\n\n # Define the ICA object instance\n ica = ICA(n_components=n_components, method=method, random_state = 23)\n print(ica)\n \n\n # avoid fitting ICA on crazy environmental artifacts that would\n # dominate the variance and decomposition\n reject = dict(eeg=40e-6)\n\n ica.fit(ep, picks=picks_eeg, reject = reject, decim=decim)\n\n if self.ICAplot:\n ica.plot_components() # can you spot some potential bad guys?\n\n #% Artifact detection\n eog_inds, scores = ica.find_bads_eog(ep, ch_name = 'Fp1', threshold=1) # find via correlation\n #ica.plot_scores(scores, exclude=eog_inds) # look at r scores of components\n ica.exclude.extend(eog_inds)\n\n # apply ICA\n ep = ica.apply(ep, exclude=eog_inds)\n\n if self.MRCP_bandpass: # this is basically for NIKUNJ data (by default it is bandpassed)\n # Extract MRCP and return a *band-pass* filtered signal in the range .1 Hz - 4 Hz\n ep.filter(None, 4., l_trans_bandwidth='auto', h_trans_bandwidth='auto',\n filter_length='auto', phase='zero')\n\n return ep", "def _create_PCA(self, mesh_dataset):\r\n N, V, dims = mesh_dataset.shape\r\n\r\n assert dims == 3, \"vertice dims is not 3.\"\r\n\r\n mesh_data = mesh_flatten(mesh_dataset.astype(np.float64))\r\n pca = None \r\n if hasattr(self, \"n_component\"):\r\n pca = PCA(self.n_component)\r\n \r\n print(self.n_component)\r\n unflat_function = f.partial(mesh_unflatten, vertice_size=V)\r\n print(mesh_data.shape)\r\n print(mesh_data)\r\n # pca.fit(mesh_data)\r\n pca = pca.fit(mesh_data.T)\r\n\r\n return pca, unflat_function", "def extract(self,image_path):#image_path\r\n\r\n img = caffe.io.load_image(image_path)\r\n \r\n #image1=cv2.imread(caffe_root + 'examples/images/cat.jpg') \r\n #img=cv2.cvtColor(image1,cv2.COLOR_BGR2RGB) \r\n #img=img/255. 
\r\n \r\n\r\n transformed_image = self.transformer.preprocess('data', img)\r\n self.net.blobs['data'].data[...] = transformed_image\r\n ft = self.net.forward()\r\n ft = np.squeeze(ft['pool5/7x7_s1'])\r\n ft = ft / LA.norm(ft)\r\n return ft", "def build(self, data: np.ndarray):\n ret = data.dot(self.eigenvectors)\n self.pca_predictor_vars = ret\n return ret", "def load_vecs(fin):\n h5f = tables.open_file(fin)\n h5vecs= h5f.root.vecs\n\n vecs=np.zeros(shape=h5vecs.shape,dtype=h5vecs.dtype)\n vecs[:]=h5vecs[:]\n h5f.close()\n return vecs", "def get_features(files):\n files = files.tolist()\n return np.array([pipeline(file) for file in files])", "def read_qmcpack_dense(filename):\n with h5py.File(filename, 'r') as fh5:\n enuc = fh5['Hamiltonian/Energies'][:][0]\n dims = fh5['Hamiltonian/dims'][:]\n hcore = fh5['Hamiltonian/hcore'][:]\n chol = fh5['Hamiltonian/DenseFactorized/L'][:]\n\n return hcore, chol, enuc", "def project(self, new_expn):\n \"\"\"\n data = numpy.array(self.parent.serialisedArrayDataList)\n import sklearn\n skpca = sklearn.decomposition.PCA()\n X_r = skpca.fit(data).transform(data)\n \n self.__v = X_r\n \"\"\"\n # old martrisx\n matrix = numpy.array(self.parent.serialisedArrayDataList)\n U, S, V = numpy.linalg.svd(matrix.T, full_matrices=False)\n \n print(\"matrix\", matrix.shape)\n \n # set-ups\n self.parent = new_expn\n if self.rowwise:\n self.labels = new_expn[self.label_key]\n else:\n self.labels = new_expn.getConditionNames()\n \n matrix = numpy.array(self.parent.serialisedArrayDataList)\n S = numpy.diag(S)\n print(\"U\", U.shape)\n print(\"V\", V.shape)\n print(\"S\", S.shape)\n print(\"matrix\", matrix.shape)\n \n #data = np.dot(U, np.dot(S, V))\n #X_transformed = np.dot(X_transformed, self.V.T)\n print(numpy.dot(S, V).shape)\n\n pr = numpy.dot(matrix, S)\n print(\"pr\", pr.shape)\n #y = x*W;\n #y0 = Y(1,:);\n #sum(abs(y0 - y)) %\n \n # I want a new v. 
U and D are the same.\n \n self.__v = pr\n \n print(U)\n print()\n print(pr)\n \n print(numpy.allclose(U, pr)) \n print(numpy.allclose(matrix.T, numpy.dot(U, numpy.dot(S, V))))\n return(True)", "def pca(features, components=6):\n pca = PCA(n_components=components)\n transformed = pca.fit(features).transform(features)\n scaler = MinMaxScaler(feature_range=(0, 1))\n scaler.fit(transformed)\n return scaler.transform(transformed), pca, scaler", "def load_data(from_stored_data=False):\n\n if from_stored_data:\n #data_X = pickle.load(open(file_X, \"rb\"))\n data_X = pickle.load(open(\"x_sparse_small.p\", \"rb\"))\n #data_Y = pickle.load(open(file_Y, \"rb\"))\n data_Y = pickle.load(open(\"y_sparse_small.p\", \"rb\"))\n return data_X, data_Y\n\n data_X = None\n data_Y = None\n\n for num_subject in range(num_subjects):\n print \"subject :\", str(num_subject+1), \" processing started \"\n ind_data_x = None\n ind_data_y = None\n \n subject_data = sio.loadmat(\"data/data-science-P\" + str(num_subject + 1) + \".mat\")\n\n # big three headers\n meta = subject_data.get(\"meta\")\n info = subject_data.get(\"info\")[0]\n trials = subject_data.get(\"data\")\n\n # meta data\n nvoxels = meta[\"nvoxels\"][0][0][0][0]\n colToCoord = meta[\"colToCoord\"][0][0]\n coordToCol = meta[\"coordToCol\"][0][0]\n for num_trial in range(len(trials)):\n sys.stdout.write(str(num_trial)+\" \")\n sys.stdout.flush()\n # create feature vectors\n voxels = trials[num_trial][0][0]\n #feature_vec = np.zeros(dim_x * dim_y * dim_z)\n feature_vec = np.zeros((dim_x_half, dim_y, dim_z))\n for i in range(len(voxels)):\n # save only the left of the voxels to decrease the dimension of the vector \n colInfo = colToCoord[i, :]\n x = colInfo[0] - 1 # index in data starts from 1\n y = colInfo[1] - 1 # same\n z = colInfo[2] - 1 # same\n if x < dim_x_half:\n feature_vec[x][y][z] = voxels[i]\n #feature_vec[z * (dim_x * dim_y) + y * dim_x + x] = voxels[i]\n #feature_vec[z * (dim_x_half * dim_y) + y * dim_x_half + x] = voxels[i]\n feature_vec = feature_vec.flatten()\n feature_vec = sp.csr_matrix(feature_vec)\n\n # create label vectors\n trial_info = info[num_trial]\n cond_number = trial_info[1][0][0] - 2 # starts from 2 (2 ~ 13)\n word_number = trial_info[3][0][0] - 1 # starts from 1 (1 ~ 5)\n label_vec = np.zeros(num_conds * num_words_per_cond)\n label_vec[cond_number * num_words_per_cond + word_number] = 1\n \n # append data\n #data_X = sp.vstack((data_X, feature_vec)) if data_X is not None else feature_vec\n #data_Y = np.vstack((data_Y, label_vec)) if data_Y is not None else label_vec\n ind_data_x = sp.vstack((ind_data_x, feature_vec)) if ind_data_x is not None else feature_vec\n ind_data_y = np.vstack((ind_data_y, label_vec)) if ind_data_y is not None else label_vec\n\n # save ind_data files\n pickle.dump(ind_data_x, open(\"ind_\"+str(num_subject+1)+\"_x\", \"wb\"))\n pickle.dump(ind_data_y, open(\"ind_\"+str(num_subject+1)+\"_y\", \"wb\"))\n\n print \"subject :\", str(num_subject+1), \" processing done \"\n \n # save data file\n #pickle.dump(data_X, open(file_X, \"wb\"))\n #pickle.dump(data_Y, open(file_Y, \"wb\"))\n\n return data_X, data_Y", "def kernelpca(X, n_comp):\n estimator = decomposition.KernelPCA(n_components = n_comp, kernel = 'rbf')\n estimator.fit(X)\n X_proj = estimator.transform(X)\n return estimator.components_, X_proj,", "def run_PCA(self, sparse_matrix):\n\n pca_explained = np.cumsum(PCA().fit(sparse_matrix).explained_variance_ratio_)\n pca_explainedby = np.where(pca_explained>=0.9)[0][0]\n pca = 
PCA(n_components=pca_explainedby)\n pca.fit(sparse_matrix)\n \n today = datetime.date.today()\n filename = 'sparse_pca_model.pkl'\n joblib.dump(pca, filename)\n \n return pca.transform(sparse_matrix), pca", "def test_affine():\n root_dir = \"/home/sdb/wangshentao/myspace/thesis/data/VisDrone2019-MOT-test-dev/\"\n seq_dir = root_dir + \"sequences/\"\n affine_dir = root_dir + \"affine_orig_v2/\"\n MIN_MATCH_COUNT = 10\n # 1088 is more accurate\n seqs_sample = '''\n uav0000249_00001_v\n uav0000249_02688_v\n '''\n seqs_str = seqs_sample\n seqs = [seq.strip() for seq in seqs_str.split()]\n for seq in seqs:\n print(seq)\n with open(os.path.join(affine_dir, seq+'.pickle'), 'rb') as fin:\n affine_dict = pickle.load(fin)\n seq_files = os.listdir(os.path.join(seq_dir, seq))\n seq_files = sorted(seq_files, key=lambda x: int(x[:-4]))\n for i in range(34, len(seq_files)-1):\n frame_name = \"{:07d}.jpg\".format(i)\n M = affine_dict[frame_name]\n print(i)\n image0 = cv2.imread(os.path.join(seq_dir, seq, seq_files[i]))\n image1 = cv2.imread(os.path.join(seq_dir, seq, seq_files[i+1]))\n image0 = cv2.cvtColor(image0, cv2.COLOR_BGR2GRAY)\n image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)\n # surf = cv2.xfeatures2d.SURF_create()\n # kp0, des0 = surf.detectAndCompute(image0, None)\n # kp1, des1 = surf.detectAndCompute(image1, None)\n # FLANN_INDEX_KDTREE = 0\n # index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\n # search_params = dict(checks=10)\n #\n # flann = cv2.FlannBasedMatcher(index_params, search_params)\n # matchs = flann.knnMatch(des0, des1, k=2)\n #\n # # store all the good matchs as per Lowe's ratio test\n # good = []\n # for m, n in matchs:\n # if m.distance < 0.7 * n.distance:\n # good.append(m)\n # if len(good) > MIN_MATCH_COUNT:\n # src_pts = np.float32([kp0[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)\n # dst_pts = np.float32([kp1[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)\n # M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\n # else:\n # M = np.eye(3, 3)\n\n image0_transform = cv2.warpPerspective(image0, M, (image0.shape[1], image0.shape[0]))\n bbox = np.array([540, 540, 600, 1079])\n bbox_expand = np.ones((3, 2))\n bbox_expand[:2, 0] = bbox[:2]\n bbox_expand[:2, 1] = bbox[2:]\n bbox_expand = np.dot(M, bbox_expand)\n bbox_transform = np.concatenate([bbox_expand[:2, 0], bbox_expand[:2, 1]])\n bbox_transform = bbox_transform.astype(np.uint64)\n\n # show the images\n plt.figure(i, figsize=(16, 9))\n plt.subplot(2, 2, 1)\n cv2.rectangle(image0, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 2)\n cv2.rectangle(image0, (bbox_transform[0], bbox_transform[1]), (bbox_transform[2], bbox_transform[3]),\n (0, 0, 255), 2)\n plt.imshow(image0)\n plt.subplot(2, 2, 2)\n cv2.rectangle(image1, (bbox_transform[0], bbox_transform[1]), (bbox_transform[2], bbox_transform[3]),\n (0, 255, 0), 2)\n plt.imshow(image1)\n plt.subplot(2, 2, 3)\n cv2.rectangle(image0_transform, (bbox_transform[0], bbox_transform[1]), (bbox_transform[2], bbox_transform[3]),\n (0, 255, 0), 2)\n plt.imshow(image0_transform)\n plt.show()", "def run(self, data):\n\t\treduced_data = PCA(n_components=2).fit_transform(data)\n\n\t\t# Run the algorithm\n\t\tself.estimator.fit_transform(reduced_data)\n\n\t\t# Save all relevent properties\n\t\tself.input_data = data\n\t\tself.centroids = self.estimator.cluster_centers_\n\t\tself.node_positions = reduced_data\n\t\tself.labels = self.estimator.labels_\n\n\t\t# Enable visualising when debugging\n\t\t# self.visualize(reduced_data)", "def do_pca(X, y, components: 
int = 2, plot: bool = True):\n\n new_X = []\n for i in X:\n new_X.append(i.flatten())\n\n X = new_X\n\n # PCA Stuff?\n pca = PCA(n_components=components)\n pca.fit(X)\n\n # Transform input data based on eigenvectors\n X = pca.transform(X)\n\n # Get scatters\n x = [i[0] for i in X]\n w = [i[1] for i in X]\n\n # plot\n\n plt.scatter(x, w, c=y)\n plt.show()", "def read_czi(filename, trim=False, swapaxes=True):\n stack = czifile.imread(filename)\n stack = np.squeeze(stack)\n # Trim off last frame \n if trim:\n stack = stack[0:stack.shape[0]-1]\n if (swapaxes):\n stack = np.swapaxes(stack,0,1)\n return stack", "def pca(data, components):\n\n\t_pca = PCA(n_components = components)\n\t_pca.fit(data)\n\tvar = _pca.explained_variance_ratio_\n\tcum_var = np.cumsum(np.round(var, decimals=4)*100)\n\tfig = plt.plot(cum_var)\n\trotation = pd.DataFrame(\n\t\t_pca.components_,\n\t\tcolumns = data.columns,\n\t\tindex = ['PC-1','PC-2','PC-3','PC-4','PC-5','PC-6','PC-7','PC-8','PC-9',]\n\t\t)\n\n\treturn (fig, rotation)", "def process(file_name):\n img=Image.open(str(file_name))\n cim_resized = img.resize((40,40), resample=Image.LANCZOS)\n n = cim_resized.convert('L')\n cropped = np.array(n).astype(np.float64)\n im=Image.fromarray(cropped)\n im.show()\n normalized_cropped_image = cropped - np.mean(cropped)\n normalized_cropped_image = normalized_cropped_image.reshape((-1, image_size, image_size, num_channels)).astype(np.float32)\n predicted_arr = predict(normalized_cropped_image)\n label = ''.join(['' if int(x[0]) == 10 else str(x[0]) for x in list(predicted_arr)])\n print 'LABEL: ' + label", "def pca(X, ndim):\n\n Xmean = X - np.mean(X, axis=0)\n _, _, vh = np.linalg.svd(Xmean)\n W = vh[:ndim].T\n T = np.matmul(Xmean, W)\n\n return T", "def pca_helper(_args):\n # unpack args\n _trimmed_frame, _win, _sou_name, _sou_dir, _out_path, \\\n _library, _library_names_short, _fwhm, _plsc, _sigma, _nrefs, _klip = _args\n\n # run pca\n try:\n output = pca(_trimmed_frame=_trimmed_frame, _win=_win, _sou_name=_sou_name,\n _sou_dir=_sou_dir, _out_path=_out_path,\n _library=_library, _library_names_short=_library_names_short,\n _fwhm=_fwhm, _plsc=_plsc, _sigma=_sigma, _nrefs=_nrefs, _klip=_klip)\n return output\n except Exception as _e:\n print(_e)\n return None\n # finally:\n # return None", "def extract(src_dir,feat_file,ivectors_dir,num_gselect):\n os.system(\"./extract_ivectors.sh --num-gselect \"+str(num_gselect)+ \" \" + src_dir + \" \" + feat_file + \" \" + ivectors_dir)\n keys=[]\n ivectors=np.empty((0,0))\n for key,mat in kaldi_io.read_vec_flt_scp(ivectors_dir+'/ivector.scp'):\n if ivectors.shape[1] != mat.shape[0]:\n ivectors=ivectors.reshape((0,mat.shape[0]))\n ivectors=np.vstack((ivectors,mat))\n keys.append(key)\n\n ivectors=np.asarray(ivectors)\n keys=np.asarray(keys)\n return ivectors,keys", "def get_pca_data(dataframe):\n # We don't reduce dimensionality, but overlay the 2 principal components in 2D.\n pca = PCA(n_components=2)\n \n x = dataframe[['df1', 'df2']].values\n try:\n # df1 and df2 have the same scale. No need to standardize. 
Standardizing might actually distort PCA here.\n pca.fit(x)\n except ValueError:\n # Return empty.\n df = pd.DataFrame(columns=['var_expl', 'var_expl_ratio', 'x', 'y', 'meanx', 'meany'])\n else:\n df = pd.DataFrame({'var_expl': pca.explained_variance_.T,\n 'var_expl_ratio': pca.explained_variance_ratio_.T * 100, # In percent\n 'x': pca.components_[:, 0],\n 'y': pca.components_[:, 1],\n 'meanx': pca.mean_[0],\n 'meany': pca.mean_[1],\n },\n index=[1, 2] # For designating principal components.\n )\n df.index.rename('PC', inplace=True)\n return df", "def preprocess(train_dataset, test_dataset):\n pca = PCA(n_components=20)\n pca.fit(train_dataset)\n train_dataset = pca.transform(train_dataset)\n test_dataset = pca.transform(test_dataset)\n return train_dataset, test_dataset", "def pca(X: np.array, k: int) -> np.array:\n n, d = X.shape\n X = X - np.mean(X, 0) # mean value of each dimension\n C = np.dot(np.transpose(X), X) # covariance matrix\n if not PCA._check_real_symmetric(C):\n raise ArithmeticError('Covariance matrix is not real symmetric')\n eig_val, eig_vec = np.linalg.eig(C) # eigenvalue, eigenvector\n eig_pairs = [(np.abs(eig_val[i]), eig_vec[:, i]) for i in range(d)] # eigen-value-vector tuples\n topk_pairs = heapq.nlargest(k, eig_pairs) # retrieve top-k eigenvalue pairs\n P = np.array([pair[1] for pair in topk_pairs]) # permutation matrix\n return np.dot(np.real(P), np.transpose(X)).T", "def PCA_reduction(\n df: pd.DataFrame,\n cols: List[str],\n n_components: int,\n prefix: str = 'PCA_',\n random_seed: int = 42,\n keep: bool = False\n) -> pd.DataFrame:\n print(\"Executing PCA reduction on dataset...\")\n df = df.copy()\n pca = decomposition.PCA(n_components=n_components, random_state=random_seed)\n\n principal_components = pca.fit_transform(df[cols])\n\n principal_df = pd.DataFrame(principal_components)\n if not keep:\n df.drop(cols, axis=1, inplace=True)\n\n principal_df.rename(columns=lambda x: str(prefix) + str(x), inplace=True)\n\n # Align index of principal components and the original dataset.\n principal_df = principal_df.set_index(df.index)\n\n df = pd.concat([df, principal_df], axis=1)\n\n return df", "def prepare_train_pascal_data(args):\n image_dir, annotation_dir, data_dir = args.train_pascal_image_dir, args.train_pascal_annotation_dir, args.train_pascal_data_dir\n batch_size = args.batch_size\n basic_model = args.basic_model\n num_roi = args.num_roi\n\n files = os.listdir(annotation_dir)\n img_ids = list(range(len(files)))\n\n img_files = []\n img_heights = []\n img_widths = []\n anchor_files = []\n gt_classes = []\n gt_bboxes = []\n\n for f in files:\n annotation = os.path.join(annotation_dir, f)\n\n tree = ET.parse(annotation)\n root = tree.getroot()\n\n img_name = root.find('filename').text \n img_file = os.path.join(image_dir, img_name)\n img_files.append(img_file) \n\n img_id_str = os.path.splitext(img_name)[0]\n\n size = root.find('size')\n img_height = int(size.find('height').text)\n img_width = int(size.find('width').text)\n img_heights.append(img_height) \n img_widths.append(img_width) \n\n anchor_files.append(os.path.join(data_dir, img_id_str+'_'+basic_model+'_anchor.npz')) \n\n classes = [] \n bboxes = [] \n for obj in root.findall('object'): \n class_name = obj.find('name').text\n class_id = pascal_class_ids[class_name]\n classes.append(class_id) \n\n bndbox = obj.find('bndbox')\n xmin = int(bndbox.find('xmin').text)\n ymin = int(bndbox.find('ymin').text)\n xmax = int(bndbox.find('xmax').text)\n ymax = int(bndbox.find('ymax').text)\n bboxes.append([ymin, 
xmin, ymax-ymin+1, xmax-xmin+1]) \n\n gt_classes.append(classes) \n gt_bboxes.append(bboxes) \n \n print(\"Building the training dataset...\")\n dataset = DataSet(img_ids, img_files, img_heights, img_widths, batch_size, anchor_files, gt_classes, gt_bboxes, True, True)\n print(\"Dataset built.\")\n return dataset", "def inverse_pca(self, pca_img, components):\n reconstruct = np.dot(pca_img, components.T).astype(int)\n return reconstruct.reshape(-1, 28, 28)", "def apply_algorithms(x: np.ndarray, label_true, params, components, database_name):\n names = ['Original dataset', 'Our PCA results', 'KMeans with previous our PCA reduction',\n 'KMeans without previous reduction (PCA)', 'KMeans without previous reduction (T-SNE)']\n\n datasets = []\n labels = []\n reduct = []\n\n # get the representation of the original matrix splitted to be plotted\n partial_x = split_db_original(x, components)\n datasets.append(partial_x)\n labels.append(label_true)\n reduct.append(None)\n\n # get our PCA\n pca = OPCA(n_components=params['n_components'])\n our_pca = pca.fit_transform(x)\n datasets.append(our_pca)\n labels.append(label_true)\n reduct.append(None)\n\n # get PCA and IPCA from sklearn\n sk_pca = pca_sklearn(x, params['db_name'], params['n_components'])\n sk_ipca = ipca_sklearn(x, params['db_name'], params['n_components'])\n\n # compare the three PCA algorithms\n name = ['Our PCA', 'SK_PCA', 'SK_IPCA', 'original_data']\n pca_data = [our_pca, sk_pca['db'], sk_ipca['db'], x]\n apply_evaluation(pca_data, label_true, params, name, database_name)\n\n # KMeans with PCA reduction\n algorithm = KMeans(k=params['k'], seed=params['seed'], max_it=params['max_it'], tol=params['tol'])\n labels_kmeans = algorithm.fit_predict(our_pca)\n datasets.append(our_pca)\n labels.append(labels_kmeans)\n reduct.append(None)\n\n # KMeans without PCA reduction\n algorithm = KMeans(k=params['k'], seed=params['seed'], max_it=params['max_it'], tol=params['tol'])\n labels_kmeans = algorithm.fit_predict(x)\n datasets.append(x)\n labels.append(labels_kmeans)\n reduct.append('pca')\n datasets.append(x)\n labels.append(labels_kmeans)\n reduct.append('tsne')\n\n # selection number of dimensions of plot\n if type(params['n_components']) == int:\n if params['n_components'] == 2:\n nd = 2\n if params['n_components'] > 2:\n nd = 3\n elif type(params['n_components']) == float:\n if our_pca.shape[1] == 2:\n nd = 2\n if our_pca.shape[1] > 2:\n nd = 3\n else:\n nd = 3\n\n if nd == 2:\n pca_names = ['PCA Component 1', 'PCA Component 2']\n plot_names = [components[0], pca_names, pca_names, pca_names, ['TSNE 1', 'T-SNE 2']]\n plot2d(datasets, labels, names, plot_names, reduct)\n elif nd == 3:\n pca_names = ['PCA Component 1', 'PCA Component 2', 'PCA Component 3']\n plot_names = [components[0], pca_names, pca_names, pca_names, ['TSNE 1', 'T-SNE 2', 'T-SNE 3']]\n plot3d(datasets, labels, names, plot_names, reduct)", "def load_egohands_dataset(root: str):\n\n # iterate over all sub-directory in root\n for dir_name in os.listdir(root):\n path = os.path.join(root, dir_name)\n if os.path.isdir(path):\n # path is the sub-directory of root\n # check the presence of polygons.mat in the directory\n full_path = os.path.join(path, 'polygons.mat')\n if os.path.isfile(full_path):\n # get the list of frames, which is all file in the directory with \"frame_\" and \".jpg\" in the file name\n # we don't have to make this a list, since sorting on the iterable is acceptable\n frames = filter(lambda fn: 'frame_' in fn and '.jpg' in fn, os.listdir(path))\n # os.listdir 
list file with correct order only on some platforms, so we have to sort it to make sure the rank is correct\n frames = sorted(frames)\n\n # we treat sub-directory name in root as the scene name\n scene = dir_name\n\n # load all polygons, and change its format into what we want (3-d array)\n polygons = loadmat(full_path)['polygons'][0]\n polygons = np.stack([ polygons[label] for label in orig_labels ], axis=1)\n\n # co-iterate frame and polygon\n # if len(frames) and len(polygons) are not the same, exception will be thrown\n for framedata in zip(frames, polygons):\n\n # retrive frame-polygon pair\n f, p = framedata\n f = os.path.join(path, f) # build full path of frame\n\n # calculate bounding rect of each polygon (we do not use MaskRCNN so the rectangle region should work)\n boxes = []\n labels = []\n for label_id in range(len(orig_labels)):\n label_name = orig_labels[label_id]\n if p[label_id].shape[1] != 0:\n boxes.append(torch.tensor(get_bounding_rect(p[label_id].squeeze()), dtype=torch.float))\n labels.append(label_id)\n \n # if we store image in memory, load image now\n if MEMORY_CACHE:\n f = cv2.imread(f)\n f = torch.from_numpy(f).permute((2, 0, 1)).float() # change shape into (band, width, height)\n \n # if we have a box in this frame, show it\n if len(boxes) > 0:\n yield { 'file': f, 'scene': scene, 'boxes': torch.stack(boxes), 'labels': torch.tensor(labels, dtype=torch.int64) }\n else:\n print('Warning: {} does not exist.'.format(full_path))\n return", "def do_pca(x_data, n_class):\n\n run_pca = decomposition.PCA(n_components = n_class)\n pca_fit = run_pca.fit(x_data)\n #pca_fit\n x_pca = run_pca.transform(x_data);\n #pca_cov = run_pca.get_covariance(x_pca)\n #pca_score = run_pca.score(x_data)\n pca_noise = pca_fit.noise_variance_\n pca_var_explained = pca_fit.explained_variance_ratio_\n\n return x_pca, pca_noise, pca_var_explained" ]
[ "0.64291614", "0.62826765", "0.62180424", "0.6019128", "0.6007571", "0.60039264", "0.59644985", "0.59346586", "0.5876533", "0.57791483", "0.57538944", "0.5750904", "0.57483804", "0.57071185", "0.5668783", "0.56590796", "0.5637482", "0.5597626", "0.55744284", "0.5562899", "0.5535298", "0.55292225", "0.5503835", "0.55013126", "0.5486012", "0.5478789", "0.5470229", "0.546947", "0.54495", "0.5441524", "0.5440769", "0.54306936", "0.541566", "0.54127014", "0.5405781", "0.54021984", "0.5384808", "0.5362954", "0.53608656", "0.5353057", "0.53067744", "0.528947", "0.5281025", "0.52784216", "0.5269824", "0.52578396", "0.5253146", "0.52275234", "0.5212363", "0.52098846", "0.51998526", "0.51992834", "0.5196553", "0.5194992", "0.51885647", "0.51854014", "0.51606023", "0.51400125", "0.5124668", "0.5124519", "0.51116115", "0.5097274", "0.5095277", "0.507667", "0.5072097", "0.5070801", "0.50659156", "0.505826", "0.50570506", "0.50538343", "0.504812", "0.50425595", "0.50368726", "0.5033159", "0.50316614", "0.50296", "0.5029386", "0.5025891", "0.50025076", "0.4994529", "0.49821612", "0.49790066", "0.49658135", "0.49653578", "0.4964238", "0.49631944", "0.49506325", "0.49421802", "0.49409786", "0.49395296", "0.4939354", "0.49199152", "0.49180654", "0.49169543", "0.49073306", "0.49065495", "0.4904775", "0.490005", "0.48982316", "0.4893418" ]
0.5309758
40
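For reference, the document field of the record above builds PCA/ICA/TSVD/GRP/SRP features by fitting each decomposition on the training frame and reusing the fitted transformer for frames without a target. Below is a minimal scikit-learn sketch of that fit-on-train, transform-on-test pattern, limited to PCA and TruncatedSVD; the names (add_decomposition_features, train_df, test_df, n_comp) are hypothetical and not taken from the dataset itself, and pandas DataFrames are assumed.

from sklearn.decomposition import PCA, TruncatedSVD

def add_decomposition_features(train_df, test_df, n_comp=5, random_seed=42):
    # Fit each decomposition on the training frame only, then apply the same
    # fitted transform to the test frame so both frames share one projection
    # (mirroring the branch in the record where the test frame has no target).
    pca = PCA(n_components=n_comp, random_state=random_seed)
    tsvd = TruncatedSVD(n_components=n_comp, random_state=random_seed)
    pca_train = pca.fit_transform(train_df)
    tsvd_train = tsvd.fit_transform(train_df)
    pca_test = pca.transform(test_df)
    tsvd_test = tsvd.transform(test_df)
    train_out, test_out = train_df.copy(), test_df.copy()
    for i in range(1, n_comp + 1):
        train_out['pca_' + str(i)] = pca_train[:, i - 1]
        test_out['pca_' + str(i)] = pca_test[:, i - 1]
        train_out['tsvd_' + str(i)] = tsvd_train[:, i - 1]
        test_out['tsvd_' + str(i)] = tsvd_test[:, i - 1]
    return train_out, test_out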
Find the z rotation such that ZA RA is as close as possible to RB; this maximizes the trace of (RB^T ZA RA) = trace(ZA RA RB^T)
def find_inplane_to_match(phiA,thetaA,phiB,thetaB,psiA=0,psiB=0): #from math import pi, sqrt, cos, acos, sin RA = Transform({'type': 'spider', 'phi': phiA, 'theta': thetaA, 'psi': psiA}) RB = Transform({'type': 'spider', 'phi': phiB, 'theta': thetaB, 'psi': psiB}) RBT = RB.transpose() RABT = RA * RBT RABTeuler = RABT.get_rotation('spider') RABTphi = RABTeuler['phi'] RABTtheta = RABTeuler['theta'] RABTpsi = RABTeuler['psi'] #deg_to_rad = pi/180.0 #thetaAR = thetaA*deg_to_rad #thetaBR = thetaB*deg_to_rad #phiAR = phiA*deg_to_rad #phiBR = phiB *deg_to_rad #d12=cos(thetaAR)*cos(thetaBR) + sin(thetaAR)*sin(thetaBR)*cos(phiAR-phiBR) return (-RABTpsi-RABTphi),RABTtheta # 180.0*acos(d12)/pi;
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
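The document above computes this angle through SPIDER Euler angles and the Transform class; the maximization itself has a closed form. Writing Z(psi) for a rotation by psi about z and M = RA RB^T, trace(Z(psi) M) = (M11 + M22) cos(psi) + (M12 - M21) sin(psi) + M33, which is maximized at psi = atan2(M12 - M21, M11 + M22). A minimal numpy sketch of that closed form follows; the function name best_inplane_z_angle is hypothetical and not taken from the record.

import numpy as np

def best_inplane_z_angle(RA, RB):
    # RA, RB: 3x3 rotation matrices. Returns psi (in radians) maximizing
    # trace(Z(psi) @ RA @ RB.T), i.e. the in-plane z rotation that best
    # aligns Z(psi) RA with RB.
    M = RA @ RB.T
    return np.arctan2(M[0, 1] - M[1, 0], M[0, 0] + M[1, 1])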
[ "def calc_optimal_angle(teta_z, latitude, transmissivity):\n if transmissivity <= 0.15:\n gKt = 0.977\n elif 0.15 < transmissivity <= 0.7:\n gKt = 1.237 - 1.361 * transmissivity\n else:\n gKt = 0.273\n Tad = 0.98 # transmittance-absorptance product of the diffuse radiation\n Tar = 0.97 # transmittance-absorptance product of the reflected radiation\n Pg = 0.2 # ground reflectance of 0.2\n l = radians(latitude)\n a = radians(teta_z)\n b = atan((cos(a) * tan(l)) * (1 / (1 + ((Tad * gKt - Tar * Pg) / (2 * (1 - gKt)))))) # eq.(11)\n return abs(b)", "def find_za(lyb_z):\n return (lyb_z + 1)*(lyb_rest/lya_rest)-1", "def findRotationMatrix(xS, yS, zS):\n dist = zS**2+xS**2\n #make transformation of z-y plane using angle from linear fit\n xS = np.reshape(xS, (-1,1))\n zS = np.reshape(zS, (-1,1))\n turns = np.argpartition(dist, -250)[:-250]\n# #use ransac to obtain a fit of all non-turn points \n# #(use the fact there are more non-turns than turn points\n# reg = linear_model.RANSACRegressor()\n# reg.fit(zS, xS)\n# #use ransac outliers to fit\n# # turns only - fit to align to z-axis\n# turns = np.where(reg.inlier_mask_==False)[0]\n # fit outliers (turns) only\n reg = linear_model.LinearRegression()\n reg.fit(xS[turns], zS[turns]) \n # outliers need to align with z\n theta = np.pi/2.-np.arctan(reg.coef_[0])[0]\n c, s = np.cos(theta), np.sin(theta)\n R = np.matrix([[0,1,0],[c,0, -s], [s,0, c]])\n\n return R", "def rotate_ZNE_LQT(z, n, e, ba, inc):\n if len(z) != len(n) or len(z) != len(e):\n raise TypeError(\"Z, North and East component have different length!?!\")\n if ba < 0 or ba > 360:\n raise ValueError(\"Back Azimuth should be between 0 and 360 degrees!\")\n if inc < 0 or inc > 360:\n raise ValueError(\"Inclination should be between 0 and 360 degrees!\")\n ba *= 2 * pi / 360\n inc *= 2 * pi / 360\n l = z * cos(inc) - n * sin(inc) * cos(ba) - e * sin(inc) * sin(ba)\n q = z * sin(inc) + n * cos(inc) * cos(ba) + e * cos(inc) * sin(ba)\n t = n * sin(ba) - e * cos(ba)\n return l, q, t", "def angle(z):", "def find_zb(lya_z):\n return (lya_z + 1)*(lya_rest/lyb_rest)-1", "def rotate_RT_NE(n, e, ba):\n ba = 360.0 - ba\n return rotate_NE_RT(n, e, ba)", "def rawsolve(self,):\n m = self.m\n n = self.n\n z = self.z\n mark = self.mark\n kAAt = self.kAAt\n iAAt = self.iAAt\n AAt = self.AAt\n diag = self.diag\n consistent = True\n eps = 0.0\n m2 = m+n\n\n if self.ndep:\n eps = self.epssol * np.abs(z).max()\n\n #/*------------------------------------------------------+\n #| |\n #| -1 |\n #| z <- L z |\n #| */\n\n for i in range(m2):\n if mark[i]:\n beta = z[i]\n for k in range(kAAt[i], kAAt[i+1]):\n row = iAAt[k]\n z[row] -= AAt[k]*beta\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n #/*------------------------------------------------------+\n #| |\n #| -1 |\n #| z <- D z |\n #| */\n\n for i in range(m2-1, -1, -1):\n if mark[i]:\n z[i] = z[i]/diag[i]\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n #/*------------------------------------------------------+\n #| |\n #| t -1 |\n #| z <- (L ) z |\n #| */\n\n for i in range(m2-1, -1, -1):\n if mark[i]:\n beta = z[i]\n for k in range(kAAt[i], kAAt[i+1]):\n beta -= AAt[k]*z[iAAt[k]]\n z[i] = beta\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n return consistent", "def tinker(self, sigma, z):\n \n aa = self.params[0]\n a = self.params[1]\n b = self.params[2]\n c = self.params[3]\n \n aa_z = aa * pow((1.e0+z), -0.14e0)\n a_z = a * pow((1.e0+z), -0.06e0)\n alpha = exp(-(pow(0.75e0/(log(200.e0/0.75e0)), 
1.2e0)))\n b_z = b * pow((1.e0+z), -alpha)\n\n f_t = aa_z * ( pow(sigma/b_z, -a_z) + 1.e0 ) * exp( -c/(sigma*sigma) )\n\n return f_t", "def _wrapFix(az, ant):\n azlimpos = queryCommonDouble(\"Drive.Limit.azHighSwLimitVal\", ant)\n azlimneg = queryCommonDouble(\"Drive.Limit.azLowSwLimitVal\", ant)\n if az > azlimpos: return az-360\n if az < azlimneg: return az+360 \n return az", "def calc_lookback_time(z):\n\n def integrand(z):\n return 1.0 / (H_z(z) * (1.0 + z)) * (H0_inverse*H0)\n\n t, t_err = quad(integrand, 0, z)\n\n return t", "def relTrace(mat, spinorsize):\n\n top = mat[:spinorsize, :spinorsize]\n bottom = mat[spinorsize:, spinorsize:]\n return 2*(top+bottom)", "def Rot_to_quaternion(r: array):\n\n # Compute the trace of the rotation matrix\n tr = r[0, 0] + r[1, 1] + r[2, 2]\n\n if tr > 0:\n S = sqrt(tr + 1.0) * 2\n qw = 0.25 * S\n qx = (r[2, 1] - r[1, 2]) / S\n qy = (r[0, 2] - r[2, 0]) / S\n qz = (r[1, 0] - r[0, 1]) / S\n elif (r[0, 0] > r[1, 1]) and (r[0, 0] > r[2, 2]):\n S = sqrt(1.0 + r[0, 0] - r[1, 1] - r[2, 2]) * 2\n qw = (r[2, 1] - r[1, 2]) / S\n qx = 0.25 * S\n qy = (r[0, 1] + r[1, 0]) / S\n qz = (r[0, 2] + r[2, 0]) / S\n elif r[1, 1] > r[2, 2]:\n S = sqrt(1.0 + r[1, 1] - r[0, 0] - r[2, 2]) * 2\n qw = (r[0, 2] - r[2, 0]) / S\n qx = (r[0, 1] + r[1, 0]) / S\n qy = 0.25 * S\n qz = (r[1, 2] + r[2, 1]) / S\n else:\n S = sqrt(1.0 + r[2, 2] - r[0, 0] - r[1, 1]) * 2\n qw = (r[1, 0] - r[0, 1]) / S\n qx = (r[0, 2] + r[2, 0]) / S\n qy = (r[1, 2] + r[2, 1]) / S\n qz = 0.25 * S\n\n q = array([qw, qx, qy, qz])\n q = q * sign(qw)\n\n return q", "def z_r(b):\n return b/1e-2 + 0.5", "def rotate_LQT_ZNE(l, q, t, ba, inc):\n if len(l) != len(q) or len(l) != len(t):\n raise TypeError(\"L, Q and T component have different length!?!\")\n if ba < 0 or ba > 360:\n raise ValueError(\"Back Azimuth should be between 0 and 360 degrees!\")\n if inc < 0 or inc > 360:\n raise ValueError(\"Inclination should be between 0 and 360 degrees!\")\n ba *= 2 * pi / 360\n inc *= 2 * pi / 360\n z = l * cos(inc) + q * sin(inc)\n n = -l * sin(inc) * cos(ba) + q * cos(inc) * cos(ba) + t * sin(ba)\n e = -l * sin(inc) * sin(ba) + q * cos(inc) * sin(ba) - t * cos(ba)\n return z, n, e", "def altaz2radec(az, alt, t):\n if not isinstance(t, Time):\n try:\n t = Time(t)\n except:\n raise ValueError(\"\\n\\t=== Time syntax should be 'YYYY-MM-DD hh:mm:ss' ===\") \n frame = coord.AltAz(obstime=t, location=nancay())\n altaz = coord.SkyCoord(az*u.deg, alt*u.deg, frame=frame)\n radec = altaz.transform_to(coord.FK5(equinox='J2000'))\n return radec.ra.rad, radec.dec.rad", "def rt60_eyring(S, V, a, m, c):\n\n return -(24 * np.log(10) / c) * V / (S * np.log(1 - a) + 4 * m * V)", "def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n 
#####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0", "def get_inplane_angle(ima,ref, iring=1, fring=-1, ringstep=1, xtransSearch=0, ytransSearch=0, stp=1, center=1):\n\n\tfrom alignment import Numrinit, ringwe, Applyws, ormq\n\tfrom filter import fshift\n\n\tfirst_ring=int(iring); last_ring=int(fring); rstep=int(ringstep); xrng=int(xtransSearch); yrng=int(ytransSearch); step=int(stp)\t\n\tnx=ima.get_xsize()\n\tif(last_ring == -1): last_ring=int(nx/2)-2\n\tcnx = int(nx/2)+1\n \tcny = cnx\n \tmode = \"F\"\n \t#precalculate rings\n\tnumr = Numrinit(first_ring, last_ring, rstep, mode)\n \twr = ringwe(numr, mode)\n\tif(center==1):\n\t\tcs = [0.0]*2 # additio\n\t\tcs = ref.phase_cog()\n\t\tref1 = fshift(ref, -cs[0], -cs[1])\n\t\tcimage=Util.Polar2Dm(ref1, cnx, cny, numr, mode)\n\t\tcs = ima.phase_cog()\n\t\tima1 = fshift(ima, -cs[0], -cs[1])\n\telse:\n\t\tima1=ima.copy()\n\t\tcimage=Util.Polar2Dm(ref, cnx, cny, numr, mode)\n\tUtil.Frngs(cimage, numr)\n\tApplyws(cimage, numr, wr)\n\t[angt, sxst, syst, mirrort, peakt]=ormq(ima1, cimage, xrng, yrng, step, mode, numr, cnx, cny)\n\treturn angt,sxst, syst, mirrort, peakt", "def determine_rotation(arm, d, tip_data, rot_data):\n n_t = np.zeros(3)\n for this_n_t in tip_data['pos_ntip_wrt_r']:\n n_t += this_n_t\n n_t /= len(tip_data['pos_ntip_wrt_r'])\n print(\"Our n_t to use in this stage: {}\".format(n_t))\n\n K = len(rot_data['pos_ntip_wrt_s'])\n errors_zyz = []\n errors_zyx = []\n\n for k in range(K):\n lhs = rot_data['pos_ntip_wrt_s'][k]\n t_st = rot_data['pos_tool_wrt_s_code'][k]\n ypr = rot_data['rot_tool_wrt_s_code'][k]\n yaw, pitch, roll = ypr[0], ypr[1], ypr[2]\n\n # R_zyz\n R_z1 = U.rotation_matrix_3x3_axis(angle=roll, axis='z')\n R_y = U.rotation_matrix_3x3_axis(angle=pitch, axis='y')\n R_z2 = U.rotation_matrix_3x3_axis(angle=yaw, axis='z')\n R_zyz = R_z2.dot(R_y).dot(R_z1)\n\n # R_zyx\n R_x = U.rotation_matrix_3x3_axis(angle=roll, axis='x')\n R_y = U.rotation_matrix_3x3_axis(angle=pitch, axis='y')\n R_z = U.rotation_matrix_3x3_axis(angle=yaw, axis='z')\n R_zyx = R_z.dot(R_y).dot(R_x)\n\n # Evaluate!\n rhs_zyz = t_st + R_zyz.dot( n_t )\n rhs_zyx = t_st + R_zyx.dot( n_t )\n err_zyz = np.linalg.norm(lhs - rhs_zyz)\n err_zyx = np.linalg.norm(lhs - rhs_zyx)\n errors_zyz.append( err_zyz )\n errors_zyx.append( err_zyx )\n print(\"\\nerr_zyz: {:.3f} for {}-th sample\".format(err_zyz, k))\n print(\"err_zyx: {:.3f} for {}-th sample\".format(err_zyx, k))\n print(\"R_zyz:\\n{}\".format(R_zyz))\n print(\"R_zyx:\\n{}\".format(R_zyx))\n\n print(\"\\nDone with evaluation!\")\n print(\"zyz has avg error {:.5f}\".format(np.mean(errors_zyz)))\n print(\"zyx has avg error {:.5f}\".format(np.mean(errors_zyx)))", "def Omega_rad_z(self, z):\n return self.Omega_gamma_z(z)+self.Omega_ur_z(z)", "def test_Z_start(self):\t\t\n self.assertAlmostEqual(attempt.Z[0], 40)", "def alphahighz(self, z):\n return self.alphaMe(3.8,self.r_vect[0],self.alpha0_vect[0]) - 0.018*(z-3.8)", "def kuzmin_rotation(R,c,M,G=astronomicalG):\n 
return np.sqrt(2*G*np.power(10.,M)*R*R*np.power(c*c+R*R,-1.5))", "def lookback_time(self, z = 1.):\n H_z = self.H_massive if massive_nu_approx else self.H\n integrand = lambda x: const.Mpc_to_km/(H_z(x)*(1.+x))/const.Myr_to_s\n lookback, _ = sint.quad(integrand, 0., z)\n return lookback", "def test_Z_end(self):\t\t\n self.assertAlmostEqual(attempt.Z[-1], 41.47999849170943)", "def altaz_to_radec(alt_az, pos=local_latlong,\n minute=minute, hour=hour, day=day,\n month=month, year=year, tz_offset=5):\n # Retrieve the coordinates and convert them to rads for some trig.\n lat, long = pos[0] * (np.pi/180), pos[1] * (np.pi/180)\n alt, az = alt_az[0] * (np.pi/180), alt_az[1] * (np.pi/180)\n\n gmst = localtime_to_gmst(minute=minute, hour=hour,\n day=day, month=month, year=year, tz_offset=5)\n\n sin_dec = np.sin(alt) * np.sin(lat) + np.cos(alt) * np.cos(lat) * np.cos(az)\n dec = np.arcsin(sin_dec)\n\n cosHA = (np.sin(alt) - np.sin(lat) * np.sin(dec))/(np.cos(lat) * np.cos(dec))\n HA = np.arccos(cosHA) * (180/np.pi)\n\n dec *= (180/np.pi)\n ra = gmst + HA + (long * 180/np.pi) if az < np.pi else gmst - HA + (long * 180/np.pi)\n\n ra_dec = (round(ra, 4), round(dec, 4))\n return ra_dec", "def Schechter_M_z(M, redshift, richness):\n\treturn 0.4 * n.log(10.) * 10**logPhi_evol(redshift, richness) * 10**(0.4 * (M_s_evol(redshift, richness) - M) * (alpha_evol(redshift, richness) + 1)) * n.e**( -10** ( 0.4 * (M_s_evol(redshift,richness) - M)))", "def signed_r_z(rotation_matrix, positions, core):\n arr = np.array\n pos = arr(positions) - core\n rot_pos = arr(rotation_matrix * pos.T).T\n x = rot_pos[:, 0].T\n y = rot_pos[:, 1].T\n z = rot_pos[:, 2].T\n r = (x ** 2 + y ** 2) ** 0.5 * np.sign(-x)\n return r, z", "def _GetHorizonAnglesLegacy(its_elev, height_cbsd, height_rx, refractivity):\n num_points = int(its_elev[0])\n step = its_elev[1]\n dist = num_points * step\n\n # Find the refractivity at the average terrain height\n start_avg = int(3.0 + 0.1 * num_points)\n end_avg = num_points - start_avg + 6\n zsys = np.mean(its_elev[start_avg-1:end_avg])\n refractivity *= np.exp(-zsys/9460.0)\n\n # Find the ray down-curvature per meter\n gma = 157e-9\n gme = gma*(1.0 - 0.04665 * np.exp(refractivity/179.3))\n\n alt_cbsd = its_elev[2] + height_cbsd\n alt_rx = its_elev[num_points+2] + height_rx\n qc = 0.5 * gme\n q = qc * dist\n # theta0 and theta1 the slopes, dl0 and dl1 the horizon distances\n theta1 = (alt_rx - alt_cbsd) / dist\n theta0 = theta1 - q\n theta1 = -theta1 - q\n dl0 = dist\n dl1 = dist\n\n if num_points >= 2:\n sa = 0.0\n sb = dist\n wq = True\n for i in range(1, num_points):\n sa += step\n sb -= step\n q = its_elev[i+2] - (qc*sa + theta0) * sa - alt_cbsd\n if q > 0.0:\n theta0 += q/sa\n dl0 = sa\n wq = False\n if not wq:\n q = its_elev[i+2] - (qc*sb + theta1) * sb - alt_rx\n if q > 0.0:\n theta1 += q/sb\n dl1 = sb\n\n return (np.arctan(theta0) * 180/np.pi,\n np.arctan(theta1) * 180/np.pi,\n dl0,\n dl1)", "def differential_rotation(lat, A, B, C):\n \n lat_deg = lat * np.pi/180.\n return A + B * np.sin(lat_deg)**2 + C * np.sin(lat_deg)**4", "def rotate_NE_RT(n, e, ba):\n if len(n) != len(e):\n raise TypeError(\"North and East component have different length.\")\n if ba < 0 or ba > 360:\n raise ValueError(\"Back Azimuth should be between 0 and 360 degrees.\")\n r = e * sin((ba + 180) * 2 * pi / 360) + n * cos((ba + 180) * 2 * pi / 360)\n t = e * cos((ba + 180) * 2 * pi / 360) - n * sin((ba + 180) * 2 * pi / 360)\n return r, t", "def find_rotation_efficient(arr):\n # edge case: already sorted\n if arr[0] < 
arr[-1]:\n return 0\n\n low = 0\n high = len(arr)-1\n\n # when high is one greater than low, high will be rotation index\n while high - low > 1:\n\n # start guessing at middle\n guess_index = low + (high - low) / 2\n\n # rotation is left\n if arr[guess_index] < arr[low]:\n high = guess_index\n\n # rotation is right\n else:\n low = guess_index\n\n return high", "def m_0(Z0):\n return 35./( 1224. * cos(Z0)**2 + 1 )**0.5", "def zernikeFit(x, y, z,max_rad=225.,cm=[0,0],max_order=20):\n x = x - cm[0]\n y = y - cm[1]\n n = len(x)\n p = max_order\n rho = np.sqrt(x**2+y**2)/max_rad #normalize to unit circle.\n phi = np.arctan2(y,x)\n dataX = []\n ok = rho <= 1.\n for j in range(max_order):\n dataX.append(zernikel(j,rho[ok],phi[ok]))\n dataX=np.array(dataX).T\n beta,SSE,rank,sing = np.linalg.lstsq(dataX,z[ok])# SSE is the residual sum square\n sigma = np.sqrt(SSE/(n-p))\n betaErr = sigma/np.dot(dataX.T,dataX).diagonal()\n SST = np.var(z[ok])*(len(z[ok])-1)# SST is the sum((z_i - mean(z))^2)\n R2 = 1 - SSE/SST\n R2adj = 1-(1-R2)*(len(z[ok])-1)/(len(z[ok])-max_order)# adjusted R2 for quality of fit. \n fitted = np.dot(dataX,beta) # fitted value\n return beta,betaErr,R2adj,fitted", "def project_onto_plane(self,z):\n U=self.U\n Q=self.Q_p\n #print(((z-Q[-2,:,[2]])/P[-2,:,[2]]).T)\n #print(P[-2])\n return ((z-Q[-2,:,[2]])/U[-2,:,[2]]).T*U[-2]+Q[-2]", "def test_zernike_radial(self): # noqa: C901\n # https://en.wikipedia.org/wiki/Zernike_polynomials#Radial_polynomials\n\n def Z3_1(x, dx=0):\n if dx == 0:\n return 3 * x**3 - 2 * x\n if dx == 1:\n return 9 * x**2 - 2\n if dx == 2:\n return 18 * x\n if dx == 3:\n return np.full_like(x, 18)\n if dx >= 4:\n return np.zeros_like(x)\n\n def Z4_2(x, dx=0):\n if dx == 0:\n return 4 * x**4 - 3 * x**2\n if dx == 1:\n return 16 * x**3 - 6 * x\n if dx == 2:\n return 48 * x**2 - 6\n if dx == 3:\n return 96 * x\n if dx == 4:\n return np.full_like(x, 96)\n if dx >= 5:\n return np.zeros_like(x)\n\n def Z6_2(x, dx=0):\n if dx == 0:\n return 15 * x**6 - 20 * x**4 + 6 * x**2\n if dx == 1:\n return 90 * x**5 - 80 * x**3 + 12 * x\n if dx == 2:\n return 450 * x**4 - 240 * x**2 + 12\n if dx == 3:\n return 1800 * x**3 - 480 * x\n if dx == 4:\n return 5400 * x**2 - 480\n if dx == 5:\n return 10800 * x\n if dx == 6:\n return np.full_like(x, 10800)\n if dx >= 7:\n return np.zeros_like(x)\n\n l = np.array([3, 4, 6])\n m = np.array([1, 2, 2])\n r = np.linspace(0, 1, 11) # rho coordinates\n max_dr = 4\n desired = {\n dr: np.array([Z3_1(r, dr), Z4_2(r, dr), Z6_2(r, dr)]).T\n for dr in range(max_dr + 1)\n }\n radial = {\n dr: zernike_radial(r[:, np.newaxis], l, m, dr) for dr in range(max_dr + 1)\n }\n radial_poly = {\n dr: zernike_radial_poly(r[:, np.newaxis], l, m, dr)\n for dr in range(max_dr + 1)\n }\n for dr in range(max_dr + 1):\n np.testing.assert_allclose(radial[dr], desired[dr], err_msg=dr)\n np.testing.assert_allclose(radial_poly[dr], desired[dr], err_msg=dr)", "def z_half(ctx,t,der=0):\n s=ctx.mpf('0.5')+ctx.j*t\n wpinitial = ctx.prec\n ctx.prec = 15\n tt = t/(2*ctx.pi)\n wptheta = wpinitial +1 + ctx.mag(3*(tt**1.5)*ctx.ln(tt))\n wpz = wpinitial + 1 + ctx.mag(12*tt*ctx.ln(tt))\n ctx.prec = wptheta\n theta = ctx.siegeltheta(t)\n ctx.prec = wpz\n rz = Rzeta_set(ctx,s, range(der+1))\n if der > 0: ps1 = ctx._re(ctx.psi(0,s/2)/2 - ctx.ln(ctx.pi)/2)\n if der > 1: ps2 = ctx._re(ctx.j*ctx.psi(1,s/2)/4)\n if der > 2: ps3 = ctx._re(-ctx.psi(2,s/2)/8)\n if der > 3: ps4 = ctx._re(-ctx.j*ctx.psi(3,s/2)/16)\n exptheta = ctx.expj(theta)\n if der == 0:\n z = 2*exptheta*rz[0]\n if der == 1:\n 
zf = 2j*exptheta\n z = zf*(ps1*rz[0]+rz[1])\n if der == 2:\n zf = 2 * exptheta\n z = -zf*(2*rz[1]*ps1+rz[0]*ps1**2+rz[2]-ctx.j*rz[0]*ps2)\n if der == 3:\n zf = -2j*exptheta\n z = 3*rz[1]*ps1**2+rz[0]*ps1**3+3*ps1*rz[2]\n z = zf*(z-3j*rz[1]*ps2-3j*rz[0]*ps1*ps2+rz[3]-rz[0]*ps3)\n if der == 4:\n zf = 2*exptheta\n z = 4*rz[1]*ps1**3+rz[0]*ps1**4+6*ps1**2*rz[2]\n z = z-12j*rz[1]*ps1*ps2-6j*rz[0]*ps1**2*ps2-6j*rz[2]*ps2-3*rz[0]*ps2*ps2\n z = z + 4*ps1*rz[3]-4*rz[1]*ps3-4*rz[0]*ps1*ps3+rz[4]+ctx.j*rz[0]*ps4\n z = zf*z\n ctx.prec = wpinitial\n return ctx._re(z)", "def _altaz_rotation(self, jd):\n R_lon = rot_z(- self.longitude.radians - jd.gast * TAU / 24.0)\n return einsum('ij...,jk...,kl...->il...', self.R_lat, R_lon, jd.M)", "def rewofzs3(x,y):\n\n z=x+y*(1j)\n a=1.0/(2.0*z*z)\n q=(1j)/(z*jnp.sqrt(jnp.pi))*(1.0 + a*(1.0 + a*(3.0 + a*(15.0+a*105.0))))\n return jnp.real(q)", "def azalt(ra, dec):\n\tx = rectanglize(ra, dec)\n\ty = np.dot(R_1, x)\n\tz = np.dot(R_2, y)\n\treturn sphericalize(z)", "def modified_zscore(col):\n col = col.dropna()\n med_col = col.median()\n med_abs_dev = MAD(col)\n mod_z = 0.6745*((col- med_col)/med_abs_dev)\n return np.abs(mod_z)", "def azprob(z):\r\n def yfunc(y):\r\n x = (((((((((((((-0.000045255659 * y\r\n +0.000152529290) * y -0.000019538132) * y\r\n -0.000676904986) * y +0.001390604284) * y\r\n -0.000794620820) * y -0.002034254874) * y\r\n +0.006549791214) * y -0.010557625006) * y\r\n +0.011630447319) * y -0.009279453341) * y\r\n +0.005353579108) * y -0.002141268741) * y\r\n +0.000535310849) * y +0.999936657524\r\n return x\r\n\r\n def wfunc(w):\r\n x = ((((((((0.000124818987 * w\r\n -0.001075204047) * w +0.005198775019) * w\r\n -0.019198292004) * w +0.059054035642) * w\r\n -0.151968751364) * w +0.319152932694) * w\r\n -0.531923007300) * w +0.797884560593) * N.sqrt(w) * 2.0\r\n return x\r\n\r\n Z_MAX = 6.0 # maximum meaningful z-value\r\n x = N.zeros(z.shape,N.float_) # initialize\r\n y = 0.5 * N.fabs(z)\r\n x = N.where(N.less(y,1.0),wfunc(y*y),yfunc(y-2.0)) # get x's\r\n x = N.where(N.greater(y,Z_MAX*0.5),1.0,x) # kill those with big Z\r\n prob = N.where(N.greater(z,0),(x+1)*0.5,(1-x)*0.5)\r\n return prob", "def radarScat(sp, wl, K2=0.93):\n#TODO check if K2 is for ice or liquid!\n prefactor = 2*np.pi*wl**4/(np.pi**5*K2)\n \n \n reflect_hh = prefactor*(sp.Z11+sp.Z22+sp.Z12+sp.Z21)\n reflect_vv = prefactor*(sp.Z11+sp.Z22-sp.Z12-sp.Z21)\n kdp = 1e-3*(180.0/np.pi)*wl*sp.S22r_S11r\n\n reflect_hv = prefactor*(sp.Z11 - sp.Z12 + sp.Z21 - sp.Z22)\n #reflect_vh = prefactor*(sp.Z11 + sp.Z12 - sp.Z21 - sp.Z22).values\n ldr_h = reflect_hh/reflect_hv\n \n # delta_hv np.arctan2(Z[2,3] - Z[3,2], -Z[2,2] - Z[3,3])\n #a = (Z[2,2] + Z[3,3])**2 + (Z[3,2] - Z[2,3])**2\n #b = (Z[0,0] - Z[0,1] - Z[1,0] + Z[1,1])\n #c = (Z[0,0] + Z[0,1] + Z[1,0] + Z[1,1])\n #rho_hv np.sqrt(a / (b*c))\n rho_hv = np.nan*np.ones_like(reflect_hh) # disable rho_hv for now\n #Ah = 4.343e-3 * 2 * scatterer.wavelength * sp.S22i.values # attenuation horizontal polarization\n #Av = 4.343e-3 * 2 * scatterer.wavelength * sp.S11i.values # attenuation vertical polarization\n\n #- test: calculate extinction: TODO: test Cextx that is given in DDA with this calculation.\n k = 2 * np.pi / (wl)\n cext_hh = sp.S22i*4.0*np.pi/k\n cext_vv = sp.S11i*4.0*np.pi/k\n \n return reflect_hh, reflect_vv, reflect_hv, kdp, rho_hv, cext_hh, cext_vv", "def lookback_time(self, z, z0 = 0.0):\n lt_func = np.vectorize(lambda z, z0: \n si.quad(self._lookback_integrand, z0, z, limit=1000)\n )\n t_look, err = lt_func(z, z0)\n return(t_look)", 
"def get_lz(self):\r\n return self.dz * self.nz - self.oz", "def _magsqr(z):\n return np.abs(z) ** 2", "def nfw_physical2angle_fromNFWparams(self, rhos, rs, z):\n\n D_d = self.cosmo.D_A_z(z)\n Rs_angle = rs / D_d / self.cosmo.arcsec # Rs in arcsec\n theta_Rs = rhos * (4 * rs ** 2 * (1 + numpy.log(1. / 2.)))\n eps_crit = self.get_sigma_crit_lensing(z, self.z_source)\n\n return Rs_angle, theta_Rs / eps_crit / D_d / self.cosmo.arcsec", "def stump_C(z) :\n\n if z > 0 :\n return (1 - cos(sqrt(z)))/z \n elif z < 0 :\n return (cosh(sqrt(-z)) - 1)/(-z)\n else :\n return 0.5", "def Z_high(C, R_L, f):\n return Xcap(C,f) + R_L", "def fus_points2depth(points):\n max_z = 0.0\n min_z = 0.0\n for point in points:\n if point[2] > max_z: max_z = point[2]\n if point[2] < min_z: min_z = point[2]\n return abs(max_z-min_z)", "def spectralRad(A):\n eigs = np.linalg.eig(A)[0]\n eigs = (eigs*eigs.conj())**.5\n return max(eigs)", "def at2wt(zam, at):\r\n return at * 1e24 / Avogadro * aleph.common.awr[zam] * aleph.common.Amn", "def derive_Fitzpactrick09(wavelength, alpha, RV):\n alpha = float(alpha)\n RV = float(RV)\n \n # First we'll calculate k(lambda - V) = E(lambda - V) / E(B - V),\n # directly from equation 5\n k = (0.349 + 2.087*RV) * (1.0 / (1.0 + (wavelength / 0.507)**alpha)) - RV\n\n # We'll calculate Alam/Av from K + Rv\n Alam_Av = (k / RV) + 1. \n \n # Finally, to get A_lambda/Aks we need to divide Alam_Av by AKs_Av.\n # We'll assume central wavelength of 2.14 for Ks\n idx = np.where(abs(wavelength - 2.14) == min(abs(wavelength - 2.14)))\n\n A_AKs_at_wave = Alam_Av / Alam_Av[idx]\n\n return A_AKs_at_wave", "def altAz2RADec(azim, elev, jd, lat, lon):\n\n azim = np.radians(azim)\n elev = np.radians(elev)\n lat = np.radians(lat)\n lon = np.radians(lon)\n \n # Calculate hour angle\n ha = np.arctan2(-np.sin(azim), np.tan(elev)*np.cos(lat) - np.cos(azim)*np.sin(lat))\n\n # Calculate Local Sidereal Time\n lst = np.radians(JD2LST(jd, np.degrees(lon))[0])\n \n # Calculate right ascension\n ra = (lst - ha)%(2*np.pi)\n\n # Calculate declination\n dec = np.arcsin(np.sin(lat)*np.sin(elev) + np.cos(lat)*np.cos(elev)*np.cos(azim))\n\n return np.degrees(ra), np.degrees(dec)", "def z_rec_EH(self):\n # Approximation for drag epoch redshift\n om_m = self.omega_m-np.sum(self.omega_nu)\n om_b = self.omega_b\n g1 = 0.0783*om_b**-0.238/(1.+39.5*om_b**0.763)\n g2 = 0.560/(1+21.1*om_b**1.81)\n z_r = 1048*(1+0.00124*om_b**-0.738)*(1+g1*om_m**g2)\n return z_r", "def project_L1(w, a):\n z = 1.0 / (a * a)\n if np.linalg.norm(w, 1) <= z:\n return w\n mu = -np.sort(-w)\n cs = np.cumsum(mu)\n rho = -1\n for j in range(len(w)):\n if mu[j] - (1.0 / (j + 1)) * (cs[j] - z) > 0:\n rho = j\n theta = (1.0 / (rho + 1)) * (cs[rho] - z)\n return np.sign(w) * np.fmax(w - theta, 0)", "def comp_angle_magnet(self):\n Rbo = self.get_Rbo()\n W0 = self.comp_W0m()\n Harc = self.comp_H_arc()\n if self.is_outwards():\n return float(2 * arctan(W0 / (2 * (Rbo + self.H1 - Harc))))\n else:\n return float(2 * arctan(W0 / (2 * (Rbo - self.H1 - Harc))))\n\n # if self.W0_is_rad:\n # return self.W0\n # else: # Convert W0 from m to rad\n # Rbo = self.get_Rbo()\n # return float(2 * arcsin(self.W0 / (2 * Rbo)))", "def _derZ(self, w, x, y, z):\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n dfdz = (\n (\n (1 - alpha) * self.wxInterpolators[y_pos - 1][z_pos](w, x)\n + 
alpha * self.wxInterpolators[y_pos][z_pos](w, x)\n )\n - (\n (1 - alpha) * self.wxInterpolators[y_pos - 1][z_pos - 1](w, x)\n + alpha * self.wxInterpolators[y_pos][z_pos - 1](w, x)\n )\n ) / (self.z_list[z_pos] - self.z_list[z_pos - 1])\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n dfdz = np.zeros(m) + np.nan\n for i in range(1, self.y_n):\n for j in range(1, self.z_n):\n c = np.logical_and(i == y_pos, j == z_pos)\n if np.any(c):\n alpha = (y[c] - self.y_list[i - 1]) / (\n self.y_list[i] - self.y_list[i - 1]\n )\n dfdz[c] = (\n (\n (1 - alpha) * self.wxInterpolators[i - 1][j](w[c], x[c])\n + alpha * self.wxInterpolators[i][j](w[c], x[c])\n )\n - (\n (1 - alpha)\n * self.wxInterpolators[i - 1][j - 1](w[c], x[c])\n + alpha * self.wxInterpolators[i][j - 1](w[c], x[c])\n )\n ) / (self.z_list[j] - self.z_list[j - 1])\n return dfdz", "def stZCR(frame):\n count = len(frame)\n countZ = np.sum(np.abs(np.diff(np.sign(frame)))) / 2\n return (np.float64(countZ) / np.float64(count-1.0))", "def comp_angle_opening_magnet(self):\n\n if self.W1 > 0:\n Rbo = self.get_Rbo()\n return float(2 * arcsin(self.W1 / (2 * Rbo)))\n else:\n return self.comp_angle_magnet()", "def calculation_time_analysis():\n\tfrom . import spectra as sp\n\tp_dict = {'Bfield':700,'rb85frac':1,'Btheta':88*np.pi/180,'Bphi':0*np.pi/180,'lcell':75e-3,'T':84,'Dline':'D2','Elem':'Cs'}\n\tchiL,chiR,chiZ = sp.calc_chi([-3500],p_dict)\n\t\n\tfor angle in [0, np.pi/32, np.pi/16, np.pi/8, np.pi/4, np.pi/2]:\n\t\tprint(('Angle (degrees): ',angle*180/np.pi))\n\t\tRotMat, n1, n2 = solve_diel(chiL,chiR,chiZ,angle)", "def compute_RotMats(a, e, t):\n assert len(a)==len(e)==len(t)\n M = len(a)\n\n # camera intrinsic matrix\n Rz = np.zeros((M, 3, 3), dtype=np.float32)\n Rx = np.zeros((M, 3, 3), dtype=np.float32)\n Rz2 = np.zeros((M, 3, 3), dtype=np.float32)\n # C = np.zeros((M, 1, 3), dtype=np.float32)\n # initial \"1\" positions.\n Rz [:, 2, 2] = 1\n Rx [:, 0, 0] = 1\n Rz2[:, 2, 2] = 1\n #\n R = np.zeros((M, 3, 3), dtype=np.float32)\n\n # convert to radius\n a = a * pi / 180.\n e = e * pi / 180.\n t = t * pi / 180.\n\n # update a, e, t\n a = -a\n e = pi/2.+e\n t = -t\n #\n sin_a, cos_a = np.sin(a), np.cos(a)\n sin_e, cos_e = np.sin(e), np.cos(e)\n sin_t, cos_t = np.sin(t), np.cos(t)\n\n # ===========================\n # rotation matrix\n # ===========================\n \"\"\"\n # [Transposed]\n Rz = np.matrix( [[ cos(a), sin(a), 0 ], # model rotate by a\n [ -sin(a), cos(a), 0 ],\n [ 0, 0, 1 ]] )\n # [Transposed]\n Rx = np.matrix( [[ 1, 0, 0 ], # model rotate by e\n [ 0, cos(e), sin(e) ],\n [ 0, -sin(e), cos(e) ]] )\n # [Transposed]\n Rz2= np.matrix( [[ cos(t), sin(t), 0 ], # camera rotate by t (in-plane rotation)\n [-sin(t), cos(t), 0 ],\n [ 0, 0, 1 ]] )\n R = Rz2*Rx*Rz\n \"\"\"\n\n # Original matrix (None-transposed.)\n # No need to set back to zero?\n Rz[:, 0, 0], Rz[:, 0, 1] = cos_a, -sin_a\n Rz[:, 1, 0], Rz[:, 1, 1] = sin_a, cos_a\n #\n Rx[:, 1, 1], Rx[:, 1, 2] = cos_e, -sin_e\n Rx[:, 2, 1], Rx[:, 2, 2] = sin_e, cos_e\n #\n Rz2[:, 0, 0], Rz2[:, 0, 1] = cos_t, -sin_t\n Rz2[:, 1, 0], Rz2[:, 1, 1] = sin_t, cos_t\n # R = Rz2*Rx*Rz\n R[:] = np.einsum(\"nij,njk,nkl->nil\", Rz2, Rx, Rz)\n\n # Return the original matrix without transpose!\n return R", "def get_z_delta(self, z):\n if self.z is None:\n raise UnknownCarriagePosition\n\n z_delta = z - self.z\n error 
= z_delta % copysign(self.stepper.MM_PER_STEP, z_delta)\n return z_delta, error", "def _phase_detect(acc_z):\n acc_mag_sd = pd.Series(acc_z).rolling(100).std(center=True)\n min_sd = 1.5\n mov = np.where(acc_mag_sd >= min_sd)[0]\n phase = np.zeros(len(acc_z)).astype(int)\n phase[mov] = 1\n\n return phase", "def stZCR(frame):\n count = len(frame)\n countZ = numpy.sum(numpy.abs(numpy.diff(numpy.sign(frame)))) / 2\n return (numpy.float64(countZ) / numpy.float64(count-1.0))", "def z_eq(self):\n theta = self.T_cmb/2.7\n return 25000.*self.Omega_m*self.h**2.*theta**-4.", "def _r_z(angle: tf.Tensor) -> tf.Tensor:\n zero = tf.constant(0, dtype=tf.float64)\n exponent = tf.complex(zero, angle)\n exp = tf.exp(exponent)\n zero_complex = tf.complex(zero, zero)\n one_complex = tf.complex(tf.constant(1, dtype=tf.float64), zero)\n rz = tf.stack([[one_complex, zero_complex], [zero_complex, exp]])\n\n return rz", "def calc_optimal_spacing(sun_properties, tilt_angle, module_length):\n h = module_length * sin(tilt_angle)\n D1 = h / tan(radians(sun_properties.worst_sh))\n D = max(D1 * cos(radians(180 - sun_properties.worst_Az)), D1 * cos(radians(sun_properties.worst_Az - 180)))\n return D", "def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right rotation\n if octantA2 in [2, 3, 5, 8]: \n theta = -theta \n print \"theta =\" , np.rad2deg(theta)\n RotY = np.matrix( [ [ np.cos(theta), 0.0, np.sin(theta) ],\n [ 0.0 , 1.0, 0.0 ],\n [ -np.sin(theta), 0.0, np.cos(theta) ]\n ]) \n \n psi = np.arctan( abs( adir[1] / adir[0] ) )\n #[2, 4, 6, 8 ] => left rotation\n #[1, 3, 5, 7 ] => right rotation\n if octantA2 in [1, 3, 5, 7]:\n psi = -psi\n print \"psi =\" , np.rad2deg(psi)\n RotZ = np.matrix( [ [ np.cos(psi), -np.sin(psi), 0.0 ],\n [ np.sin(psi), np.cos(psi), 0.0 ],\n [ 0.0 , 0.0 , 1.0 ]\n ])\n return np.asarray( RotY * RotZ )", "def test_find_most_rational_surfaces():\n # simple test, linear iota going from 1 to 3\n iota = PowerSeriesProfile([1, 2])\n rho, io = find_most_rational_surfaces(iota, 5)\n np.testing.assert_allclose(rho, np.linspace(0, 1, 5), atol=1e-14, rtol=0)\n np.testing.assert_allclose(io, np.linspace(1, 3, 5), atol=1e-14, rtol=0)", "def getPrintthz(self, zAngleInRadian, preCompElevInRadian=0):\n\n raise NotImplementedError(\"Child class should implemented this.\")", "def zodiac(cls, tee):\n return quotient(float(cls.solar_longitude(tee)), 30) + 1", "def rothesstri(A, b):\n n = shape(A)[0]\n A = hstack([A, b])\n for k in range(n-1):\n r = linalg.norm([ A[k , k] , A[k + 1, k] ])\n if r>0:\n c=A[k, k]/r; s=A[k + 1, k]/r\n A[[k, k + 1],(k + 1):(n + 1)]=[[c, s],[-s, c]]*A[[k, k + 1],(k + 1):(n + 1)]\n A[k, k] = r; A[k+1,k] = 0\n z = A[:, n].copy()\n rbacksolve(A[:, :n], z, n)\n return z", "def max_front_wheel_angle():", "def relu(Z):\n\n A = np.maximum(0,Z)\n cache = Z\n return A, cache", "def lapserate(t, z17):\n import numpy as np\n k = 287.058 / 0.718\n L = np.zeros((17, 145))\n for i in range(16):\n L[i, :] = k * 9.81 * ((t[i+1, :] - t[i, :]) * (z17[i+1] + z17[i])) / (287.058*(t[i+1, :] + t[i, :]) * (z17[i+1] - z17[i]))\n return L", "def Get_CalOutZ_Value(self):\r\n z = self.Get_RawOutZ_Value()\r\n if(z >= self.minZ and z <= self.maxZ):\r\n return 0\r\n else:\r\n return z - self.meanZ", "def calc_IAM_beam_SC(Az_vector, g_vector, ha_vector, teta_z, tilt_angle, type_SCpanel, Sz_vector, 
latitude):\n\n def calc_teta_L(Az, teta_z, tilt, Sz):\n teta_la = tan(Sz) * cos(teta_z - Az)\n teta_l = degrees(abs(atan(teta_la) - tilt))\n if teta_l < 0:\n teta_l = min(89, abs(teta_l))\n if teta_l >= 90:\n teta_l = 89.999\n return teta_l # longitudinal incidence angle in degrees\n\n def calc_teta_T(Az, Sz, teta_z):\n teta_ta = sin(Sz) * sin(abs(teta_z - Az))\n teta_T = degrees(atan(teta_ta / cos(teta_ta)))\n if teta_T < 0:\n teta_T = min(89, abs(teta_T))\n if teta_T >= 90:\n teta_T = 89.999\n return teta_T # transversal incidence angle in degrees\n\n def calc_teta_L_max(teta_L):\n if teta_L < 0:\n teta_L = min(89, abs(teta_L))\n if teta_L >= 90:\n teta_L = 89.999\n return teta_L\n\n def calc_IAMb(teta_l, teta_T, type_SCpanel):\n if type_SCpanel == 'FP': # # Flat plate collector 1636: SOLEX BLU, SPF, 2012\n IAM_b = -0.00000002127039627042 * teta_l ** 4 + 0.00000143550893550934 * teta_l ** 3 - 0.00008493589743580050 * teta_l ** 2 + 0.00041588966590833100 * teta_l + 0.99930069929920900000\n if type_SCpanel == 'ET': # # evacuated tube Zewotherm ZEWO-SOL ZX 30, SPF, 2012\n IAML = -0.00000003365384615386 * teta_l ** 4 + 0.00000268745143745027 * teta_l ** 3 - 0.00010196678321666700 * teta_l ** 2 + 0.00088830613832779900 * teta_l + 0.99793706293541500000\n IAMT = 0.000000002794872 * teta_T ** 5 - 0.000000534731935 * teta_T ** 4 + 0.000027381118880 * teta_T ** 3 - 0.000326340326281 * teta_T ** 2 + 0.002973799531468 * teta_T + 1.000713286764210\n IAM_b = IAMT * IAML # overall incidence angle modifier for beam radiation\n return IAM_b\n\n # convert to radians\n teta_z = radians(teta_z)\n tilt = radians(tilt_angle)\n\n g_vector = np.radians(g_vector)\n ha_vector = np.radians(ha_vector)\n lat = radians(latitude)\n Sz_vector = np.radians(Sz_vector)\n Az_vector = np.radians(Az_vector)\n Incidence_vector = np.vectorize(solar_equations.calc_incident_angle_beam)(g_vector, lat, ha_vector, tilt,\n teta_z) # incident angle in radians\n\n # calculate incident angles\n if type_SCpanel == 'FP':\n incident_angle = np.degrees(Incidence_vector)\n Teta_L = np.vectorize(calc_teta_L_max)(incident_angle)\n Teta_T = 0 # not necessary for flat plate collectors\n if type_SCpanel == 'ET':\n Teta_L = np.vectorize(calc_teta_L)(Az_vector, teta_z, tilt, Sz_vector) # in degrees\n Teta_T = np.vectorize(calc_teta_T)(Az_vector, Sz_vector, teta_z) # in degrees\n\n # calculate incident angle modifier for beam radiation\n IAM_b_vector = np.vectorize(calc_IAMb)(Teta_L, Teta_T, type_SCpanel)\n\n return IAM_b_vector", "def get_ztf_footprint_corners():\n x = 6.86 / 2\n return [-x, +x, +x, -x] * u.deg, [-x, -x, +x, +x] * u.deg", "def coord_space(\n a0: numpy.ndarray, a1: numpy.ndarray, a2: numpy.ndarray, rev: bool = False\n) -> Tuple[numpy.ndarray, Optional[numpy.ndarray]]:\n # dbg = False\n # if dbg:\n # print(a0.transpose())\n # print(a1.transpose())\n # print(a2.transpose())\n\n # a0 = acs[0]\n # a1 = acs[1]\n # a2 = acs[2]\n\n global gtm\n global gmry\n global gmrz, gmrz2\n\n tm = gtm\n mry = gmry\n mrz = gmrz\n mrz2 = gmrz2\n\n # tx acs[1] to origin\n # tm = homog_trans_mtx(-a1[0][0], -a1[1][0], -a1[2][0])\n set_homog_trans_mtx(-a1[0], -a1[1], -a1[2], tm)\n\n # directly translate a2 using a1\n p = a2 - a1\n sc = get_spherical_coordinates(p)\n\n # if dbg:\n # print(\"p\", p.transpose())\n # print(\"sc\", sc)\n\n # mrz = homog_rot_mtx(-sc[1], \"z\") # rotate translated a2 -azimuth about Z\n set_Z_homog_rot_mtx(-sc[1], mrz)\n # mry = homog_rot_mtx(-sc[2], \"y\") # rotate translated a2 -polar_angle about Y\n set_Y_homog_rot_mtx(-sc[2], 
mry)\n\n # mt completes a1-a2 on Z-axis, still need to align a0 with XZ plane\n # mt = mry @ mrz @ tm # python 3.5 and later\n mt = gmry.dot(gmrz.dot(gtm))\n\n # if dbg:\n # print(\"tm:\\n\", tm)\n # print(\"mrz:\\n\", mrz)\n # print(\"mry:\\n\", mry)\n # # print(\"mt \", mt)\n\n p = mt.dot(a0)\n\n # if dbg:\n # print(\"mt:\\n\", mt, \"\\na0:\\n\", a0, \"\\np:\\n\", p)\n\n # need azimuth of translated a0\n # sc2 = get_spherical_coordinates(p)\n # print(sc2)\n azimuth2 = _get_azimuth(p[0], p[1])\n\n # rotate a0 -azimuth2 about Z to align with X\n # mrz2 = homog_rot_mtx(-azimuth2, \"z\")\n set_Z_homog_rot_mtx(-azimuth2, mrz2)\n\n # mt = mrz2 @ mt\n mt = gmrz2.dot(mt)\n\n # if dbg:\n # print(\"mt:\", mt, \"\\na0:\", a0, \"\\np:\", p)\n # # print(p, \"\\n\", azimuth2, \"\\n\", mrz2, \"\\n\", mt)\n\n # if dbg:\n # print(\"mt:\\n\", mt)\n # print(\"<<<<<<==============================\")\n\n if not rev:\n return mt, None\n\n # rev=True, so generate the reverse transformation\n\n # rotate a0 theta about Z, reversing alignment with X\n # mrz2 = homog_rot_mtx(azimuth2, \"z\")\n set_Z_homog_rot_mtx(azimuth2, mrz2)\n # rotate a2 phi about Y\n # mry = homog_rot_mtx(sc[2], \"y\")\n set_Y_homog_rot_mtx(sc[2], mry)\n # rotate a2 theta about Z\n # mrz = homog_rot_mtx(sc[1], \"z\")\n set_Z_homog_rot_mtx(sc[1], mrz)\n # translation matrix origin to a1\n # tm = homog_trans_mtx(a1[0][0], a1[1][0], a1[2][0])\n set_homog_trans_mtx(a1[0], a1[1], a1[2], tm)\n\n # mr = tm @ mrz @ mry @ mrz2\n mr = gtm.dot(gmrz.dot(gmry.dot(gmrz2)))\n # mr = numpy.dot(tm, numpy.dot(mrz, numpy.dot(mry, mrz2)))\n\n return mt, mr", "def ring_forming_scission(rct_zmas, prd_zmas):\n ret = None\n rct_zmas, rct_gras = shifted_standard_zmas_graphs(\n rct_zmas, remove_stereo=True)\n prd_zmas, prd_gras = shifted_standard_zmas_graphs(\n prd_zmas, remove_stereo=True)\n tras, _, _ = automol.graph.reac.ring_forming_scission(rct_gras, prd_gras)\n if tras:\n tra = tras[0]\n brk_bnd_key, = automol.graph.trans.broken_bond_keys(tra)\n frm_bnd_key, = automol.graph.trans.formed_bond_keys(tra)\n ts_zma = rct_zmas[0]\n\n # set up radical atom, leaving atom, newly formed radical atom\n # also set up chain between radical atom and newly formed radical atom\n ts_gra = automol.zmatrix.graph(ts_zma)\n rad_atm = list(automol.graph.sing_res_dom_radical_atom_keys(ts_gra))[0]\n for atm in brk_bnd_key:\n if atm not in frm_bnd_key:\n leave_atm = atm\n else:\n new_rad_atm = atm\n\n chain_between = automol.zmatrix.chain_between(ts_zma, new_rad_atm, rad_atm)\n\n tors_names = automol.zmatrix.torsion_coordinate_names(ts_zma)\n coo_dct = automol.zmatrix.coordinates(ts_zma)\n ang_90 = numpy.pi/2.\n ts_tors_names = []\n const_tors_names = []\n # (i) set torsion from rad atom towards chain to 90\n for tors_name in tors_names:\n axis = coo_dct[tors_name][0][1:3]\n # (ii) remove torsions in chain_between from final torsion sampling list\n if ((axis[0] not in chain_between) or (axis[1] not in chain_between)):\n ts_tors_names.append(tors_name)\n if ((rad_atm == axis[0] and axis[1] in chain_between) or\n (rad_atm == axis[1] and axis[0] in chain_between)):\n ts_zma_p = automol.zmatrix.set_values(ts_zma, {tors_name: ang_90})\n # const_tors_names.append(tors_name)\n\n # (iii) vary torsions in chain_between to minimize distance from rad_atm to new_rad_atm\n preopt_tors_names = []\n for tors_name in tors_names:\n axis = coo_dct[tors_name][0][1:3]\n if ((axis[0] in chain_between) and (axis[1] in chain_between) and\n (rad_atm not in axis) and (new_rad_atm not in axis)):\n 
preopt_tors_names.append(tors_name)\n # add any ring forming torsions to constraints to ensure 0 dihedrals for the ring\n const_tors_names.append(tors_name)\n\n angles = [0., 2.*numpy.pi/3, 4.*numpy.pi/3]\n # angles = [0., numpy.pi/3., 2.*numpy.pi/3, 3.*numpy.pi/3., 4.*numpy.pi/3, 5*numpy.pi/3.]\n trial_zmas = [ts_zma_p]\n for preopt_tors_name in preopt_tors_names:\n new_trial_zmas = []\n for zma_i in trial_zmas:\n for ang in angles:\n new_trial_zmas.append(\n automol.zmatrix.set_values(\n zma_i, {preopt_tors_name: ang}))\n trial_zmas = new_trial_zmas\n\n dist_min = 1.0e30\n for trial_zma in trial_zmas:\n geo_i = automol.zmatrix.geometry(trial_zma)\n dist = automol.geom.distance(geo_i, rad_atm, new_rad_atm)\n if dist < dist_min:\n dist_min = dist\n ts_zma = trial_zma\n\n ang_stp = 2.*numpy.pi/6.\n # (iv) vary torsion from new_rad_atm to leaving atom so that leave_atm is far from rad_atm\n for tors_name in tors_names:\n ang = -ang_stp\n axis = coo_dct[tors_name][0][1:3]\n if ((new_rad_atm == axis[0] and axis[1] in chain_between) or\n (new_rad_atm == axis[1] and axis[0] in chain_between)):\n dist_max = 0.0\n for _ in range(6):\n ang += ang_stp\n ts_zma_i = automol.zmatrix.set_values(ts_zma, {tors_name: ang})\n geo_i = automol.zmatrix.geometry(ts_zma_i)\n dist = automol.geom.distance(geo_i, rad_atm, leave_atm)\n if dist > dist_max:\n dist_max = dist\n ts_zma_max = ts_zma_i\n const_tors_names.append(tors_name)\n # set up ts torsions - remove ones with axis in the chain between new and old rad atoms\n if ((axis[0] not in chain_between) or (axis[1] not in chain_between)):\n ts_tors_names.append(tors_name)\n # elif (axis[0] in chain_between) and (axis[1] in chain_between):\n # if tors_name not in const_tors_names:\n # const_tors_names.append(tors_name)\n\n ts_zma = ts_zma_max\n\n # (v) vary angles to decrease rad_atm to new_rad_atm to < 2.25 Ang\n dist_thresh = 4.25\n # dist_thresh = 4.\n ang_names = automol.zmatrix.central_angle_names(ts_zma)\n ring_angs = []\n const_angs_names = []\n for ang_name in ang_names:\n ang_atms = coo_dct[ang_name][0]\n if ((ang_atms[0] in chain_between) and (ang_atms[1] in chain_between) and\n (ang_atms[2] in chain_between)):\n ring_angs.append(ang_name)\n const_angs_names.append(ang_name)\n dist = 1.e30\n ang_stp = numpy.pi/360.\n # ang_stp = 0.5 degrees\n counter = 0\n while ((dist > dist_thresh) and (counter < 30)):\n counter += 1\n values = automol.zmatrix.values(ts_zma)\n for ang_name in ring_angs:\n ang = values[ang_name] - ang_stp\n ts_zma = automol.zmatrix.set_values(ts_zma, {ang_name: ang})\n geo_i = automol.zmatrix.geometry(ts_zma)\n dist = automol.geom.distance(geo_i, rad_atm, new_rad_atm)\n\n brk_dist_name = automol.zmatrix.bond_key_from_idxs(ts_zma, brk_bnd_key)\n\n # Build the reactants graph\n rcts_gra = automol.graph.union_from_sequence(rct_gras)\n\n # Set the indices for the const coords\n\n ret = (ts_zma, brk_dist_name, brk_bnd_key,\n tuple(const_tors_names), tuple(ts_tors_names),\n tuple(const_angs_names), rcts_gra)\n\n return ret", "def sidereal_zodiac(tee):\n return quotient(int(sidereal_solar_longitude(tee)), 30) + 1", "def mamajek08_logRpHK_max():\n return -3.8918287373004357", "def sortarai(self, datablock, s, Zdiff):\n\n first_Z, first_I, zptrm_check, ptrm_check, ptrm_tail = [], [], [], [], []\n field, phi, theta = \"\", \"\", \"\"\n starthere = 0\n Treat_I, Treat_Z, Treat_PZ, Treat_PI, Treat_M, Treat_AC = [], [], [], [], [], []\n ISteps, ZSteps, PISteps, PZSteps, MSteps, ACSteps = [], [], [], [], [], []\n GammaChecks = [] # comparison of pTRM 
direction acquired and lab field\n Mkeys = ['measurement_magn_moment', 'measurement_magn_volume',\n 'measurement_magn_mass', 'measurement_magnitude']\n rec = datablock[0]\n for key in Mkeys:\n if key in list(rec.keys()) and rec[key] != \"\":\n momkey = key\n break\n # first find all the steps\n for k in range(len(datablock)):\n rec = datablock[k]\n if 'treat_mw_step' in list(rec.keys()) and rec['treat_mw_step'] is None: rec['treat_mw_step']=\"\"\n if 'treatment_mw_integral' in list(rec.keys()) and rec['treatment_mw_integral'] is None: rec['treatment_mw_integral']=\"\"\n if 'treatment_mw_power' in list(rec.keys()) and rec['treatment_mw_power'] is None: rec['treatment_mw_power']=\"\"\n if 'treatment_temp' in list(rec.keys()) and rec['treatment_temp'] is None:rec['treatment_temp']=\"\"\n if \"treat_mw_step\" in list(rec.keys()) and rec[\"treat_mw_step\"]!=\"\":\n\n THERMAL = False\n MICROWAVE = True\n temp = float(rec[\"treat_mw_step\"])\n elif \"treatment_mw_integral\" in list(rec.keys()) and rec[\"treatment_mw_integral\"]!=\"\":\n THERMAL = False\n MICROWAVE = True\n if \"measurement_description\" in list(rec.keys()):\n MW_step = rec[\"measurement_description\"].strip(\n '\\n').split(\":\")\n for STEP in MW_step:\n if \"Number\" in STEP:\n temp = float(STEP.split(\"-\")[-1])\n elif \"treatment_mw_power\" in list(rec.keys()) and rec[\"treatment_mw_power\"]!=\"\":\n THERMAL = False\n MICROWAVE = True\n if \"measurement_description\" in list(rec.keys()):\n MW_step = rec[\"measurement_description\"].strip(\n '\\n').split(\":\")\n for STEP in MW_step:\n if \"Number\" in STEP:\n temp = float(STEP.split(\"-\")[-1])\n elif \"treatment_temp\" in list(rec.keys()) and rec[\"treatment_temp\"]!=\"\":\n temp = float(rec[\"treatment_temp\"])\n THERMAL = True\n MICROWAVE = False\n methcodes = []\n tmp = rec[\"magic_method_codes\"].split(\":\")\n for meth in tmp:\n methcodes.append(meth.strip())\n # for thellier-thellier\n if 'LT-T-I' in methcodes and 'LP-PI-TRM' in methcodes and 'LP-TRM' not in methcodes:\n Treat_I.append(temp)\n ISteps.append(k)\n if field == \"\":\n field = float(rec[\"treatment_dc_field\"])\n if phi == \"\":\n phi = float(rec['treatment_dc_field_phi'])\n theta = float(rec['treatment_dc_field_theta'])\n\n # for Microwave\n if 'LT-M-I' in methcodes and 'LP-PI-M' in methcodes:\n Treat_I.append(temp)\n ISteps.append(k)\n if field == \"\":\n field = float(rec[\"treatment_dc_field\"])\n if phi == \"\":\n phi = float(rec['treatment_dc_field_phi'])\n theta = float(rec['treatment_dc_field_theta'])\n\n # stick first zero field stuff into first_Z\n if 'LT-NO' in methcodes:\n Treat_Z.append(temp)\n ZSteps.append(k)\n if \"LT-AF-Z\" in methcodes and 'treatment_ac_field' in list(rec.keys()):\n if rec['treatment_ac_field'] != \"\":\n AFD_after_NRM = True\n # consider AFD before T-T experiment ONLY if it comes before\n # the experiment\n for i in range(len(first_I)):\n # check if there was an infield step before the AFD\n if float(first_I[i][3]) != 0:\n AFD_after_NRM = False\n if AFD_after_NRM:\n AF_field = 0\n if 'treatment_ac_field' in rec:\n try:\n AF_field = float(rec['treatment_ac_field']) * 1000\n except ValueError:\n pass\n\n dec = float(rec[\"measurement_dec\"])\n inc = float(rec[\"measurement_inc\"])\n intensity = float(rec[momkey])\n first_I.append([273. - AF_field, 0., 0., 0., 1])\n first_Z.append(\n [273. 
- AF_field, dec, inc, intensity, 1]) # NRM step\n if 'LT-T-Z' in methcodes or 'LT-M-Z' in methcodes:\n Treat_Z.append(temp)\n ZSteps.append(k)\n if 'LT-PTRM-Z':\n Treat_PZ.append(temp)\n PZSteps.append(k)\n if 'LT-PTRM-I' in methcodes or 'LT-PMRM-I' in methcodes:\n Treat_PI.append(temp)\n PISteps.append(k)\n if 'LT-PTRM-MD' in methcodes or 'LT-PMRM-MD' in methcodes:\n Treat_M.append(temp)\n MSteps.append(k)\n if 'LT-PTRM-AC' in methcodes or 'LT-PMRM-AC' in methcodes:\n Treat_AC.append(temp)\n ACSteps.append(k)\n if 'LT-NO' in methcodes:\n dec = float(rec[\"measurement_dec\"])\n inc = float(rec[\"measurement_inc\"])\n moment = float(rec[\"measurement_magn_moment\"])\n if 'LP-PI-M' not in methcodes:\n first_I.append([273, 0., 0., 0., 1])\n first_Z.append([273, dec, inc, moment, 1]) # NRM step\n else:\n first_I.append([0, 0., 0., 0., 1])\n first_Z.append([0, dec, inc, moment, 1]) # NRM step\n\n #---------------------\n # find IZ and ZI\n #---------------------\n\n for temp in Treat_I: # look through infield steps and find matching Z step\n if temp in Treat_Z: # found a match\n istep = ISteps[Treat_I.index(temp)]\n irec = datablock[istep]\n methcodes = []\n tmp = irec[\"magic_method_codes\"].split(\":\")\n for meth in tmp:\n methcodes.append(meth.strip())\n # take last record as baseline to subtract\n brec = datablock[istep - 1]\n zstep = ZSteps[Treat_Z.index(temp)]\n zrec = datablock[zstep]\n # sort out first_Z records\n # check if ZI/IZ in in method codes:\n ZI = \"\"\n if \"LP-PI-TRM-IZ\" in methcodes or \"LP-PI-M-IZ\" in methcodes or \"LP-PI-IZ\" in methcodes:\n ZI = 0\n elif \"LP-PI-TRM-ZI\" in methcodes or \"LP-PI-M-ZI\" in methcodes or \"LP-PI-ZI\" in methcodes:\n ZI = 1\n elif \"LP-PI-BT-IZZI\" in methcodes:\n ZI == \"\"\n i_intex, z_intex = 0, 0\n foundit = False\n for i in range(len(datablock)):\n if THERMAL:\n if ('treatment_temp' in list(datablock[i].keys()) and float(temp) == float(datablock[i]['treatment_temp'])):\n foundit = True\n if MICROWAVE:\n if ('treat_mw_step' in list(datablock[i].keys())):\n ThisStep=float(datablock[i]['treat_mw_step'])\n if ThisStep == float(temp):\n foundit = True\n\n elif ('measurement_description' in list(datablock[i].keys())):\n MW_step = datablock[i][\"measurement_description\"].strip(\n '\\n').split(\":\")\n for STEP in MW_step:\n if \"Number\" in STEP:\n ThisStep = float(STEP.split(\"-\")[-1])\n if ThisStep == float(temp):\n foundit = True\n if foundit:\n if \"LT-T-Z\" in datablock[i]['magic_method_codes'].split(\":\") or \"LT-M-Z\" in datablock[i]['magic_method_codes'].split(\":\"):\n z_intex = i\n if \"LT-T-I\" in datablock[i]['magic_method_codes'].split(\":\") or \"LT-M-I\" in datablock[i]['magic_method_codes'].split(\":\"):\n i_intex = i\n foundit = False\n\n if z_intex < i_intex:\n ZI = 1\n else:\n ZI = 0\n dec = float(zrec[\"measurement_dec\"])\n inc = float(zrec[\"measurement_inc\"])\n str = float(zrec[momkey])\n first_Z.append([temp, dec, inc, str, ZI])\n # sort out first_I records\n idec = float(irec[\"measurement_dec\"])\n iinc = float(irec[\"measurement_inc\"])\n istr = float(irec[momkey])\n X = pmag.dir2cart([idec, iinc, istr])\n BL = pmag.dir2cart([dec, inc, str])\n I = []\n for c in range(3):\n I.append((X[c] - BL[c]))\n if I[2] != 0:\n iDir = pmag.cart2dir(I)\n if Zdiff == 0:\n first_I.append([temp, iDir[0], iDir[1], iDir[2], ZI])\n else:\n first_I.append([temp, 0., 0., I[2], ZI])\n# gamma=angle([iDir[0],iDir[1]],[phi,theta])\n else:\n first_I.append([temp, 0., 0., 0., ZI])\n# gamma=0.0\n# put in Gamma check (infield trm versus lab 
field)\n# if 180.-gamma<gamma:\n# gamma=180.-gamma\n# GammaChecks.append([temp-273.,gamma])\n\n #---------------------\n # find Thellier Thellier protocol\n #---------------------\n if 'LP-PI-II'in methcodes or 'LP-PI-T-II' in methcodes or 'LP-PI-M-II' in methcodes:\n # look through infield steps and find matching Z step\n for i in range(1, len(Treat_I)):\n if Treat_I[i] == Treat_I[i - 1]:\n # ignore, if there are more than\n temp = Treat_I[i]\n irec1 = datablock[ISteps[i - 1]]\n dec1 = float(irec1[\"measurement_dec\"])\n inc1 = float(irec1[\"measurement_inc\"])\n moment1 = float(irec1[\"measurement_magn_moment\"])\n if len(first_I) < 2:\n dec_initial = dec1\n inc_initial = inc1\n cart1 = np.array(pmag.dir2cart([dec1, inc1, moment1]))\n irec2 = datablock[ISteps[i]]\n dec2 = float(irec2[\"measurement_dec\"])\n inc2 = float(irec2[\"measurement_inc\"])\n moment2 = float(irec2[\"measurement_magn_moment\"])\n cart2 = np.array(pmag.dir2cart([dec2, inc2, moment2]))\n\n # check if its in the same treatment\n if Treat_I[i] == Treat_I[i - 2] and dec2 != dec_initial and inc2 != inc_initial:\n continue\n if dec1 != dec2 and inc1 != inc2:\n zerofield = (cart2 + cart1) / 2\n infield = (cart2 - cart1) / 2\n\n DIR_zerofield = pmag.cart2dir(zerofield)\n DIR_infield = pmag.cart2dir(infield)\n\n first_Z.append(\n [temp, DIR_zerofield[0], DIR_zerofield[1], DIR_zerofield[2], 0])\n first_I.append(\n [temp, DIR_infield[0], DIR_infield[1], DIR_infield[2], 0])\n\n #---------------------\n # find pTRM checks\n #---------------------\n\n for i in range(len(Treat_PI)): # look through infield steps and find matching Z step\n\n temp = Treat_PI[i]\n k = PISteps[i]\n rec = datablock[k]\n dec = float(rec[\"measurement_dec\"])\n inc = float(rec[\"measurement_inc\"])\n moment = float(rec[\"measurement_magn_moment\"])\n phi = float(rec[\"treatment_dc_field_phi\"])\n theta = float(rec[\"treatment_dc_field_theta\"])\n M = np.array(pmag.dir2cart([dec, inc, moment]))\n\n foundit = False\n if 'LP-PI-II' not in methcodes:\n # Important: suport several pTRM checks in a row, but\n # does not support pTRM checks after infield step\n for j in range(k, 1, -1):\n if \"LT-M-I\" in datablock[j]['magic_method_codes'] or \"LT-T-I\" in datablock[j]['magic_method_codes']:\n after_zerofield = 0.\n foundit = True\n prev_rec = datablock[j]\n zerofield_index = j\n break\n if float(datablock[j]['treatment_dc_field']) == 0:\n after_zerofield = 1.\n foundit = True\n prev_rec = datablock[j]\n zerofield_index = j\n break\n else: # Thellier-Thellier protocol\n foundit = True\n prev_rec = datablock[k - 1]\n zerofield_index = k - 1\n if foundit:\n prev_dec = float(prev_rec[\"measurement_dec\"])\n prev_inc = float(prev_rec[\"measurement_inc\"])\n prev_moment = float(prev_rec[\"measurement_magn_moment\"])\n prev_phi = float(prev_rec[\"treatment_dc_field_phi\"])\n prev_theta = float(prev_rec[\"treatment_dc_field_theta\"])\n prev_M = np.array(pmag.dir2cart(\n [prev_dec, prev_inc, prev_moment]))\n\n if 'LP-PI-II' not in methcodes:\n diff_cart = M - prev_M\n diff_dir = pmag.cart2dir(diff_cart)\n if after_zerofield == 0:\n ptrm_check.append(\n [temp, diff_dir[0], diff_dir[1], diff_dir[2], zerofield_index, after_zerofield])\n else:\n ptrm_check.append(\n [temp, diff_dir[0], diff_dir[1], diff_dir[2], zerofield_index, after_zerofield])\n else:\n # health check for T-T protocol:\n if theta != prev_theta:\n diff = (M - prev_M) / 2\n diff_dir = pmag.cart2dir(diff)\n ptrm_check.append(\n [temp, diff_dir[0], diff_dir[1], diff_dir[2], zerofield_index, \"\"])\n else:\n 
print(\n \"-W- WARNING: specimen. pTRM check not in place in Thellier Thellier protocol. step please check\")\n\n #---------------------\n # find Tail checks\n #---------------------\n\n for temp in Treat_M:\n # print temp\n step = MSteps[Treat_M.index(temp)]\n rec = datablock[step]\n dec = float(rec[\"measurement_dec\"])\n inc = float(rec[\"measurement_inc\"])\n moment = float(rec[\"measurement_magn_moment\"])\n foundit = False\n for i in range(1, len(datablock)):\n if 'LT-T-Z' in datablock[i]['magic_method_codes'] or 'LT-M-Z' in datablock[i]['magic_method_codes']:\n if (THERMAL and \"treatment_temp\" in list(datablock[i].keys()) and float(datablock[i][\"treatment_temp\"]) == float(temp))\\\n or (MICROWAVE and \"measurement_description\" in list(datablock[i].keys()) and \"Step Number-%.0f\" % float(temp) in datablock[i][\"measurement_description\"]):\n prev_rec = datablock[i]\n prev_dec = float(prev_rec[\"measurement_dec\"])\n prev_inc = float(prev_rec[\"measurement_inc\"])\n prev_moment = float(\n prev_rec[\"measurement_magn_moment\"])\n foundit = True\n break\n\n if foundit:\n ptrm_tail.append([temp, 0, 0, moment - prev_moment])\n\n #\n # final check\n #\n if len(first_Z) != len(first_I):\n print(len(first_Z), len(first_I))\n print(\" Something wrong with this specimen! Better fix it or delete it \")\n input(\" press return to acknowledge message\")\n\n #---------------------\n # find Additivity (patch by rshaar)\n #---------------------\n\n additivity_check = []\n for i in range(len(Treat_AC)):\n step_0 = ACSteps[i]\n temp = Treat_AC[i]\n dec0 = float(datablock[step_0][\"measurement_dec\"])\n inc0 = float(datablock[step_0][\"measurement_inc\"])\n moment0 = float(datablock[step_0]['measurement_magn_moment'])\n V0 = pmag.dir2cart([dec0, inc0, moment0])\n # find the infield step that comes before the additivity check\n foundit = False\n for j in range(step_0, 1, -1):\n if \"LT-T-I\" in datablock[j]['magic_method_codes']:\n foundit = True\n break\n if foundit:\n dec1 = float(datablock[j][\"measurement_dec\"])\n inc1 = float(datablock[j][\"measurement_inc\"])\n moment1 = float(datablock[j]['measurement_magn_moment'])\n V1 = pmag.dir2cart([dec1, inc1, moment1])\n # print \"additivity check: \",s\n # print j\n # print \"ACC=V1-V0:\"\n # print \"V1=\",[dec1,inc1,moment1],pmag.dir2cart([dec1,inc1,moment1])/float(datablock[0][\"measurement_magn_moment\"])\n # print \"V1=\",pmag.dir2cart([dec1,inc1,moment1])/float(datablock[0][\"measurement_magn_moment\"])\n # print \"V0=\",[dec0,inc0,moment0],pmag.dir2cart([dec0,inc0,moment0])/float(datablock[0][\"measurement_magn_moment\"])\n # print \"NRM=\",float(datablock[0][\"measurement_magn_moment\"])\n # print \"-------\"\n\n I = []\n for c in range(3):\n I.append(V1[c] - V0[c])\n dir1 = pmag.cart2dir(I)\n additivity_check.append([temp, dir1[0], dir1[1], dir1[2]])\n # print\n # \"I\",np.array(I)/float(datablock[0][\"measurement_magn_moment\"]),dir1,\"(dir1\n # unnormalized)\"\n X = np.array(I) / \\\n float(datablock[0][\"measurement_magn_moment\"])\n # print \"I\",np.sqrt(sum(X**2))\n araiblock = (first_Z, first_I, ptrm_check, ptrm_tail,\n zptrm_check, GammaChecks, additivity_check)\n\n return araiblock, field", "def age_flat(self, z):\n if self.k0 != 0.0:\n raise cex.CosmologyUnapplicable(\"Not for Omega_k != 0\")\n \n \n om = self.m0\n lam = 1. 
- om\n t_z = (2.*np.arcsinh(sqrt(lam/om)*np.power((1.+z),(-3./2.)))/\n (cc.H100_s*self.h*3.*sqrt(lam))\n )\n return(t_z)", "def z(mB, mM, q2, t0=None):\n tm = (mB-mM)**2\n tp = (mB+mM)**2\n if t0 is None:\n t0 = tp*(1-sqrt(1-tm/tp))\n elif t0 == 'tm':\n t0 = tm\n sq2 = sqrt(tp-q2)\n st0 = sqrt(tp-t0)\n return (sq2-st0)/(sq2+st0)", "def ra2phi(ra: float) -> float:\n return np.pi / 180.0 * ra", "def ransac_iters(w=0.5, d=min_num_pairs(), z=0.99):\n\n return int(np.ceil(np.log(1 - z) / np.log(1 - np.power(w,d))))", "def _q_z(self):\n D = self.latt_par['D'].value\n lambda_r = self.latt_par['lambda_r'].value\n gamma = self.latt_par['gamma'].value\n return 2*np.pi*(self.h/D - self.k/lambda_r/np.tan(gamma))", "def invert_algebraic(surface, RT: RadiativeTransfer, instrument, x_surface, \n x_RT, x_instrument, meas, geom):\n\n # Get atmospheric optical parameters (possibly at high\n # spectral resolution) and resample them if needed.\n rhi = RT.get_shared_rtm_quantities(x_RT, geom)\n wl, fwhm = instrument.calibration(x_instrument)\n rhoatm = instrument.sample(x_instrument, RT.wl, rhi['rhoatm'])\n transm = instrument.sample(x_instrument, RT.wl, rhi['transm'])\n solar_irr = instrument.sample(x_instrument, RT.wl, RT.solar_irr)\n sphalb = instrument.sample(x_instrument, RT.wl, rhi['sphalb'])\n transup = instrument.sample(x_instrument, RT.wl, rhi['transup'])\n coszen = RT.coszen\n\n #Prevent NaNs\n transm[transm == 0] = 1e-5\n\n # Calculate the initial emission and subtract from the measurement.\n # Surface and measured wavelengths may differ.\n Ls = surface.calc_Ls(x_surface, geom)\n Ls_meas = interp1d(surface.wl, Ls, fill_value='extrapolate')(wl)\n rdn_solrfl = meas - (transup * Ls_meas)\n\n # Now solve for the reflectance at measured wavelengths,\n # and back-translate to surface wavelengths\n rho = rdn_solrfl * np.pi / (solar_irr * coszen)\n rfl = 1.0 / (transm / (rho - rhoatm) + sphalb)\n rfl[rfl > 1.0] = 1.0\n rfl_est = interp1d(wl, rfl, fill_value='extrapolate')(surface.wl)\n\n # Some downstream code will benefit from our precalculated\n # atmospheric optical parameters\n coeffs = rhoatm, sphalb, transm, solar_irr, coszen, transup\n return rfl_est, Ls, coeffs", "def lookback_time(self, z):\n\n # Calculate the integrand.\n def f(z1):\n return 1.0 / (self.H(z1) * (1 + z1))\n\n return _intf_0_z(f, z) / self._unit_time", "def calc_R(xc, yc, zc,x,y,z):\n return sqrt((x - xc) ** 2 + (y - yc) ** 2 + (z - zc) ** 2)", "def c_s(self, z):\n R = self.R_bg(z)\n return const.c/np.sqrt(3.*(1.+R))", "def quintic_polynomials_planner(sx, sy, syaw, sv, sa, gx, gy, gyaw, gv, ga,\n max_accel, max_jerk, dt):\n\n vxs = sv * math.cos(syaw)\n vys = sv * math.sin(syaw)\n vxg = gv * math.cos(gyaw)\n vyg = gv * math.sin(gyaw)\n\n axs = sa * math.cos(syaw)\n ays = sa * math.sin(syaw)\n axg = ga * math.cos(gyaw)\n ayg = ga * math.sin(gyaw)\n\n time, rx, ry, ryaw, rv, ra, rj = [], [], [], [], [], [], []\n\n for T in np.arange(MIN_T, MAX_T, MIN_T):\n xqp = QuinticPolynomial(sx, vxs, axs, gx, vxg, axg, T)\n yqp = QuinticPolynomial(sy, vys, ays, gy, vyg, ayg, T)\n\n time, rx, ry, ryaw, rv, ra, rj = [], [], [], [], [], [], []\n\n for t in np.arange(0.0, T + dt, dt):\n time.append(t)\n rx.append(xqp.calc_point(t))\n ry.append(yqp.calc_point(t))\n\n vx = xqp.calc_first_derivative(t)\n vy = yqp.calc_first_derivative(t)\n v = np.hypot(vx, vy)\n yaw = math.atan2(vy, vx)\n rv.append(v)\n ryaw.append(yaw)\n\n ax = xqp.calc_second_derivative(t)\n ay = yqp.calc_second_derivative(t)\n a = np.hypot(ax, ay)\n if len(rv) >= 2 and rv[-1] - 
rv[-2] < 0.0:\n a *= -1\n ra.append(a)\n\n jx = xqp.calc_third_derivative(t)\n jy = yqp.calc_third_derivative(t)\n j = np.hypot(jx, jy)\n if len(ra) >= 2 and ra[-1] - ra[-2] < 0.0:\n j *= -1\n rj.append(j)\n\n if max([abs(i) for i in ra]) <= max_accel and \\\n max([abs(i) for i in rj]) <= max_jerk:\n print(\"find path!!\")\n break\n\n return time, rx, ry, ryaw, rv, ra, rj", "def stump_S(z) :\n\n if z > 0:\n sz = sqrt(z) \n return (sz - sin(sz))/pow(sz,3)\n elif z < 0 :\n s_z = sqrt(-z) \n # According to the equation the denominatori is pow(sqrt(z),3)\n return (sinh(s_z) - s_z)/pow(s_z,3)\n else :\n return 0.1666666666666666", "def cbrt(self):\r\n getcontext().prec += 2\r\n off = self.__class__(-0.5, dec.Decimal(0.75).sqrt()) # (-0.5+0.866j)\r\n mod = cbrt(abs(self))\r\n try:\r\n arg = atan2(self._imag, self._real) / 3\r\n except InvalidOperationError:\r\n arg = 0\r\n rt1 = self.__class__.from_polar(mod, arg)\r\n rt2 = rt1 * off\r\n rt3 = rt2 * off\r\n getcontext().prec -= 2\r\n return (+rt1, +rt2, +rt3)", "def find_rotation(a, b):\n a.shape = (3,)\n b.shape = (3,)\n\n a /= np.linalg.norm(a)\n b /= np.linalg.norm(b)\n \n v = np.cross(a, b)\n \n angle_AB = -1*vector_angle(a, b) \n \n print(angle_AB)\n s = np.linalg.norm(v) * np.sin(angle_AB)\n \n c = np.dot(a, b) * np.cos(angle_AB)\n \n # Rotation matrix, R = I + Vx + Vx^2 * (1-c)/s^2\n I = np.identity(3)\n Vx = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])\n \n R = I + Vx + np.linalg.matrix_power(Vx, 2) / (1+c)\n return R", "def find_greatcircle(ra_deg, dec_deg):\n \n #stream = stream_model(name, pparams0=pparams, dt=dt)\n \n ## find the pole\n #ra = np.radians(stream.obs[0])\n #dec = np.radians(stream.obs[1])\n ra = np.radians(ra_deg)\n dec = np.radians(dec_deg)\n \n rx = np.cos(ra) * np.cos(dec)\n ry = np.sin(ra) * np.cos(dec)\n rz = np.sin(dec)\n r = np.column_stack((rx, ry, rz))\n #r = sph2cart(ra, dec)\n\n # fit the plane\n x0 = np.array([0, 1, 0])\n lsq = scipy.optimize.minimize(wfit_plane, x0, args=(r,))\n x0 = lsq.x/np.linalg.norm(lsq.x)\n ra0 = np.arctan2(x0[1], x0[0])\n dec0 = np.arcsin(x0[2])\n \n ra0 += np.pi\n dec0 = np.pi/2 - dec0\n\n # euler rotations\n R0 = myutils.rotmatrix(np.degrees(-ra0), 2)\n R1 = myutils.rotmatrix(np.degrees(dec0), 1)\n R2 = myutils.rotmatrix(0, 2)\n R = np.dot(R2, np.matmul(R1, R0))\n \n xi, eta = myutils.rotate_angles(ra_deg, dec_deg, R)\n \n # put xi = 50 at the beginning of the stream\n xi[xi>180] -= 360\n xi += 360\n xi0 = np.min(xi) - 50\n R2 = myutils.rotmatrix(-xi0, 2)\n R = np.dot(R2, np.matmul(R1, R0))\n xi, eta = myutils.rotate_angles(ra_deg, dec_deg, R)\n \n return R", "def _derZ(self, w, x, y, z):\n if _isscalar(w):\n x_pos = max(min(np.searchsorted(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (x - self.x_list[x_pos - 1]) / (\n self.y_list[x_pos] - self.x_list[x_pos - 1]\n )\n beta = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n dfdz = (\n (\n (1 - alpha)\n * (1 - beta)\n * self.wInterpolators[x_pos - 1][y_pos - 1][z_pos](w)\n + (1 - alpha)\n * beta\n * self.wInterpolators[x_pos - 1][y_pos][z_pos](w)\n + alpha\n * (1 - beta)\n * self.wInterpolators[x_pos][y_pos - 1][z_pos](w)\n + alpha * beta * self.wInterpolators[x_pos][y_pos][z_pos](w)\n )\n - (\n (1 - alpha)\n * (1 - beta)\n * self.wInterpolators[x_pos - 1][y_pos - 1][z_pos - 1](w)\n + (1 - alpha)\n * beta\n * self.wInterpolators[x_pos - 
1][y_pos][z_pos - 1](w)\n + alpha\n * (1 - beta)\n * self.wInterpolators[x_pos][y_pos - 1][z_pos - 1](w)\n + alpha * beta * self.wInterpolators[x_pos][y_pos][z_pos - 1](w)\n )\n ) / (self.z_list[z_pos] - self.z_list[z_pos - 1])\n else:\n m = len(x)\n x_pos = np.searchsorted(self.x_list, x)\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n dfdz = np.zeros(m) + np.nan\n for i in range(1, self.x_n):\n for j in range(1, self.y_n):\n for k in range(1, self.z_n):\n c = np.logical_and(\n np.logical_and(i == x_pos, j == y_pos), k == z_pos\n )\n if np.any(c):\n alpha = (x[c] - self.x_list[i - 1]) / (\n self.x_list[i] - self.x_list[i - 1]\n )\n beta = (y[c] - self.y_list[j - 1]) / (\n self.y_list[j] - self.y_list[j - 1]\n )\n dfdz[c] = (\n (\n (1 - alpha)\n * (1 - beta)\n * self.wInterpolators[i - 1][j - 1][k](w[c])\n + (1 - alpha)\n * beta\n * self.wInterpolators[i - 1][j][k](w[c])\n + alpha\n * (1 - beta)\n * self.wInterpolators[i][j - 1][k](w[c])\n + alpha * beta * self.wInterpolators[i][j][k](w[c])\n )\n - (\n (1 - alpha)\n * (1 - beta)\n * self.wInterpolators[i - 1][j - 1][k - 1](w[c])\n + (1 - alpha)\n * beta\n * self.wInterpolators[i - 1][j][k - 1](w[c])\n + alpha\n * (1 - beta)\n * self.wInterpolators[i][j - 1][k - 1](w[c])\n + alpha\n * beta\n * self.wInterpolators[i][j][k - 1](w[c])\n )\n ) / (self.z_list[k] - self.z_list[k - 1])\n return dfdz", "def get_sn2005ek(colorplt=False):\n z = 0.016551\n ebv = 0.210\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n t_max = 53639.9\n print (\"adopt r band t_max from Drout+13\")\n \n # tb = pd.read_csv('/Users/yuhanyao/Desktop/ZTF18abfcmjw/data/Drout2013/table1', sep='\\t')\n # tb = tb.drop(columns=[\"Unnamed: 6\"])\n \n mjds = np.array([53639.3, 53640.3, 53641.3, 53642.2, 53643.2, 53645.3,\n 53646.5, 53648.0, 53649.2, 53650.4, 53651.3, 53652.5,\n 53654.2, 53655.2, 53656.2, 53657.2])\n \n Bmags = np.array([18.25, 18.38, 18.65, np.nan, 19.10, 19.71,\n 20.07, np.nan, 20.67, 20.90, 21.05, np.nan,\n 21.74, np.nan, np.nan, np.nan])\n \n Bmag_uncs = np.array([0.02, 0.03, 0.02, np.nan, 0.05, 0.07, \n 0.07, np.nan, 0.04, 0.04, 0.04, np.nan,\n 0.12, np.nan, np.nan, np.nan])\n \n Vmags = np.array([17.83, 18.03, 17.92, np.nan, 18.24, 18.66,\n 18.93, 19.48, 19.63, 19.86, 19.98, 20.35,\n 20.60, 20.74, 20.88, 21.22])\n \n Vmag_uncs = np.array([0.02, 0.03, 0.01, np.nan, 0.02, 0.02,\n 0.02, 0.06, 0.03, 0.03, 0.04, 0.05, \n 0.08, 0.10, 0.08, 0.13])\n \n Rmags = np.array([17.46, 17.41, 17.60, 17.69, 17.86, 18.18, \n np.nan, 18.83, 19.03, 19.26, 19.48, 19.75,\n 20.08, np.nan, 20.47, np.nan])\n \n Rmag_uncs = np.array([0.01, 0.02, 0.01, 0.02, 0.01, 0.01,\n np.nan, 0.03, 0.02, 0.02, 0.02, 0.04,\n 0.05, np.nan, 0.08, np.nan])\n\n Imags = np.array([17.20, 17.13, 17.18, np.nan, 17.47, 17.71, \n np.nan, 18.13, 18.26, 18.51, 18.61, 18.74, \n 19.01, np.nan, 19.47, np.nan])\n \n Imag_uncs = np.array([0.02, 0.04, 0.02, np.nan, 0.03, 0.02,\n np.nan, 0.06, 0.02, 0.02, 0.02, 0.03,\n 0.05, np.nan, 0.06, np.nan])\n \n mymjds = np.hstack([mjds, mjds, mjds, mjds])\n mymags = np.hstack([Bmags, Vmags, Rmags, Imags])\n myemags = np.hstack([Bmag_uncs, Vmag_uncs, Rmag_uncs, Imag_uncs])\n myfilts = np.hstack([ np.repeat(\"B\", len(Bmags)),\n np.repeat(\"V\", len(Bmags)),\n np.repeat(\"R\", len(Rmags)),\n np.repeat(\"I\", len(Imags)) 
])\n ix = ~np.isnan(mymags)\n tb = pd.DataFrame({'mjd': mymjds[ix],\n 'mag': mymags[ix],\n 'emag': myemags[ix],\n \"filter\": myfilts[ix]})\n \n ixB = tb['filter'].values==\"B\"\n ixV = tb['filter'].values==\"V\"\n ixR = tb['filter'].values==\"R\"\n ixI = tb['filter'].values==\"I\"\n \n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixB] = 4359\n tb['wave'].values[ixV] = 5430\n tb['wave'].values[ixR] = 6349\n tb['wave'].values[ixI] = 8797\n \n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n if colorplt==False:\n return tb\n else:\n tb = add_datecol(tb)\n ix = np.in1d(tb[\"filter\"].values, np.array(['B', 'R', 'I']))\n tb = tb[ix]\n\n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"R\" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"B\"]\n rtb = tbsub[tbsub[\"filter\"].values==\"R\"]\n itb = tbsub[tbsub[\"filter\"].values==\"I\"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"BmR\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"RmI\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb" ]
[ "0.5970234", "0.586566", "0.5805997", "0.5770309", "0.57532215", "0.5680772", "0.566361", "0.5569986", "0.5537452", "0.5494817", "0.5490782", "0.5471373", "0.54355586", "0.54301345", "0.5428771", "0.54233015", "0.54138273", "0.5389946", "0.53683394", "0.53642684", "0.5326583", "0.5326509", "0.53183603", "0.5315829", "0.53021896", "0.53000104", "0.52969205", "0.5288066", "0.528287", "0.5268439", "0.52608335", "0.52603865", "0.5243416", "0.5229551", "0.5221554", "0.52119344", "0.5208131", "0.5202403", "0.5190058", "0.5186938", "0.51706475", "0.5170313", "0.516498", "0.5164476", "0.5152348", "0.51458704", "0.51417947", "0.5127674", "0.512617", "0.5110511", "0.5106587", "0.5098651", "0.50875926", "0.5078536", "0.50779223", "0.5077885", "0.5076157", "0.5074694", "0.50720304", "0.5071422", "0.5070652", "0.5068149", "0.5066571", "0.50559026", "0.5051032", "0.50487053", "0.5045939", "0.50440776", "0.5039338", "0.50362676", "0.5029863", "0.50229394", "0.5021864", "0.50215125", "0.50199866", "0.50183064", "0.5016711", "0.50146955", "0.50136465", "0.5013412", "0.50096726", "0.5008974", "0.5006506", "0.5005688", "0.50030637", "0.50024855", "0.5001488", "0.49988568", "0.49983418", "0.49968445", "0.49916428", "0.49872285", "0.49859986", "0.49847922", "0.49826846", "0.49768516", "0.49740013", "0.49736294", "0.49720818", "0.49696857", "0.4969578" ]
0.0
-1
smooth sharp_edge_image with a Gaussian function 1. The sharp_edge image is convolved with a Gaussian kernel 2. The convolution is normalized
def gauss_edge(sharp_edge_image, kernel_size = 7, gauss_standard_dev =3): from utilities import model_gauss from EMAN2 import rsconvolution nz = sharp_edge_image.get_ndim() if(nz == 3): kern = model_gauss(gauss_standard_dev, kernel_size , kernel_size, kernel_size) elif(nz == 2): kern = model_gauss(gauss_standard_dev, kernel_size , kernel_size) else: kern = model_gauss(gauss_standard_dev, kernel_size) aves = Util.infomask(kern, None, False) nx = kern.get_xsize() ny = kern.get_ysize() nz = kern.get_zsize() kern /= (aves[0]*nx*ny*nz) return rsconvolution(sharp_edge_image, kern)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def smooth(image):\n image = convolve(image, gaussian2d(), mode='same')\n return image", "def smooth_image(self, image, mask):\n \n filter_size = self.smoothing_filter_size.value\n if filter_size == 0:\n return image\n sigma = filter_size / 2.35\n #\n # We not only want to smooth using a Gaussian, but we want to limit\n # the spread of the smoothing to 2 SD, partly to make things happen\n # locally, partly to make things run faster, partly to try to match\n # the Matlab behavior.\n #\n filter_size = max(int(float(filter_size) / 2.0),1)\n f = (1/np.sqrt(2.0 * np.pi ) / sigma * \n np.exp(-0.5 * np.arange(-filter_size, filter_size+1)**2 / \n sigma ** 2))\n def fgaussian(image):\n output = scipy.ndimage.convolve1d(image, f,\n axis = 0,\n mode='constant')\n return scipy.ndimage.convolve1d(output, f,\n axis = 1,\n mode='constant')\n #\n # Use the trick where you similarly convolve an array of ones to find \n # out the edge effects, then divide to correct the edge effects\n #\n edge_array = fgaussian(mask.astype(float))\n masked_image = image.copy()\n masked_image[~mask] = 0\n smoothed_image = fgaussian(masked_image)\n masked_image[mask] = smoothed_image[mask] / edge_array[mask]\n return masked_image", "def scipy_smooth(img, sigma=5):\n return ndimage.gaussian_filter(img, sigma=sigma)", "def smooth(img, sigma):\n if sigma < 0:\n raise ValueError('smoothing kernel size is negative')\n elif sigma == 0:\n return img.get_data()\n else:\n sigma_vox = sigma / np.sqrt(np.sum(img.get_affine()[0:3, 0:3] ** 2, 0))\n return nd.gaussian_filter(img.get_data(), sigma_vox)", "def apply_smoothing(image, kernel_size=3):\n return cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)", "def run_gaussian_smoothing(image, kernel_size=5):\n return cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)", "def edges(image):\n #store image width and height and initialize new image\n image_width = image['width']\n image_height = image['height']\n new_image = {'height': image['height'], 'width': image['width'], 'pixels': len(image['pixels'])*[0]}\n \n #sobel operator kernels\n kernel_x = {'height': 3, 'width': 3, 'pixels': [-1,0,1,-2,0,2,-1,0,1]}\n kernel_y = {'height': 3, 'width': 3, 'pixels': [-1,-2,-1,0,0,0,1,2,1]}\n \n #creating the filters\n o_x = correlate(image, kernel_x)\n o_y = correlate(image, kernel_y)\n\n #perform relvant calculation for each pixel \n for x in range(image_width):\n for y in range(image_height):\n a = ((get_pixel(o_x, x, y))**2+(get_pixel(o_y, x, y))**2)**0.5\n set_pixel(new_image, x, y, a)\n return round_and_clip_image(new_image)", "def gauss_smooth(data, sigma):\n\t\t\t# make the kernel 5 sigmas wide in each direction\n\t\t\tkernel = stats.norm.pdf(np.arange(-5*sigma, (5*sigma)+1), scale=sigma)\n\t\t\t\n\t\t\treturn sp.ndimage.convolve1d(data, kernel, axis=2)", "def smooth_gauss(image, variance=2, kernel_size=(9, 9)):\n return cv2.GaussianBlur(image, kernel_size, variance)", "def ghosal_edge_v2(img,Ks,kmin=0,kmax=1000,lmax=0.5,phimin=1,thresholding=True,debug=False,mirror=False):\n\t# gather image properties before its altered\n\tni,nj = np.shape(img)\n\t# Ks must be odd\n\tif Ks%2 != 1:\n\t\tprint(\"Ks must be odd! 
Continuing with Ks = Ks-1\")\n\t\tKs = Ks-1\n\t# define the rectangular kernels\n\t#Vc00 = np.zeros((Ks,Ks),dtype=complex) # not needed\n\tVc11 = np.zeros((Ks,Ks),dtype=complex)\n\tVc20 = np.zeros((Ks,Ks),dtype=complex)\n\tofs = 1 *(1-1/Ks) # offset for centering kernel around 0,0\n\tfor i in range(Ks):\n\t\tfor j in range(Ks):\n\t\t\tKx = 2*j/Ks-ofs # limits of integration between -1 and 1\n\t\t\tKy = 2*i/Ks-ofs\n\t\t\tif Kx**2+Ky**2 <= 1: # only a circle\n\t\t\t\t#Vc00[i,j] = 1 # the conjugate of V00 # not needed\n\t\t\t\tVc11[i,j] = Kx-Ky*1j # ...\n\t\t\t\tVc20[i,j] = 2*Kx**2+2*Ky**2-1\n\t# mirror the edges to avoid edge effects from convolution\n\tif mirror:\n\t\tthick = int((Ks-1)/2)\n\t\timg = np.concatenate((img[:,(thick-1)::-1],img,img[:,:-(thick+1):-1]),1)\n\t\timg = np.concatenate((img[(thick-1)::-1,:],img,img[:-(thick+1):-1,:]),0)\n\t\tmode = \"valid\"\n\telse:\n\t\tmode = \"same\"\n\t\n\t# do the convolution with the images to get the zernike moments\n\tAnorm = lambda n : (n+1)/np.pi\t# a normalization value\n\t#A00 = scig.convolve2d(img,Vc00,mode='same') # not needed\n\tA11 = Anorm(1)*scig.oaconvolve(img,Vc11,mode=mode)\n\tA20 = Anorm(2)*scig.oaconvolve(img,Vc20,mode=mode)\n\n\tphi = np.arctan(np.imag(A11)/zero_to_small(np.real(A11)))\n\tAl11 = np.real(A11)*np.cos(phi)+np.imag(A11)*np.sin(phi)\n\tl = np.real(A20)/Al11 # A20 has no imaginary component so A20 = A'20\n\tl = np.minimum(l,1-SMALL) # chop off those that go beyond the kernel boundaries\n\tl = np.maximum(l,-1+SMALL)\n\tk = abs(3*Al11/(2*(1-l**2)**(3/2))) \n\t\n\tif thresholding==True:\n\t\t# conditions\n\t\tphi_c = abs(phi)>phimin\n\t\tl_c = abs(l)<lmax\n\t\tk_c = (k<kmax) & (k>kmin)\n\t\tvalid = phi_c & (k_c & l_c)\n\telif thresholding==False:\n\t\tvalid = np.ones_like(k)\n\t# define a grid of pixel positions\n\ti,j = np.meshgrid(np.arange(nj),np.arange(ni))\n\t\n\t# get a list of the valid relevant parameters \n\ti = i[valid]\n\tj = j[valid]\n\t#\tk = k[valid] # not necessary\n\tl = l[valid]\n\tphi = phi[valid]\n\t\n\t# convert to the subpixel position\n\ti_s = i+l*Ks/2*np.cos(phi)\n\tj_s = j+l*Ks/2*np.sin(phi)\n\t\n\t# put all detected points in a vector of (x,y) values\n\tedg = np.squeeze((j_s,i_s)).transpose()\n\torg = np.squeeze((j,i)).transpose()\n\tif debug==True:\n\t\treturn edg, org, k, l, phi\n\telse:\n\t\treturn edg, org", "def gaussian_highpass(image):\n lowpass = ndimage.gaussian_filter(image, 2)\n highpass = image - lowpass\n return highpass", "def apply_sharpening_on(image):\n # Create kernel\n kernel = np.array([[0, -1, 0],\n [-1, 5, -1],\n [0, -1, 0]])\n\n # Sharpen image\n sharp_image = cv2.filter2D(image, -1, kernel)\n return sharp_image", "def test_gaussian_filter():\n\n def rgb2gray(rgb):\n r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]\n gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n\n return gray\n\n img = rgb2gray(np.array(Image.open('data/graf.png')))\n gx, x = gauss_module.gauss(4)\n gx = gx.reshape(1, gx.shape[0])\n gy = gx.reshape(gx.shape[1], gx.shape[0])\n smooth_img = conv2(img, gx * np.array(gy))\n\n test_smooth_img = gauss_module.gaussianfilter(img, 4)\n\n assert np.all(smooth_img.round(5) == test_smooth_img.round(5))", "def enhance_edges(image):\n working_image = image.copy()\n working_image = cv2.cvtColor(working_image, cv2.COLOR_BGR2GRAY)\n # Blur away fine details.\n working_image = cv2.GaussianBlur(working_image, (5, 5), 0)\n return working_image", "def smooth_scipy(self, mri_data):\n\n # image dimension\n if hasattr(mri_data.img_header, 'info'):\n dx, dy, dz = 
np.abs(mri_data.img_header.info['DELTA'])\n elif hasattr(mri_data.img_header, 'get_zooms'):\n dx, dy, dz = mri_data.img_header.get_zooms()[:3]\n else:\n self.errmsg(\"No voxel size information in mri_data header\")\n\n # Set gaussian sigma in image dimension\n sigma = (self.blur_fwhm / np.array((dx, dy, dz))) / 2.354820\n imgdata = mri_data.img_data.astype(np.float64)\n\n # Apply mask\n if hasattr(self, 'maskV'):\n imgdata[~self.maskV] = 0\n\n # Apply Gaussian filter\n filt_img = gaussian_filter(imgdata, sigma, mode='constant')\n\n if hasattr(self, 'maskV'):\n # Adjust voxels with out of the mask (0) convolution\n aux_img = np.ones_like(imgdata)\n aux_img[~self.maskV] = 0\n filt_aux_img = gaussian_filter(aux_img, sigma, mode='constant')\n filt_img[self.maskV] /= filt_aux_img[self.maskV]\n\n return filt_img.astype(mri_data.img_data.dtype)", "def smooth_image(img_file=\"cy_double.png\"):\n \n oldimg, newimg, width, height, win = setup_image(img_file)\n\n for col in range(newimg.getWidth()):\n for row in range(newimg.getHeight()):\n p = newimg.getPixel(col, row)\n neighbors = []\n # Put the 8 surrounding pixels into neighbors\n for i in range(col-1, col+2):\n for j in range(row-1, row+2):\n try:\n neighbor = newimg.getPixel(i, j)\n neighbors.append(neighbor)\n except:\n continue\n nlen = len(neighbors)\n # Average out the RBG values\n if nlen:\n # Uncommented, the following line would leave most of the white \n # untouched which works a little better for real photographs, imo.\n #~ if nlen and p[0]+p[1]+p[2] < 690:\n p.red = sum([neighbors[i][0] for i in range(nlen)])/nlen\n p.green = sum([neighbors[i][1] for i in range(nlen)])/nlen\n p.blue = sum([neighbors[i][2] for i in range(nlen)])/nlen\n newimg.setPixel(col,row,p)\n\n write_image(img_file, newimg, win, \"_smooth\")", "def smooth(im, n=15):\n g = gaussKern(n)\n improc = signal.convolve2d(im, g, mode='same', boundary='symm')\n return improc", "def gauss_convolution(im_array, n_fwhm, fwhm) :\n \n sigma = fwhm / (2.*math.sqrt(2.*math.log(2.)))\n\t\n im_kernel_array = gauss_kernel(n_fwhm, sigma)\n conv_image = signal.convolve(im_array,im_kernel_array,mode = 'same')\n\n return (conv_image)", "def smoothImage(img):\n # Pillow uses RGB and cv2 uses GBR, so have to convert before and after smoothing\n imgBGR = cv2.cvtColor(np.asarray(img), cv2.COLOR_BGR2RGB)\n # smoothImgBGR = cv2.fastNlMeansDenoisingColored(imgBGR, None, 10,10,7,21)\n smoothImgBGR = cv2.bilateralFilter(imgBGR, 9, 75, 75)\n smoothImgRGB = cv2.cvtColor(smoothImgBGR, cv2.COLOR_BGR2RGB)\n return Image.fromarray(smoothImgRGB)", "def create_external_edge_force_gradients_from_img( img, sigma=30. ):\n # Gaussian smoothing.\n smoothed = filt.gaussian( (img-img.min()) / (img.max()-img.min()), sigma )\n # Gradient of the image in x and y directions.\n giy, gix = np.gradient( smoothed )\n # Gradient magnitude of the image.\n gmi = (gix**2 + giy**2)**(0.5)\n # Normalize. 
This is crucial (empirical observation).\n gmi = (gmi - gmi.min()) / (gmi.max() - gmi.min())\n\n # Gradient of gradient magnitude of the image in x and y directions.\n ggmiy, ggmix = np.gradient( gmi )\n\n def fx(x, y):\n \"\"\"\n Return external edge force in the x direction.\n\n x: ndarray\n numpy array of floats.\n y: ndarray:\n numpy array of floats.\n \"\"\"\n # Check bounds.\n x[ x < 0 ] = 0.\n y[ y < 0 ] = 0.\n\n x[ x > img.shape[1]-1 ] = img.shape[1]-1\n y[ y > img.shape[0]-1 ] = img.shape[0]-1\n\n return ggmix[ (y.round().astype(int), x.round().astype(int)) ]\n\n def fy(x, y):\n \"\"\"\n Return external edge force in the y direction.\n\n x: ndarray\n numpy array of floats.\n y: ndarray:\n numpy array of floats.\n \"\"\"\n # Check bounds.\n x[ x < 0 ] = 0.\n y[ y < 0 ] = 0.\n\n x[ x > img.shape[1]-1 ] = img.shape[1]-1\n y[ y > img.shape[0]-1 ] = img.shape[0]-1\n\n return ggmiy[ (y.round().astype(int), x.round().astype(int)) ]\n\n return fx, fy", "def sharpen(img, ker = (9,9), sigX=10.0):\n gaus = cv2.GaussianBlur(img, ker, sigX)\n unsharp = cv2.addWeighted(img, 1.5, gaus, -0.5, 0, img)\n return unsharp", "def smooth_2d(res_array, window_len):\n\n gx, gy = np.mgrid[-window_len : window_len + 1, -window_len : window_len + 1]\n\n gauss = np.exp(-(gx ** 2 / float(window_len) + gy ** 2 / float(window_len)))\n gauss /= gauss.sum()\n\n smooth_array = sps.convolve(res_array, gauss, mode=\"same\")\n\n return smooth_array", "def sharpen(im):\n kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])\n im = cv2.filter2D(im, -1, kernel)\n return im", "def convolveAndDownsample(img):\n # Select every other pixel from G\n G = sp.signal.convolve2d(img, guassianFilter, 'same')\n return G[::2, ::2]", "def make_glow_model(im_in, bin_x=1, bin_y=1):\n im=im_in.copy()\n im[0]=im[2]\n im[1]=im[2]\n im[-1]=im[-2]\n \n #glow image\n glow=np.zeros_like(im)\n \n #meshgrid\n x, y = np.meshgrid(np.arange(im.shape[1]), np.arange(im.shape[0]))\n \n \n def model_corner(im, x0, y0, xw, yw, iparams, std_clip=0):\n \"\"\" std_clip is the y height of the small corner to use to exclude\n spectra in the large corner,\n \n (iparams=(glow amp, x center, y center, xwid, ywid, xy amount)\n \n positions and initial params adjusted automatically for binning\n pass coordinates in 4k positions\n \"\"\"\n x0/=bin_x\n y0/=bin_y\n xw/=bin_x\n yw/=bin_y\n iparams=list(iparams)\n iparams[1]/=bin_x\n iparams[2]/=bin_y\n iparams[3]/=bin_x\n iparams[4]/=bin_y\n \n corner=im[y0:y0+yw,x0:x0+xw].copy()\n if std_clip:\n small_corner=im[y0:y0+std_clip,x0:x0+xw].copy()\n patch_locs=corner>2*small_corner.std()\n patch_locs[:y0+std_clip,:]=False\n corner[patch_locs]=np.median(small_corner)\n cim, param= gaussfit2D(corner, iparams)\n param=list(param)\n param[-1]=0\n param[1]+=x0\n param[2]+=y0\n return gauss2D(( x,y), *param)\n \n #Lower R\n try:\n tmp=model_corner(im, 3996, 2, 100, 100,\n (150, 58, -7, 30.0, 20.0, 0, 0))\n if tmp.min() < 0:\n raise RuntimeError('Glow model has negative values')\n else:\n glow+=tmp\n\n except RuntimeError, e:\n print 'Lower R glow model failed: {}'.format(str(e))\n\n #Lower L\n try:\n tmp=model_corner(im, 0, 2, 100, 100,\n (150, 40, 0, 30.0, 20.0, 0, 0),\n std_clip=50)\n if tmp.min() < 0:\n raise RuntimeError('Glow model has negative values')\n else:\n glow+=tmp\n\n except RuntimeError, e:\n print 'Lower L glow model failed: {}'.format(str(e))\n \n\n #Upper L\n try:\n tmp=model_corner(im, 0, 4012, 100, 100,\n (150, 40, 100, 30.0, 20.0, 0, 0))\n if tmp.min() < 0:\n raise RuntimeError('Glow model has 
negative values')\n else:\n glow+=tmp\n\n except RuntimeError, e:\n print 'Upper L glow model failed: {}'.format(str(e))\n\n #Upper R\n try:\n tmp=model_corner(im, 3996, 4000, 100, 100,\n (150, 58, 100, 30.0, 20.0, 0, 0))\n if tmp.min() < 0:\n raise RuntimeError('Glow model has negative values')\n else:\n glow+=tmp\n\n except RuntimeError, e:\n print 'Upper R glow model failed: {}'.format(str(e))\n \n return glow", "def edgesMarrHildreth(img, sigma):\n\tsize = int(2*(np.ceil(3*sigma))+1)\n\n\tx, y = np.meshgrid(np.arange(-size/2+1, size/2+1), np.arange(-size/2+1, size/2+1))\n\t\n\tnormal = 1 / (2.0 * np.pi * sigma**2)\n\n\tkernel = ((x**2 + y**2 - (2.0*sigma**2)) / sigma**4) * np.exp(-(x**2+y**2) / (2.0*sigma**2)) / normal # LoG filter\n\n\tkern_size = kernel.shape[0]\n\tlog = np.zeros_like(img, dtype=float)\n\n\t# applying filter\n\tfor i in range(img.shape[0]-(kern_size-1)):\n\t\tfor j in range(img.shape[1]-(kern_size-1)):\n\t\t\twindow = img[i:i+kern_size, j:j+kern_size] * kernel\n\t\t\tlog[i,j] = np.sum(window)\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\n\tlog = log.astype(np.int64, copy=False)\n\n\tzero_crossing = np.zeros_like(log)\n\n\t# computing zero crossing\n\tfor i in range(log.shape[0]-(kern_size-1)):\n\t\tfor j in range(log.shape[1]-(kern_size-1)):\n\t\t\tif log[i][j] == 0:\n\t\t\t\tif (log[i][j-1] < 0 and log[i][j+1] > 0) or (log[i][j-1] < 0 and log[i][j+1] < 0) or (log[i-1][j] < 0 and log[i+1][j] > 0) or (log[i-1][j] > 0 and log[i+1][j] < 0):\n\t\t\t\t\tzero_crossing[i][j] = 255\n\t\t\tif log[i][j] < 0:\n\t\t\t\tif (log[i][j-1] > 0) or (log[i][j+1] > 0) or (log[i-1][j] > 0) or (log[i+1][j] > 0):\n\t\t\t\t\tzero_crossing[i][j] = 255 \n\n\t# plotting images\n\tfig = plt.figure()\n\ta =fig.add_subplot(1,2,1)\n\timgplot = plt.imshow(log, cmap='gray')\n\ta.set_title('Laplacian of Gaussian')\n\ta = fig.add_subplot(1,2,2)\n\timgplot = plt.imshow(zero_crossing, cmap='gray')\n\tstring = 'Zero Crossing sigma = '\n\tstring += (str(sigma))\n\ta.set_title(string)\t\n\tplt.show()\n\t\n\treturn zero_crossing", "def g2dfwhm(img):\n npix = img.shape[0]\n rowCen,colCen = adaptiveCentroid(img,1.1/scale)\n row,col = np.mgrid[0:npix,0:npix]\n row = row - rowCen\n col = col - colCen\n A0,sigmac0 = moments(img)\n sigmar0 = sigmac0\n rho0 = 0.\n B0 = 0.\n p0=np.array([sigmac0,sigmar0,rho0,A0, B0])\n def residualg2d(p,x,y,xc,yc,I):\n sigmax,sigmay,rho,A,B = p\n Ierr = np.sqrt(abs(I))+0.00001 # to avoid those = 0, add a small number \n res = (gaussian2d(x,y,xc,yc,sigmax,sigmay,rho,A,B) - I)/Ierr\n return res.flatten()\n p = leastsq(residualg2d,p0,args=(col,row,colCen,rowCen,img))[0]\n sigmac,sigmar,rho,A,B = p\n Mcc = sigmac**2\n Mrr = sigmar**2\n Mrc = rho**2*Mcc*Mrr\n M20 = Mrr + Mcc\n M22 = complex(Mcc - Mrr,2*Mrc)\n whiskerLength = np.sqrt(np.abs(M22))\n lambdap = 0.5*(M20 + abs(M22))\n lambdam = 0.5*(M20 - abs(M22))\n fwhm_g2d = np.sqrt(2.*np.log(2.))*(np.sqrt(lambdap)+np.sqrt(lambdam))\n #fwhm = np.sqrt(M20/2.)*2.35482*scale\n return A, B, whiskerLength, fwhm_g2d", "def apply_smoothstep(image):\n image_out = 3 * image**2 - 2 * image**3\n return image_out", "def sharp_ground(X):\n return img_conv(X, kernel_sharp)", "def edge_detect(image):\n smoothed = smooth(image)\n g_mag, 
g_theta = gradient(smoothed)\n nms_image = nms(g_mag, g_theta)\n thresholded = hysteresis_threshold(nms_image, g_theta)\n return smoothed, g_mag, nms_image, thresholded", "def gaussian2d(filter_size=5, sig=1.0):\n ax = np.arange(-filter_size // 2 + 1., filter_size // 2 + 1.)\n xx, yy = np.meshgrid(ax, ax)\n kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)) / np.square(sig))\n return kernel / np.sum(kernel)", "def border(img,sigma=10.0,h=8):\n g = gauss_kern(sigma,h)\n img_smooth = signal.convolve(img, g, mode='same')\n Iy, Ix = np.gradient(img_smooth)\n absGradI=np.sqrt(Ix**2+Iy**2)\n return 1 / (1+absGradI**2)", "def gaussbroad(w, s, hwhm):\n \"\"\"\n History\n --------\n Dec-90 GB,GM\n Rewrote with fourier convolution algorithm.\n Jul-91 AL\n Translated from ANA to IDL.\n 22-Sep-91 JAV\n Relaxed constant dispersion check# vectorized, 50% faster.\n 05-Jul-92 JAV\n Converted to function, handle nonpositive hwhm.\n Oct-18 AW\n Python version\n \"\"\"\n\n # Warn user if hwhm is negative.\n if hwhm < 0:\n logger.warning(\"Forcing negative smoothing width to zero.\")\n\n # Return input argument if half-width is nonpositive.\n if hwhm <= 0:\n return s # true: no broadening\n\n # Calculate (uniform) dispersion.\n nw = len(w) ## points in spectrum\n wrange = w[-1] - w[0]\n dw = wrange / (nw - 1) # wavelength change per pixel\n\n # Make smoothing gaussian; extend to 4 sigma.\n # 4.0 / sqrt(2.0 * alog(2.0)) = 3.3972872\n # sqrt(alog(2.0)) = 0.83255461\n # sqrt(alog(2.0) / pi) = 0.46971864\n # (*1.0000632 to correct for >4 sigma wings)\n if hwhm >= 5 * wrange:\n return np.full(nw, np.sum(s) / nw)\n ## points in half gaussian\n nhalf = int(3.3972872 * hwhm / dw)\n ## points in gaussian (odd!)\n ng = 2 * nhalf + 1\n # wavelength scale of gaussian\n wg = dw * (np.arange(ng, dtype=float) - (ng - 1) / 2)\n # convenient absisca\n xg = (0.83255461 / hwhm) * wg\n # unit area gaussian w / FWHM\n gpro = (0.46974832 * dw / hwhm) * np.exp(-xg * xg)\n gpro = gpro / np.sum(gpro)\n\n # Pad spectrum ends to minimize impact of Fourier ringing.\n sout = convolve(s, gpro, mode=\"nearest\")\n\n return sout", "def kernel_gaussiano(image: np.ndarray, sigma: float, kind: str = 'low') -> np.ndarray:\n U, V = fourier_meshgrid(image)\n D = fourier_distance(U, V)\n H = np.exp( (-1.0 * D) / (2.0 * sigma**2) )\n \n if kind == 'high' or kind == 'highpass':\n H = 1.0 - H\n \n return H", "def flatten(img,sigma=20.) :\n\n for i in range(img.shape[0]) :\n img[i] /= np.median(img[i])\n for i in range(img.shape[1]) :\n img[:,i] /= np.median(img[:,i])\n\n hw=int(3*sigma)\n u=np.linspace(-hw,hw,2*hw+1)\n x=np.tile(u,(2*hw+1,1))\n y=x.T\n k=np.exp(-x**2/2/sigma**2-y**2/2/sigma**2)\n k /= np.sum(k)\n smooth=convolve2d(img,k,weight=None)\n img /= smooth\n\n return img", "def _gaussian_for_learn_denosing_model(image):\n return add_gaussian_noise(image, 0, 0.2)", "def moffat_convolution(im_array,n_fwhm,beta,fwhm) :\n\n r_s = fwhm/(2. 
*math.sqrt(2.**(1./beta)-1.))\n\t\n im_kernel_array = gauss_kernel(n_fwhm,beta,r_s)\n conv_image = signal.convolve(im_array,im_kernel_array,mode = 'same')\n\n return (conv_image)", "def __gaussian_blur(self, img, kernel_size=3):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def blurImage1(in_image: np.ndarray, kernel_size: np.ndarray) -> np.ndarray:\r\n size = kernel_size[0]\r\n sigma = 1\r\n x, y = np.mgrid[-size:size + 1, -size:size + 1]\r\n normal = 1 / (2.0 * np.pi * sigma ** 2)\r\n g = np.exp(-((x ** 2 + y ** 2) / (2.0 * sigma ** 2))) * normal\r\n in_image = cv2.filter2D(in_image, -1, g)\r\n return in_image", "def gaussian_blur(self,img):\n return cv2.GaussianBlur(img, (self.kernel_size, self.kernel_size), 0)", "def canny(img, kernel_size=5, sigma=1.4, high=20, low=15):\n ### YOUR CODE HERE\n gaussiankernel = gaussian_kernel(kernel_size, sigma)\n smoothed_image = conv(img, kernel)\n G, theta = gradient(smoothed_image)\n nms = non_maximum_suppression(G, theta)\n strong_edges, weak_edges = double_thresholding(nms, high, low)\n edge = link_edges(strong_edges, weak_edges)\n ### END YOUR CODE\n\n return edge", "def Get_2d_smoothed_activation( MNI_coords, kernel_width=10 ):\n MNI_coords = MNI_coords[:, :2].astype('int') + 100\n\n arr = np.zeros((200,200))\n arr[ MNI_coords[:,0], MNI_coords[:,1]] = 1\n\n return gaussian_filter( arr, kernel_width )", "def smooth_gray_image(raw_img):\n return cv2.blur(raw_img, (5, 5))", "def myHybridImages(lowImage: np.ndarray, lowSigma: float, highImage: np.ndarray, highSigma: float) -> np.ndarray:\n\n # Your code here.\n lowFilteredImage = convolve(lowImage, makeGaussianKernel(lowSigma))\n print(\"the picture should be below\")\n plt.imshow(lowFilteredImage)\n #plt.show()\n print(\"the picture should be upper\")\n \n highFilteredImage = highImage - convolve(highImage, makeGaussianKernel(highSigma)\n plt.imshow(highFilteredImage)\n plt.show()\n hybridImage = lowFilteredImage + highFilteredImage\n #print(lowFilteredImage)\n #print(highFilteredImage)\n #print(hybridImage)\n return hybridImage", "def gaussian_filter(img,f=5,K=1,var=1):\n i_x, i_y = np.shape(img) # image size\n radi = f//2 # window radius\n\n # create gaussian kernel\n def gaussian_kernel(f,K,var):\n \n # create coordinate information \n if f//2 == 0:\n x = np.linspace(-radi,radi,f+1)\n y = np.linspace(-radi,radi,f+1)\n x = np.delete(x, radi)\n y = np.delete(y, radi)\n else:\n x = np.linspace(-radi,radi,f)\n y = np.linspace(-radi,radi,f)\n\n m_x, m_y = np.meshgrid(x,y) # create coordinate\n r_gauss = m_x**2 + m_y**2 # distance to origin\n gauss = K*(np.exp(-r_gauss/(2*(var**2)))) # create kernel\n return gauss/gauss.sum()\n \n #mirror padding\n def mir_padding(img,f):\n img_p = np.zeros((i_x+2*radi,i_y+2*radi)) #create padding image\n img_p[radi:i_x+radi,radi:i_y+radi] = img #throw original image to padding image\n img_p[0:radi,radi:i_y+radi] = img[radi-1::-1,:] # padding top rows\n img_p[-radi::1,radi:i_y+radi] = img[-1:-radi-1:-1,:] # padding bottom rows\n img_p[radi:i_x+radi,0:radi] = img[:,radi-1::-1] # padding left column\n img_p[radi:i_x+radi,-radi::1] = img[:,-1:-radi-1:-1] # padding right column\n for i in range(f):\n img_p[0:radi,i] = img[radi-1-i,radi-1::-1] # padding upper-left corner\n img_p[0:radi,-i] = img[radi-1-i,-radi::1] # padding upper-righ corner\n img_p[-1:-radi-1:-1,i] = img[-radi+i,radi-1::-1] # padding lower-left corner\n img_p[-1:-radi-1:-1,-i] = img[-radi+i,-radi::1] # padding lower-right corner\n return img_p\n\n img_p = mir_padding(img,f) # create 
padding image\n g_kernel = gaussian_kernel(f,K,var) # create gaussian kernel\n\n #seperate kernel\n E = g_kernel[0,0]\n c = g_kernel[:,0]\n wT = np.reshape(g_kernel[0,:]/E,(f,1))\n\n gauss_image = np.zeros([i_x,i_y]) # create gauss image\n temp_image = np.zeros([i_x,i_y]) # create temp image for two 1D convolution\n old_c_sum = c.sum() # calculate sum of c before modification\n\n # if elements of kernel are located within area of padding, substitute value with 0\n # calculate new value base on ratio between sum before and after modification\n for j in range(i_y):\n y_bound = i_y - j\n mod_c = c.copy()\n if j < radi:\n mod_c[0:radi-j] = 0 \n new_c_sum = mod_c.sum()\n mod_c = mod_c*old_c_sum/new_c_sum \n if j > i_y - radi - 1:\n mod_c[-1:-radi+y_bound-1:-1] = 0 \n new_c_sum = mod_c.sum()\n mod_c = mod_c*old_c_sum/new_c_sum \n for i in range(i_x):\n temp_image[i,j] = np.sum(img_p[i+radi,j:j+f]*mod_c)\n\n temp_image = mir_padding(temp_image,f) # create padding temp image for next 1D convolution\n old_wT_sum = wT.sum() # calculate sum of wT before modification\n\n # if elements of kernel are located within area of padding, substitute value with 0\n # calculate new value base on ratio between sum before and after modification\n for i in range(i_x):\n x_bound = i_x - i\n mod_wT = wT.copy()\n if i < radi:\n mod_wT[0:radi-i] = 0 \n new_wT_sum = mod_wT.sum()\n mod_wT = mod_wT*old_wT_sum/new_wT_sum \n if i > i_x - radi - 1:\n mod_wT[-1:-radi+x_bound-1:-1] = 0 \n new_wT_sum = mod_wT.sum()\n mod_wT = mod_wT*old_wT_sum/new_wT_sum \n for j in range(i_y):\n gauss_image[i,j] = np.sum(temp_image[i:i+f,j+radi]*mod_wT.T)\n\n return gauss_image", "def canny_edge_detection(img0, ksize=5, sigma=1.44, percent=0.8, ratio=0.4):\n\n height, width = np.shape(img0)\n\n # Gaussian blur\n image = cv2.GaussianBlur(img0, (ksize, ksize), sigma)\n\n # Generate the image gradient and the gradient direction\n image_gradient = np.zeros((height, width))\n gradient_direction = np.zeros((height, width))\n\n # Generate the image gradient along the two axes, by using Sobel operator\n x_gradient = cv2.Sobel(image, -1, 1, 0)\n x_gradient = x_gradient.astype(np.uint8)\n\n y_gradient = cv2.Sobel(image, -1, 0, 1)\n y_gradient = y_gradient.astype(np.uint8)\n\n # Obtain the gradient direction\n for idx1 in range(height):\n for idx2 in range(width):\n image_gradient[idx1, idx2] = np.sqrt(x_gradient[idx1, idx2]**2+y_gradient[idx1, idx2]**2)\n theta = math.atan(y_gradient[idx1, idx2]/(x_gradient[idx1, idx2]))*180/math.pi + 90\n\n if 0 <= theta < 45:\n gradient_direction[idx1, idx2] = 2\n elif 45 <= theta < 90:\n gradient_direction[idx1, idx2] = 3\n elif 90 <= theta < 135:\n gradient_direction[idx1, idx2] = 0\n else:\n gradient_direction[idx1, idx2] = 1\n\n # Normalize\n matrix_max = np.max(image_gradient)\n image_gradient = image_gradient/matrix_max\n\n # Determine the threshold\n high_threshold = percent * np.max(image_gradient)\n low_threshold = ratio * high_threshold\n\n # Adjust the result, according to the high and low threshold\n gradient_nms_adjusted = np.zeros((height, width))\n result_image = np.zeros((height, width))\n\n # Interpolate to do non-maximum suppression\n for idx1 in range(1, height-1):\n for idx2 in range(1, width-1):\n east = image_gradient[idx1, idx2 + 1]\n south = image_gradient[idx1 + 1, idx2]\n west = image_gradient[idx1, idx2 - 1]\n north = image_gradient[idx1 - 1, idx2]\n north_east = image_gradient[idx1 - 1, idx2 + 1]\n north_west = image_gradient[idx1 - 1, idx2 - 1]\n south_west = image_gradient[idx1 + 1, idx2 
- 1]\n south_east = image_gradient[idx1 + 1, idx2 + 1]\n\n # The real value of image gradient\n gradient_value, g1, g2 = image_gradient[idx1, idx2], 0, 0\n\n if gradient_direction[idx1, idx2] == 0:\n proportion = np.fabs(y_gradient[idx1, idx2] / x_gradient[idx1, idx2])\n g1 = east * (1 - proportion) + north_east * proportion\n g2 = west * (1 - proportion) + south_west * proportion\n elif gradient_direction[idx1, idx2] == 1:\n proportion = np.fabs(x_gradient[idx1, idx2] / y_gradient[idx1, idx2])\n g1 = north * (1 - proportion) + north_east * proportion\n g2 = south * (1 - proportion) + south_west * proportion\n elif gradient_direction[idx1, idx2] == 2:\n proportion = np.fabs(x_gradient[idx1, idx2] / y_gradient[idx1, idx2])\n g1 = north * (1 - proportion) + north_west * proportion\n g2 = south * (1 - proportion) + south_east * proportion\n elif gradient_direction[idx1, idx2] == 3:\n proportion = np.fabs(y_gradient[idx1, idx2] / x_gradient[idx1, idx2])\n g1 = west * (1 - proportion) + north_west * proportion\n g2 = east * (1 - proportion) + south_east * proportion\n\n # Judge whether it is possible to be an edge point\n if gradient_value >= g1 and gradient_value >= g2:\n gradient_nms_adjusted[idx1, idx2] = gradient_value\n else:\n gradient_nms_adjusted[idx1, idx2] = low_threshold\n\n # Double threshold detection\n for idx1 in range(1, height - 1):\n for idx2 in range(1, width - 1):\n # Selection by threshold\n if gradient_nms_adjusted[idx1, idx2] >= high_threshold:\n result_image[idx1, idx2] = 1\n elif gradient_nms_adjusted[idx1, idx2] <= low_threshold:\n result_image[idx1, idx2] = 0\n\n for idx1 in range(1, height - 1):\n for idx2 in range(1, width - 1):\n # Connection\n if low_threshold < gradient_nms_adjusted[idx1, idx2] < high_threshold:\n if (gradient_nms_adjusted[idx1 - 1, idx2 - 1: idx2 + 1] >= high_threshold).any() \\\n or (gradient_nms_adjusted[idx1 + 1, idx2 - 1: idx2 + 1] >= high_threshold).any() \\\n or (gradient_nms_adjusted[idx1, idx2 - 1: idx2 + 1] >= high_threshold).any():\n result_image[idx1, idx2] = 1\n else:\n result_image[idx1, idx2] = 0\n\n return result_image", "def smooth(f, g):\r\n chi_f = f.apply(lambda x: 0.0 if pd.isna(x) else 1.0)\r\n f_ext = pd.concat([f, chi_f], axis=1).prod(axis=1)\r\n a = convolve(f_ext, g)\r\n b = convolve(chi_f, g)\r\n return a.div(b)", "def gaussion_smoothing(self,sigma=None):\n print(\"## Gaussian smoothing...\");\n corr_length = self.corr_length\n if sigma is None:\n corr = self.correlation\n oscillation=np.max(np.abs(corr[:-1]-corr[1:]),axis=0)\n peak=np.max(np.abs(corr),axis=0)\n oscillation /= peak\n sigma= corr_length/(5.0*oscillation*len(corr)*self.smooth_tune) # 15.0 has been tuned for many times \n print \"sigma:\"\n print sigma\n for i in np.arange(corr_length):\n self.correlation[i] *= exp(-i*i/(2*sigma*sigma))/(sigma*sqrt(2*pi))", "def gauss_convolution_fft(im_array, n_fwhm, fwhm) :\n \n sigma = fwhm / (2.*math.sqrt(2.*math.log(2.)))\n\t\n im_kernel_array = gauss_kernel(n_fwhm, sigma)\n fftconv_image = signal.fftconvolve(im_array,im_kernel_array,mode = 'same')\n\n return (fftconv_image)", "def sobelX(img):\r\n f = np.array([-1, 0, 1, -2, 0, 2, -1, 0, 1]).reshape([3, 3])\r\n return cv2.filter2D(img, cv2.CV_64F, f)", "def ghosal_edge(img,Ks,thr=1,thrmax=0.995,lmin = 0.5,phimin=1.4,thresholding=True, debug=False):\n\ttotaltime = time.time()\n\tkerneltime = time.time()\n\t# Ks must be odd\n\tif Ks%2 != 1:\n\t\tprint(\"Ks must be odd! 
Continuing with Ks = Ks-1\")\n\t\tKs = Ks-1\n\t# define the rectangular kernels\n\t#Vc00 = np.zeros((Ks,Ks),dtype=complex)\n\tVc11 = np.zeros((Ks,Ks),dtype=complex)\n\tVc20 = np.zeros((Ks,Ks),dtype=complex)\n\tofs = 1 *(1-1/Ks) # offset for centering kernel around 0,0\n\tfor i in range(Ks):\n\t\tfor j in range(Ks):\n\t\t\tKx = 2*j/Ks-ofs # limits of integration between -1 and 1\n\t\t\tKy = 2*i/Ks-ofs\n\t\t\tif Kx**2+Ky**2 <= 1: # only a circle\n\t\t\t\t#Vc00[i,j] = 1 # the conjugate of V00\n\t\t\t\tVc11[i,j] = Kx-Ky*1j # ...\n\t\t\t\tVc20[i,j] = 2*Kx**2+2*Ky**2-1\n\tkerneltime = time.time() - kerneltime\n\t\n\t# Kernel Plots\n\t#\tVCplot = Vc00\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Vc11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Vc20\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\t# do the convolution with the images to get the zernike moments\n\tAnorm = lambda n : (n+1)/np.pi\t# a normalization value\n\tconvolvetime = time.time()\n\t#A00 = scig.convolve2d(img,Vc00,mode='same')\n\t#\tA11 = Anorm(1)*scig.convolve2d(img,Vc11,mode='same')\n\t#\tA20 = Anorm(2)*scig.convolve2d(img,Vc20,mode='same')\n\tA11 = Anorm(1)*scig.oaconvolve(img,Vc11,mode='same')\n\tA20 = Anorm(2)*scig.oaconvolve(img,Vc20,mode='same')\n\tconvolvetime = time.time() - convolvetime\n\t# Plot Zernike moments\n\t#\tVCplot = A00\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = A11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = A20\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\tparamstime = time.time()\n\t# calculate the edge paramters\n\t#\ttanphi = np.imag(A11)/np.real(A11)\n\t#\tphi = np.arctan(tanphi)\n\t#\tcosphi = np.cos(phi)\n\t#\tsinphi = cosphi*tanphi\n\t#\tAl11 = np.real(A11)*cosphi+np.imag(A11)*sinphi\n\t\n\tphi = np.arctan(np.imag(A11)/np.real(A11))\n\tAl11 = np.real(A11)*np.cos(phi)+np.imag(A11)*np.sin(phi)\n\t\n\t#\tAl11 = A11*np.exp(-phi*1j)\n\tl = A20/Al11 # A20 has no imaginary component so A20 = A'20\n\n\tk = 3*Al11/(2*(1-l**2)**(3/2))\n\tparamstime = time.time() - paramstime\n\t\n\t# Plot edge paramters\n\t#\tVCplot = phi\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real phi\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag phi\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Al11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A\\'11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag 
A\\'11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = l\n\t#\tplt.pcolormesh(np.real(VCplot))#,vmin=-5,vmax=5\n\t#\tplt.title(\"real l\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot)) # ,vmin=-5,vmax=5\n\t#\tplt.title(\"imag l\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = k\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real k\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag k\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\t\n\ttreattime = time.time()\n\tif thresholding==True:\n\t\t# do the thresholding\n\t\tif (thrmax<0)&(thr>0):\n\t\t\tknorm = np.sort(k.flatten())[[int(thr*np.size(k)),int(thrmax*np.size(k))]]\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)&(abs(k)>knorm[0])\n\t\telif thrmax>0:\n\t\t\tknorm = np.sort(k.flatten())[[int(thr*np.size(k)),int(thrmax*np.size(k))]]\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)&(abs(k)>knorm[0])&(abs(k)<knorm[1])\n\t\telif thr<0:\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)\n\t\t\tknorm = np.sort(k[idx].flatten())[int(thr)]\n\t\t\tidx = idx&(abs(k)>abs(knorm))\n\t\tne = np.sum(idx)\n\telif thresholding==False:\n\t\traise ValueError(\"this option is not still uncer development\")\n\t\t# no thresholding\n\t\tidx = np.ones(np.shape(l),dtype=bool)\n\t\tne =np.sum(idx)\n\telse:\n\t\traise ValueError(\"thresholding should be boolean\")\n\t\n\t# put all detected points in a vector of (x,y) values\n\tedg = np.zeros((ne,2))\n\torg = np.zeros((ne,2))\n\tnx,ny = np.shape(img)\n\te = 0\n\tfor i in range(nx):\n\t\tfor j in range(ny):\n\t\t\tif idx[i,j]:\n\t\t\t\tedg[e]=np.array([i,j]) + l[i,j]*Ks/2*np.array(\n\t\t\t\t\t[np.sin(phi[i,j]),-np.cos(phi[i,j])])\n\t\t\t\torg[e]=np.array([i,j])\n\t\t\t\te +=1\n\ttreattime = time.time() - treattime\n\ttotaltime = time.time() - totaltime\n\tprint(\"total %0.5f\tconvolution %0.5f\tthresholding %0.5f\tparamters %0.5f\tkernel %0.5f\"%(totaltime,convolvetime,treattime,paramstime,kerneltime))\n\t\n\tif debug==True:\n\t\treturn edg, org, k, l, phi\n\telse:\n\t\treturn edg, org", "def fit_gaussian2d(image):\n\n # Estimate center of target\n y_mean, x_mean = np.array(image.shape) // 2 # Center guess\n\n # Create model to fit\n model = models.Gaussian2D(amplitude=image.max(),\n x_mean=x_mean,\n y_mean=y_mean,\n fixed={}\n )\n\n # Fit model to grid\n fitted_model, fit = fit_model(image, model)\n\n return fitted_model", "def focus(self, smooth=0):\n if self.image is None:\n self.load_image()\n # image = self.load_image()\n # print self.image\n if not self.bw:\n gray = rgb_2_gray(self.image)\n else:\n gray = self.image\n sx = ndimage.filters.sobel(gray, axis=0, mode='constant')\n sy = ndimage.filters.sobel(gray, axis=1, mode='constant')\n sob = np.hypot(sx, sy)\n self.image = None\n self.sob = sob\n if smooth > 0:\n sob = ndimage.filters.gaussian_filter(sob, sigma=smooth)\n return sob", "def apply_gaussian_filter(mat, sigma_x, sigma_y, pad=None, mode=None):\n if mode is None:\n # Default for a sinogram image.\n mode1 = \"edge\"\n mode2 = \"mean\"\n else:\n if isinstance(mode, list) or isinstance(mode, tuple):\n mode1 = mode[0]\n mode2 = mode[1]\n else:\n mode1 = mode2 = mode\n if pad is None:\n pad = min(150, int(0.1 * min(mat.shape)))\n mat_pad = np.pad(mat, ((0, 0), (pad, pad)), mode=mode1)\n mat_pad = np.pad(mat_pad, ((pad, pad), (0, 0)), mode=mode2)\n (nrow, ncol) = mat_pad.shape\n window = make_2d_gaussian_window(nrow, ncol, sigma_x, sigma_y)\n xlist = np.arange(0, ncol)\n ylist = np.arange(0, nrow)\n x, y = 
np.meshgrid(xlist, ylist)\n mat_sign = np.power(-1.0, x + y)\n mat_filt = np.real(\n fft.ifft2(fft.fft2(mat_pad * mat_sign) * window) * mat_sign)\n return mat_filt[pad:nrow - pad, pad:ncol - pad]", "def edgeTaper(img, kernel):\n\n blur = convolve_image(img, kernel, mode='same', boundary='fill')\n weight = convolve_image(img * 0 + 1.0, kernel, mode='same', boundary='fill')\n deg_pad = weight * img + (1.0 - weight) * blur / weight\n return deg_pad", "def differenceOfGausssians(image,sigma0, sigma1,window_size, roi, out = None):\n return (vigra.filters.gaussianSmoothing(image,sigma0,window_size=window_size,roi = roi)-vigra.filters.gaussianSmoothing(image,sigma1,window_size=window_size,roi = roi))", "def filter_unsharp(img: np.ndarray, blur_algo='median', kernel_size=None, strength=0.3, unsharp_algo='laplacian'):\n #h,w,c = img.shape\n imgtype = img.dtype\n \n #can randomize strength from 0.5 to 0.8\n # if strength is None:\n # strength = np.random.uniform(0.3, 0.9)\n \n if unsharp_algo == 'DoG':\n #If using Difference of Gauss (DoG)\n #run a 5x5 gaussian blur then a 3x3 gaussian blr\n blur5 = cv2.GaussianBlur(img,(5,5),0)\n blur3 = cv2.GaussianBlur(img,(3,3),0)\n DoGim = blur5 - blur3\n img_out = img - strength*DoGim\n \n else: # 'laplacian': using LoG (actually, median blur instead of gaussian)\n #randomize kernel_size between 1, 3 and 5\n if kernel_size is None:\n kernel_sizes = [1, 3, 5] #TODO: ks 5 is causing errors\n kernel_size = random.choice(kernel_sizes)\n # Median filtering (could be Gaussian for proper LoG)\n #gray_image_mf = median_filter(gray_image, 1)\n if blur_algo == 'median':\n smooth = cv2.medianBlur(img.astype(np.uint8), kernel_size)\n # Calculate the Laplacian (LoG, or in this case, Laplacian of Median)\n lap = cv2.Laplacian(smooth,cv2.CV_64F)\n # Calculate the sharpened image\n img_out = img - strength*lap\n \n # Saturate the pixels in either direction\n img_out[img_out>255] = 255\n img_out[img_out<0] = 0\n \n return img_out.astype(imgtype)", "def gaussExpand(img: np.ndarray, gs_k: np.ndarray) -> np.ndarray:\r\n gs_k = (gs_k / gs_k.sum()) * 4\r\n if img.ndim == 3:\r\n h, w, d = img.shape[:3]\r\n newImg = np.zeros((2 * h, 2 * w, d))\r\n else:\r\n h, w = img.shape[:2]\r\n newImg = np.zeros((2 * h, 2 * w))\r\n newImg[::2, ::2] = img\r\n image = cv2.filter2D(newImg, -1, gs_k, cv2.BORDER_REPLICATE)\r\n return image", "def blurd_image(img,order=5,direction='horizontal',strength=0.25,speed='slow'):\n\tny,nx = np.shape(img)\n\tif speed == 'slow':\n\t\tb, a = scig.butter(order, strength)\n\t\tif direction == 'horizontal':\n\t\t\tfor i in range(ny):\n\t\t\t\timg[i,:] = scig.filtfilt(b, a, img[i,:])\n\t\telif direction == 'vertical':\n\t\t\tfor i in range(nx):\n\t\t\t\timg[:,i] = scig.filtfilt(b, a, img[:,i])\n\t\treturn img\n\telif speed=='fast':\n\t\tK = np.ones((int(strength),1))\n\t\tK = K/np.sum(K)\n\t\tif direction=='vertical':\n\t\t\timg=scig.convolve2d(img,K,mode='same')\n\t\telif direction=='horizontal':\n\t\t\timg=scig.convolve2d(img,K.transpose(),mode='same')\n\t\treturn img", "def sharp_img(img,stride=1,pad_type='None'):\n\n kernel = kernel_bank['sharp']\n return img_conv_2D(img,kernel,stride,pad_type)", "def gs_blur(self,k,img):\n SIG = self.sigma\n sig = [SIG,k*SIG,k*k*SIG,k*k*k*SIG,k*k*k*k*SIG]\n gsArray = [0,1,2,3,4]\n scaleImages = [0,1,2,3,4]\n \n for i in range(5):\n gsArray[i] = scipy.ndimage.filters.gaussian_filter(img,sig[i])\n\n return gsArray", "def convolve_psf(a, fwhm, edge='invert', replace_nan=True, debug=False):\n const2 = 2.354820046 # 2*sqrt(2*ln(2))\n 
const100 = 3.034854259 # sqrt(2*ln(100))\n sigma = fwhm / const2\n # gaussian drops to 1/100 of maximum value at x =\n # sqrt(2*ln(100))*sigma, so number of pixels to include from\n # centre of gaussian is:\n n = np.ceil(const100 * sigma)\n if replace_nan:\n a = nan2num(a, replace='interp')\n if debug:\n print(\"First and last {0} pixels of output will be invalid\".format(n))\n x = np.linspace(-n, n, 2*n + 1) # total no. of pixels = 2n+1\n gauss = np.exp(-0.5 * (x / sigma) ** 2 )\n\n return convolve_window(a, gauss, edge=edge)", "def smooth(self):\n from scipy import ndimage\n\n # smooth images\n\n # integral of original images\n integral_images = self.background_cube.integral_images\n\n # number of times to smooth\n n_counts = self.counts_cube.data.sum()\n if n_counts >= 1.e6:\n n_smooth = 3\n elif (n_counts < 1.e6) and (n_counts >= 1.e5):\n n_smooth = 4\n else:\n n_smooth = 5\n\n # smooth images\n\n # define smoothing kernel as k5a in root:\n # https://root.cern.ch/root/html/TH2.html#TH2:Smooth\n kernel = np.array([[0, 0, 1, 0, 0],\n [0, 2, 2, 2, 0],\n [1, 2, 5, 2, 1],\n [0, 2, 2, 2, 0],\n [0, 0, 1, 0, 0]])\n\n # loop over energy bins (i.e. images)\n for i_energy in np.arange(len(self.background_cube.energy_edges) - 1):\n # loop over number of times to smooth\n for i_smooth in np.arange(n_smooth):\n data = self.background_cube.data[i_energy]\n image_smooth = ndimage.convolve(data, kernel)\n\n # overwrite bg image with smoothed bg image\n self.background_cube.data[i_energy] = Quantity(image_smooth,\n self.background_cube.data.unit)\n\n # integral of smooth images\n integral_images_smooth = self.background_cube.integral_images\n\n # scale images to preserve original integrals\n\n # loop over energy bins (i.e. images)\n for i_energy in np.arange(len(self.background_cube.energy_edges) - 1):\n self.background_cube.data[i_energy] *= (integral_images / integral_images_smooth)[i_energy]", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel=(3, 3)):\n out = cv2.GaussianBlur(img, kernel, 0)\n return out", "def 
edgeDetectionCanny(img: np.ndarray, thrs_1: float, thrs_2: float) -> (np.ndarray, np.ndarray):\r\n # smooth the image with a Gaussian filter:\r\n smooth_img = cv2.GaussianBlur(img, (3, 3), 0)\r\n\r\n # Compute the partial derivatives Ix, Iy, and magnitude and direction of the gradient:\r\n directions, magnitude, Ix, Iy = convDerivative(smooth_img)\r\n angle = np.degrees(directions)\r\n magnitude = (magnitude / magnitude.max()) * 255\r\n\r\n # non maximum suppression:\r\n edge_img = np.zeros_like(magnitude)\r\n M, N = img.shape\r\n for i in range(1, M - 1):\r\n for j in range(1, N - 1):\r\n ni1 = ni2 = magnitude[i, j]\r\n # angle 0\r\n if (0 <= angle[i, j] < 22.5) or (157.5 <= angle[i, j] <= 180):\r\n ni1 = magnitude[i, j + 1]\r\n ni2 = magnitude[i, j - 1]\r\n # angle 45\r\n elif 22.5 <= angle[i, j] < 67.5:\r\n ni1 = magnitude[i + 1, j - 1]\r\n ni2 = magnitude[i - 1, j + 1]\r\n # angle 90\r\n elif 67.5 <= angle[i, j] < 112.5:\r\n ni1 = magnitude[i + 1, j]\r\n ni2 = magnitude[i - 1, j]\r\n # angle 135\r\n elif 112.5 <= angle[i, j] < 157.5:\r\n ni1 = magnitude[i - 1, j - 1]\r\n ni2 = magnitude[i + 1, j + 1]\r\n if (magnitude[i, j] >= ni1) and (magnitude[i, j] >= ni2):\r\n edge_img[i, j] = magnitude[i, j]\r\n else:\r\n edge_img[i, j] = 0\r\n\r\n # hysteresis:\r\n strong_edges = np.zeros_like(edge_img)\r\n for x in range(M):\r\n for y in range(N):\r\n if edge_img[x, y] > thrs_1:\r\n strong_edges[x, y] = edge_img[x, y]\r\n\r\n for x in range(M):\r\n for y in range(N):\r\n if edge_img[x, y] <= thrs_2:\r\n edge_img[x, y] = 0\r\n if thrs_2 < edge_img[x, y] <= thrs_1:\r\n if strong_edges[x - 1, y] == strong_edges[x + 1, y] == strong_edges[x, y - 1] == strong_edges[\r\n x, y + 1] == strong_edges[x - 1, y - 1] == strong_edges[x + 1, y + 1] == strong_edges[\r\n x + 1, y - 1] == strong_edges[x - 1, y + 1] == 0:\r\n edge_img[x, y] = 0\r\n\r\n cv_ans = cv2.Canny(img, thrs_2, thrs_1)\r\n return cv_ans, edge_img", "def blur_spatial(im, kernel_size):\n kernel = gaus_kernel_calc(kernel_size)\n\n return scipy.signal.convolve2d(im, kernel, 'same').astype(np.float64)", "def smooth_with_function_and_mask(image, function, mask):\n bleed_over = function(mask.astype(float))\n masked_image = np.zeros(image.shape, image.dtype)\n masked_image[mask] = image[mask]\n smoothed_image = function(masked_image)\n output_image = smoothed_image / (bleed_over + np.finfo(float).eps)\n return output_image", "def fake_gaussian(img, vertical_horizontal_sigma, iter=3):\n sigma_vertical, sigma_horizontal = vertical_horizontal_sigma\n h_blured = box_filter1d(img, sigma_horizontal, horizontal=True, iter=iter)\n blured = box_filter1d(h_blured, sigma_vertical, horizontal=False, iter=iter)\n return blured", "def dogonvole(image, psf, kernel=(2., 2., 0.), blur=(1.3, 1.3, 0.), niter=10):\n global hot_pixels\n if not psf.sum() == 1.:\n raise ValueError(\"psf must be normalized so it sums to 1\")\n image = image.astype('float32')\n imin = image.min()\n for y, x in hot_pixels:\n image[y, x] = imin;\n \n img_bg = ndimage.gaussian_filter(image, kernel[:len(image.shape)])\n image = numpy.subtract(image, img_bg)\n numpy.place(image, image<0, 1./2**16)\n image = image.astype('uint16')\n if len(image.shape)==3:\n for i in range(image.shape[2]):\n image[:,:,i] = restoration.richardson_lucy(image[:,:,i], psf,\n niter, clip=False)\n elif len(image.shape)==2:\n image = restoration.richardson_lucy(image, psf, niter, clip=False)\n else:\n raise ValueError('image is not a supported dimensionality.')\n image = ndimage.gaussian_filter(image, 
blur[:len(image.shape)])\n return image", "def gaussian_1xDerivative_kernel(windowX, windowY, sigma):\n # See [http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/MARBLE/low/edges/canny.htm]\n X, Y = createKernalWindowRanges(windowX, windowY, increment)\n \n g_dx_kernel = gaussianFirstDerivative(X, 0, sigma) * gaussianNormalised(Y, 0, sigma)\n gSum = np.sum(np.abs(g_dx_kernel))\n \n if gSum == 0:\n print \"Warning dx_g_kernel:: Not normalising by sum of values, as sum = \" + str(gSum)\n return (g_dx_kernel)\n else:\n return (g_dx_kernel / gSum)", "def smooth(D, W, smoothing):\n WD = scipy.ndimage.gaussian_filter(W * D, smoothing)\n W = scipy.ndimage.gaussian_filter(W, smoothing)\n D = np.divide(WD, W, out=np.zeros_like(D), where=W > 0)\n return D, W", "def filter2D(img, kernel = (5,5)):\n\ttmp = img.copy()\n\tk = np.ones((kernel[0], kernel[1]), np.float32) / (kernel[0]*kernel[1])\n\tdst = cv2.filter2D(tmp, -1, k)\n\treturn dst", "def eeg_smooth(array,window,window_len):\t\n\tarray_smooth = np.zeros(array.shape)\n\tif not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman', 'kaiser']:\n\t\traise ValueError, \"Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman', 'kaiser'\"\n\t\t\n\tif window == 'flat':\n\t\tw = np.ones(window_len)\n\telif window == 'kaiser':\n\t\tw = eval('np.'+window+'(window_len,4)')\t\t\n\telse:\n\t\tw = eval('np.'+window+'(window_len)')\t\t\n\t\t\n\t\n\tif len(array.shape) == 1:\n\t\tntpts = len(array)\n\t\tarray_smooth = np.convolve(array, w/w.sum(), mode='same')\n\t\n\telif len(array.shape) == 2:\n\t\t[nchan,ntpts] = array.shape\n\t\tfor i in range(0,nchan):\n\t\t\tarray_smooth[i,:] = np.convolve(array[i,:], w/w.sum(), mode='same')\n\t\n\telif len(array.shape) > 2:\n\t\tprint 'Error: only works with 1 or 2 dimensions'\n\t\t\n\treturn array_smooth", "def __smoothen_color(self, outer, inner):\n outer_curve = zip(outer[0], outer[1])\n inner_curve = zip(inner[0], inner[1])\n x_points = []\n y_points = []\n for point in outer_curve:\n x_points.append(point[0])\n y_points.append(point[1])\n for point in inner_curve:\n x_points.append(point[0])\n y_points.append(point[1])\n img_base = np.zeros((self.height, self.width))\n cv2.fillConvexPoly(img_base, np.array(np.c_[x_points, y_points], dtype='int32'), 1)\n img_mask = cv2.GaussianBlur(img_base, (81, 81), 0) #51,51\n img_blur_3d = np.ndarray([self.height, self.width, 3], dtype='float')\n img_blur_3d[:, :, 0] = img_mask\n img_blur_3d[:, :, 1] = img_mask\n img_blur_3d[:, :, 2] = img_mask\n self.im_copy = (img_blur_3d * self.image * 0.7 + (1 - img_blur_3d * 0.7) * self.im_copy).astype('uint8')", "def smooth(y, box_pts):\r\n box = np.ones(box_pts)/box_pts\r\n y_smooth = np.convolve(y, box, mode='same')\r\n return y_smooth", "def apply_gaussian_resolution(self,params,data,fwhm=1,dE=0.01,E_max=100):\n print('\\n################### CONVOLUTION #####################\\n')\n print(f'\\n\\tConvolution with Gaussian function, FWHM = {fwhm} meV\\n')\n\n data.fwhm = fwhm\n c = fwhm/2.35482\n\n data.dE = dE\n data.E_max = E_max\n data.spectra_E = np.arange(0,data.E_max+data.dE,data.dE)\n data.spectra_num_E = len(data.spectra_E)\n data.spectra = np.zeros((data.spectra_num_E,params.num_Qpoints))\n data.smooth_spectra = np.zeros((data.spectra_num_E,params.num_Qpoints))\n structure_factors = []\n energies = []\n\n ### sum intensity of degenerate bands\n if params.sum_degenerate_bands == True:\n print('\\n\\tSumming degenerate bands before convolution (using convolution dE as tolerance)\\n')\n for q in 
range(params.num_Qpoints):\n sfac = data.structure_factors[:,q]\n energy = data.frequencies[f'{q}']\n reduced_energies = []\n summed_sfac = []\n while True:\n if len(energy) == 0:\n break\n test_energy = energy[0]\n reduced_energies.append(test_energy)\n indicies = np.intersect1d(np.argwhere(energy <= (test_energy+data.dE)),\n np.argwhere(energy > (test_energy-data.dE)))\n summed_sfac.append(sfac[indicies].sum())\n sfac = np.delete(sfac,indicies)\n energy = np.delete(energy,indicies)\n energies.append(reduced_energies)\n structure_factors.append(summed_sfac)\n else:\n print('\\n\\tWARNING: You should definitely sum degenerate bands!!!\\n')\n for q in range(params.num_Qpoints):\n energies.append(data.frequencies[f'{q}'])\n structure_factors.append(data.structure_factors[:,q])\n\n ### populate array for heatmap\n ### try statement takes care of negative energies\n for q in range(params.num_Qpoints):\n for b in range(len(structure_factors[q][:])):\n try: # if there are negative modes, argwhere returns an empty vector and the slice crashes\n data.spectra[np.argwhere(data.spectra_E <= \n energies[q][b]).max(),q] = structure_factors[q][b]\n except:\n continue\n\n if params.bose_factor == True:\n print('\\n\\tWARNING: Bose factor isnt verified. Need to compare to SNAXS.\\n')\n if params.temperature < 5:\n temperature = 5\n else:\n temperature = params.temperature\n inds = np.argwhere(data.spectra_E <= 0.5)\n tmp_e = np.copy(data.spectra_E)\n tmp_e[inds] = 0.5\n bose = 1+1/(np.exp(tmp_e/(constants.kb*1000*temperature))-1)\n bose = np.tile(bose.reshape((data.spectra_num_E,1)),reps=(1,params.num_Qpoints))\n data.spectra = np.multiply(data.spectra,bose)\n data.spectra = data.spectra/np.max(data.spectra)\n\n ### gaussian convolution using for loops, slow but very little memory utilization\n g_energy = np.append(data.spectra_E-data.spectra_E.max(),data.spectra_E[1:])\n gaussian = np.exp(-0.5*g_energy**2/c**2)/c/np.sqrt(2*np.pi)\n gaussian = np.tile(gaussian.reshape((gaussian.shape[0],1)),(1,data.num_Qpoints))\n tmp = np.append(data.spectra,data.spectra,axis=0)[1:,:]\n for e in range(data.spectra_num_E):\n if e%50 == 0:\n print(f'\\t------ {e}/{data.spectra_num_E} -------')\n data.smooth_spectra[e,:] = np.trapz(tmp*np.roll(gaussian,shift=e,axis=0),g_energy,axis=0)\n print('\\n\\tDone convolving!\\n')\n data.smooth_spectra = data.smooth_spectra/np.max(data.smooth_spectra)\n\n# if params.random_background == True:\n# data.smooth_spectra = data.smooth_spectra+(np.random.normal(0,1,\n# (data.smooth_spectra.shape[0],data.smooth_spectra.shape[1])))*0.001\n \n plt.imshow(data.smooth_spectra,origin='lower',aspect='auto',cmap='hot')\n plt.show()", "def convolveGauss(self, R, m, z, fwhm):\n sigmaBeam = fwhm / np.sqrt(8.*np.log(2.))\n # do the smoothing\n def f(x):\n if np.isfinite(i0(x*X/sigmaBeam**2)):\n result = x * self.rho2d(x) * np.exp(-0.5*(x**2+X**2)/sigmaBeam**2) * i0(x*X/sigmaBeam**2) / sigmaBeam**2\n else:\n result = 0.\n return result\n result = integrate.quad(f, 0., np.inf, epsabs=0., epsrel=1.e-2)[0]\n return result", "def heavy_blur_skeleton_hand(save_path=None):\n im = auto_hand_img() # reload the edge map\n blurred = gaussian(np.copy(im)) \n #blurred = blurred * blurred # strengthen the image by multiplying\n im2 = to_rgba(np.copy(im)) # take an RGBA copy to add the skeleton onto\n skel = skeletonize(blurred) # given as a Boolean array\n skel_blur = gaussian(np.copy(skel), sigma=3)\n skel_blur *= (255/np.max(skel_blur))\n # manually examine the distribution to set a threshold for binarisation\n # 
for i in np.arange(0,101,1): print(np.percentile(skel_blur, i))\n skel_blur[skel_blur >= 30] = 255\n skel_blur[skel_blur < 30] = 0\n skel2 = (skel_blur/255).astype(bool)\n # also expand the edge map using the blurred version for visibility\n im2[blurred <= 0.75] = [0,0,0,255]\n # set the skeleton pixels to red in the edge map copy\n im2[skel2] = [255, 0, 0, 255]\n if save_path is None:\n return im2\n else:\n save_image(im2, (8,8), save_path)\n return im2", "def smoothing(data, mask):\n smooth_data = gaussian_filter(data, [2, 2, 2, 0])\n\n Y = smooth_data[mask].T\n\n return Y", "def apply_filter(self, image):\n gauss_low = cv2.GaussianBlur(image, ksize=(0,0), sigmaX=self._sigma_low , sigmaY=self._sigma_low)\n gauss_high = cv2.GaussianBlur(image, ksize=(0,0), sigmaX=self._sigma_high, sigmaY=self._sigma_high)\n\n filtered_image = gauss_low - gauss_high\n\n return normalize(filtered_image, nb_bits=8)", "def binarize(img, s_thres=(170, 255), l_thres=(50, 255), sobel_thres=(30, 80)):\n hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)\n\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n hls[:, :, 1] = clahe.apply(hls[:, :, 1])\n\n l_image = hls[:, :, 1]\n l_blur = cv2.GaussianBlur(l_image, (0, 0), 9)\n l_image = cv2.addWeighted(l_image, 1, l_blur, -1, 0)\n l_image = cv2.normalize(l_image, np.zeros_like(l_image), 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)\n l_binary = np.zeros_like(l_image)\n l_binary[(l_image >= l_thres[0]) & (l_image <= l_thres[1])] = 1\n\n # Sobel x\n # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # gray = hls[:, :, 1]\n # sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0) # Take the derivative in x\n # abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal\n # scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))\n # sxbinary = np.zeros_like(scaled_sobel)\n # sxbinary[(scaled_sobel >= sobel_thres[0]) & (scaled_sobel <= sobel_thres[1])] = 1\n # sxbinary = s_binary\n\n s_channel = hls[:, :, 2]\n s_channel = cv2.normalize(s_channel, np.zeros_like(s_channel), 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)\n s_binary = np.zeros_like(s_channel)\n s_binary[(s_channel >= s_thres[0]) & (s_channel <= s_thres[1])] = 1\n\n # Combine the two binary thresholds\n combined_binary = np.zeros_like(s_binary)\n combined_binary[(s_binary == 1) | (l_binary == 1)] = 1\n\n # we filter out the lines with too many active pixels\n combined_binary_rows = combined_binary.sum(1)\n combined_binary[combined_binary_rows > (combined_binary.shape[1] / 2)] = 0\n\n return combined_binary", "def softing_noise(image, kn):\n\n s_noise = cv2.GaussianBlur(image, (kn, kn), 0)\n\n return s_noise", "def hover_to_inst(grad_gauss_filter: int = 7, grad_thresh: float = 0.4) -> \\\n Callable[[Tensor, Tensor], Tensor]:\n assert 0 <= grad_thresh < 1\n assert grad_gauss_filter % 2 == 1\n\n def process(np: Tensor, hv: Tensor) -> Tensor:\n \"\"\"Process function\"\"\"\n np_p = np.detach()\n h_raw = hv[:, :1].detach()\n v_raw = hv[:, 1:].detach()\n\n np_p[np_p >= 0.5] = 1\n np_p[np_p < 0.5] = 0\n\n h = batch_min_max(h_raw)\n v = batch_min_max(v_raw)\n\n s = sobel(grad_gauss_filter).to(np.device)\n\n sobel_h = torch.conv2d(h, s[None, None, ...])\n sobel_h = pad(sobel_h, [grad_gauss_filter // 2] * 4, 'replicate')\n sobel_v = torch.conv2d(v, s.T[None, None, ...])\n sobel_v = pad(sobel_v, [grad_gauss_filter // 2] * 4, 'replicate')\n\n sobel_h = 1 - batch_min_max(sobel_h)\n sobel_v = 1 - batch_min_max(sobel_v)\n\n overall = torch.max(sobel_h, sobel_v)\n overall = overall - (1 - np_p)\n 
overall[overall < 0] = 0\n\n energy = -(1.0 - overall) * np_p\n energy = kornia.filters.gaussian_blur2d(energy, (3, 3), sigma=(1, 1))\n energy = energy.cpu().numpy()\n\n overall = 1.0 * (overall >= grad_thresh)\n\n m = np_p - overall\n m[m < 0] = 0\n m = m.cpu().numpy()\n np_p = np_p.cpu().numpy()\n\n inst_map = []\n for i in range(np_p.shape[0]):\n m_i = binary_fill_holes(m[i][0]).astype('uint8')\n m_i = remove_small_objects(m_i > 0, 10)\n m_i = measurements.label(m_i)[0]\n w = watershed(energy[i][0], m_i, mask=np_p[i][0])\n inst_map.append(w)\n inst_map = numpy.stack(inst_map)[:, None]\n return torch.tensor(inst_map, device=np.device)\n\n return process", "def gaussian_blur(img,key='gaussian_3x3',stride=1,pad_type='None'):\n\n kernel = kernel_bank[key]\n return img_conv_2D(img,kernel,stride,pad_type)", "def smooth_signal(signal, N):\n # Preprocess edges\n signal = np.concatenate([signal[0:N], signal, signal[-N:]])\n # Convolve\n signal = np.convolve(signal, np.ones((N,))/N, mode='same')\n # Postprocess edges\n signal = signal[N:-N]\n\n return signal" ]
[ "0.739436", "0.71233726", "0.70680994", "0.68737566", "0.6743445", "0.67295676", "0.65329146", "0.6494786", "0.64919174", "0.647246", "0.64332294", "0.6390439", "0.63026875", "0.61877143", "0.61825424", "0.61740756", "0.6155781", "0.61314356", "0.6101635", "0.6092838", "0.6052806", "0.60505486", "0.6046168", "0.6037932", "0.60330594", "0.5996631", "0.59768176", "0.59469724", "0.5927561", "0.5909944", "0.58823216", "0.5877768", "0.58712715", "0.5864668", "0.5850025", "0.58342", "0.58332974", "0.5810013", "0.58050764", "0.57945484", "0.57823426", "0.57586116", "0.5737108", "0.5726202", "0.57181364", "0.5717995", "0.5701331", "0.56975394", "0.5696341", "0.56797147", "0.56671053", "0.5656292", "0.5648778", "0.56465685", "0.56420445", "0.56354815", "0.5633689", "0.5628704", "0.56253874", "0.56212443", "0.5617328", "0.56152534", "0.56115925", "0.56097233", "0.56097233", "0.56097233", "0.56097233", "0.56097233", "0.56097233", "0.56097233", "0.56097233", "0.56097233", "0.56097233", "0.56097233", "0.56097233", "0.56097233", "0.56097233", "0.56097233", "0.56041074", "0.560176", "0.5601065", "0.55997235", "0.5595445", "0.55933505", "0.5572725", "0.5564485", "0.5559337", "0.55571115", "0.5555364", "0.5544627", "0.55440927", "0.5543328", "0.5538573", "0.5535321", "0.5521803", "0.55206263", "0.5518843", "0.5511361", "0.5508374", "0.5505375" ]
0.69962204
3
Read an image from the disk or assign existing object to the output.
def get_image(imagename, nx = 0, ny = 1, nz = 1, im = 0): if type(imagename) == type(""): e = EMData() e.read_image(imagename, im) elif not imagename: e = EMData() if (nx > 0): e.set_size(nx, ny, nz) else: e = imagename return e
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read(self):\n with self.lock:\n return self.image", "def load(self):\n logger.debug(f\"Reading {self.path.name}\")\n self.label = int(Data.fromLabel(self.path.parent.name))\n self.image = skimg.data.imread(self.path)", "def read_from_filename(self, filename=''):\r\n self.raw_image = skimage.io.imread(filename)\r\n self.bk_image = np.copy( self.raw_image )", "def load(image_path):\n out = None\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n # Use skimage io.imread\n out = io.imread(image_path)\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return out", "def read_img(img_path:str) -> object:\n img = cv2.imread(img_path)\n return img", "def load_image_file_like(self, file_like_obj, colorkey=None): # -> image\n raise NotImplementedError(u'This should be implemented in a inherited class')", "def load_image_file_like(self, file_like_obj, colorkey=None): # -> image\n raise NotImplementedError(u'This should be implemented in a inherited class')", "def _read_image(self, image_path:str, label:str):\n # Get the full path to the image\n image = \"\"\n if label == \"real\":\n image = os.path.join(self.root, \"real\", image_path)\n else:\n image = os.path.join(self.root, \"fake\", image_path)\n \n # Read the image\n image = cv2.imread(image)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # Normalize the image\n image = image / 255.0\n\n # Convert the image to floating point to use it as\n # an input to the PyTorch model\n image = image.astype(np.float32)\n\n return image", "def readimage(self, fp):\n execfile = open(self.binpath, \"r\")\n databuf = execfile.read(4096)\n while databuf:\n fp.write(databuf)\n databuf = fp.read(4096)\n fp.flush()\n execfile.close()", "def image(self):\n # TODO: make sure this method works for png, gif, tiff\n if self.has_metadata:\n self.extract_metadata()\n tempdir_path = self.make_tempdir()\n tempfile_path = os.path.join(tempdir_path, self.filename)\n warnings.simplefilter('error', Image.DecompressionBombWarning)\n try: # Do image conversions\n img_in = Image.open(self.src_path)\n img_out = Image.frombytes(img_in.mode, img_in.size, img_in.tobytes())\n img_out.save(tempfile_path)\n self.src_path = tempfile_path\n except Exception as e: # Catch decompression bombs\n # TODO: change this from all Exceptions to specific DecompressionBombWarning\n self.add_error(e, \"Caught exception (possible decompression bomb?) 
while translating file {}.\".format(self.src_path))\n self.make_dangerous()\n self.add_file_string('Image file')\n self.set_property('processing_type', 'image')", "def read_image(image_path):\n if not os.path.exists(image_path):\n raise IOError('File does not exist: %s' % image_path)\n else:\n return Image.open(image_path)", "def _load_disk(self):\r\n s = self.file_string + ' '\r\n im = Image.open(self.file_string)\r\n\r\n self.ix, self.iy = im.size\r\n s += '(%s)' % im.mode\r\n self.alpha = (im.mode == 'RGBA' or im.mode == 'LA')\r\n\r\n if self.mipmap:\r\n resize_type = Image.BICUBIC\r\n else:\r\n resize_type = Image.NEAREST\r\n\r\n # work out if sizes > MAX_SIZE or coerce to golden values in WIDTHS\r\n if self.iy > self.ix and self.iy > MAX_SIZE: # fairly rare circumstance\r\n im = im.resize((int((MAX_SIZE * self.ix) / self.iy), MAX_SIZE))\r\n self.ix, self.iy = im.size\r\n n = len(WIDTHS)\r\n for i in xrange(n-1, 0, -1):\r\n if self.ix == WIDTHS[i]:\r\n break # no need to resize as already a golden size\r\n if self.ix > WIDTHS[i]:\r\n im = im.resize((WIDTHS[i], int((WIDTHS[i] * self.iy) / self.ix)),\r\n resize_type)\r\n self.ix, self.iy = im.size\r\n break\r\n\r\n if VERBOSE:\r\n print('Loading ...{}'.format(s))\r\n\r\n if self.flip:\r\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\r\n\r\n RGBs = 'RGBA' if self.alpha else 'RGB'\r\n self.image = im.convert(RGBs).tostring('raw', RGBs)\r\n self._tex = ctypes.c_int()\r\n if 'fonts/' in self.file_string:\r\n self.im = im", "def load_image(self, image_id):\n# logger.info(\"image {}\".format(image_id))\n info = self.image_info[image_id]\n if info[\"image\"] is None:\n im = self.gen_imgs[info[\"path\"]][\"input_images\"][info[\"image_index\"]]\n image = np.ones([info['height'], info['width'], 1], dtype=np.uint8)\n image[:,:,0] = self.gen_imgs[info[\"path\"]][\"input_images\"][info[\"image_index\"]]\n# image[:,:,1] = self.gen_imgs[info[\"path\"]][\"input_images\"][info[\"image_index\"]]\n# image[:,:,2] = self.gen_imgs[info[\"path\"]][\"input_images\"][info[\"image_index\"]]\n self.image_info[image_id][\"image\"] = image\n# logger.info(\"cached {}\".format(image_id))\n else:\n image = self.image_info[image_id][\"image\"]\n# logger.info(\"missed {}\".format(image_id))\n\n return image", "def read_image(img_path):\n img = imageio.imread(uri=img_path)\n return img", "def deserialize_image(self, data, give_file_name):\r\n # Generate a random 8-character name\r\n # name = \"img_\" + self.generate_random_name() + \".png\"\r\n name = give_file_name + \".png\"\r\n file_path = os.path.join(self.temp_dir, name)\r\n img = Image.frombytes(data['mode'], data['size'], data['pixels'])\r\n img.save(file_path)\r\n return file_path", "def read_image(path):\n img = misc.imread(path)\n return img", "def open_image(self):\n self.orig_image = Image.open(self.filename)\n if self.in_rgb:\n self.orig_image = self.orig_image.convert(\"RGB\")\n if self.min_filter:\n self.orig_image.filter(ImageFilter.MinFilter(self.min_filter))", "def grab_image(self):\n _, camera_image = self.camera.read()\n with self.lock:\n self.image = camera_image", "def load(path) -> Image:\n return Image.open(path)", "def load_image(self, image_id):\n # Load image\n path = self.image_info[image_id]['path']\n if path.endswith(\".png\" or \".jpg\"):\n image = skimage.io.imread(path)\n elif path.endswith(\".dcm\"):\n ds = pydicom.read_file(path)\n image = ds.pixel_array\n # If grayscale. 
Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n return image", "def read_image(path: str):\n return Image.open(path, mode=\"r\")", "def read_image(self, item):\n assert item['image_dtype'] == 'uint16'\n\n filename = os.path.join(self.home(item['basename']))\n s = open(filename, 'rb').read()\n assert hashlib.md5(s).hexdigest() == item['md5']\n img = np.fromstring(s, dtype=item['image_dtype']).byteswap()\n img = img.reshape(item['image_shape'])\n return img", "def load_image(self):\n\n if self.image_file is None:\n raise IOError(\"Set image_file before calling this method\")\n self.hdu = fits.open(self.image_file)[0]\n self.wcs = astropy_wcs.WCS(self.hdu.header)\n self.header = self.hdu.header", "def test_read_image(self):\n pass", "def image(self, name=None):\n return self.find(self.images(), name=name)", "def load_image(self, index):\n image_path = os.path.join(self.folder_path, self.image_ids[index] + '.jpg')\n img = Image.open(image_path).convert('RGB')\n if debug:\n print(\"Loaded image: \", image_path)\n return img", "def getImage(filename):\n if not fs.exists(filename=filename):\n raise Exception(\"mongo file does not exist! {0}\".format(filename)) \n im_stream = fs.get_last_version(filename)\n im = Image.open(im_stream)\n img_io = BytesIO() \n im.save(img_io, 'JPEG', quality=70)\n img_io.seek(0)\n return img_io\n \n #return serve_pil_image(im)\n\n #d = ObjectId('5ad204a5c2eb5d031a7fd7e5') \n #connection = MongoClient()\n #database = connection['image']\n # create a new gridfs object.\n #fs = gridfs.GridFS(database)\n #outputdata = fs.get(d).read()\n #decode=outputdata#.decode()\n #return decode", "def read_image(img_path):\n\tgot_img = False\n\twhile not got_img:\n\t\ttry:\n\t\t\timg = Image.open(img_path).convert('RGB')\n\t\t\timg = img.resize((100,100),Image.ANTIALIAS)\n\t\t\tgot_img = True\n\t\texcept IOError:\n\t\t\tprint(\"IOError incurred when reading '{}'. Will redo. Don't worry. 
Just chill.\".format(img_path))\n\t\t\tpass\n\treturn img", "def get_image ( self, object ):\n return self.image", "def __read_image(self, filename):\n self.image = cv2.imread(filename)\n self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)\n self.im_copy = self.image.copy()\n self.height, self.width = self.image.shape[:2]\n self.debug = 0", "def __getitem__(self, idx):\n image = Image.open(self.filenames[idx]) # PIL image\n image = self.transform(image)\n return image", "def read_img(img_id, train_or_test, size):\n img = image.load_img(join(data_dir, train_or_test, img_id + '.jpg'), target_size=size)\n # img = image.img_to_array(img)\n return img", "def make_image(self, path):\n\t\treturn None", "def open(self, infile, cache=True):\n return _image.image_open(self, infile, cache)", "def image_process(image_info):\n path = os.path.join(cfg.IMAGESET, image_info.get(\"index\") + \".jpg\")\n if not os.path.exists(path):\n raise IOError(\"please check your file is not exists: \" + path)\n def load_image(path):\n image = Image.open(path)\n return image\n return load_image(path)", "def read_single_image(image_entry, dir, offset_percent, output_size, normalize=True, rotate=True, preds=True):\r\n if preds:\r\n image_name = dir+'img_'+image_entry[0][1:-1]+'_'+str(image_entry[3])+'.jpg'\r\n if output_size[2] == 1:\r\n full_image = skio.imread(image_name, as_grey=True) # read in a greyscale\r\n else:\r\n full_image = skio.imread(image_name, as_grey=False)\r\n scaling = float(output_size[0])/float(image_entry[3])\r\n else:\r\n from skimage import transform as sktf\r\n image_name = dir + 'img_' + image_entry[0][1:-1] + '.jpg'\r\n if output_size[2] == 1:\r\n full_image = skio.imread(image_name, as_grey=True) # read in a greyscale\r\n else:\r\n full_image = skio.imread(image_name, as_grey=False)\r\n # scale and downsample the image here to reduce computation\r\n o_size = np.shape(full_image)\r\n scaling = float(output_size[0])/float(image_entry[3])\r\n o_shape = [int(scaling*o_size[0]), int(scaling*o_size[1])]\r\n full_image = sktf.resize(full_image, o_shape)\r\n image_size = np.shape(full_image)\r\n if normalize:\r\n full_image = image_normalize(full_image, image_size) # normalizes the image that was read in\r\n else:\r\n full_image = full_image\r\n # compute random center offsets\r\n cent_x = float(image_entry[1]) + float(image_entry[3])*rd.uniform(-1.0*float(offset_percent), float(offset_percent))\r\n cent_y = float(image_entry[2]) + float(image_entry[3])*rd.uniform(-1.0*float(offset_percent), float(offset_percent))\r\n # compute a corner of the image cutout to use as starting point for making matrix of cutout coordinates\r\n left_x = scaling*(cent_x - 0.5 * float(image_entry[3]))\r\n top_y = scaling*(cent_y - 0.5 * float(image_entry[3]))\r\n pixel_locations_x = np.zeros(output_size[0:2]) # create a 2D array to hold all the pixel locations of the cutout\r\n pixel_locations_y = np.zeros(output_size[0:2])\r\n for i in range(output_size[0]): # leverage the fact that along an axis, x/y locations are identical\r\n pixel_locations_x[:, i] = left_x + i*1.0\r\n pixel_locations_y[i, :] = top_y + i*1.0\r\n # ravel them to make easier to process\r\n pixel_locations_x = np.ravel(pixel_locations_x)\r\n pixel_locations_y = np.ravel(pixel_locations_y)\r\n if rotate:\r\n angle = rd.uniform(0.0, 6.284) # select a random rotation angle\r\n sina = math.sin(angle)\r\n cosa = math.cos(angle)\r\n rotmat = np.array([[cosa, -1.0 * sina], [sina, cosa]])\r\n # the rotation should occur about the center of the image cutout 
location, so translate the origin:\r\n rel_loc_x = [i - scaling*cent_x for i in pixel_locations_x]\r\n rel_loc_y = [i - scaling*cent_y for i in pixel_locations_y]\r\n # rotate the corners now\r\n for i in range(len(pixel_locations_x)):\r\n rotated_coord = np.matmul(rotmat, np.array([[rel_loc_x[i]], [rel_loc_y[i]]]))\r\n pixel_locations_x[i] = rotated_coord[0, 0] + scaling*cent_x\r\n pixel_locations_y[i] = rotated_coord[1, 0] + scaling*cent_y\r\n # now go ahead and use the rotated (or unrotated, if rotate=false) corners to actually extract the image cutout\r\n # first round corners to be the nearest integer\r\n pixel_locations_x = np.array([int(i) for i in pixel_locations_x])\r\n pixel_locations_y = np.array([int(i) for i in pixel_locations_y])\r\n # if the computed pixel locations are outside the bounds of the image, pad the image with black\r\n if (np.min(pixel_locations_x)<=0 or np.min(pixel_locations_y) <= 0\r\n or np.max(pixel_locations_x) >= image_size[1] or np.max(pixel_locations_y) >= image_size[0]):\r\n full_image, pixel_locations_x, pixel_locations_y = image_pad(full_image, pixel_locations_x, pixel_locations_y)\r\n \"\"\"debug\r\n print('x_cent '+str(scaling*cent_x))\r\n print('y_cent '+str(scaling*cent_y))\r\n viewer = ImageViewer(full_image)\r\n viewer.show()\r\n \"\"\"\r\n output_image = np.ravel(full_image[pixel_locations_y, pixel_locations_x])\r\n \"\"\"\r\n output_image = np.reshape(full_image[pixel_locations_y, pixel_locations_x], output_size)\r\n viewer2 = ImageViewer(output_image)\r\n viewer2.show()\r\n \"\"\"\r\n return output_image", "def use_model_on_one_image(self, image_path, model_path, save_path):\n if self.cuda:\n self.unet.load_state_dict(torch.load(model_path))\n else:\n self.unet.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))\n\n ori_image = Image.open(image_path).convert('L')\n transform = ToTensor()\n\n input = transform(ori_image)\n if self.cuda:\n input = Variable(input.cuda())\n else:\n input = Variable(input)\n input = torch.squeeze(input,0)\n\n output = unet(input)\n\n if self.cuda:\n output = output.cuda()\n\n result = torch.cat([input.data, output.data], 0)\n\n torchvision.utils.save_image(result, save_path)", "def _load_from_memory(self, cmd, ffile, data):\n status = 0\n with contextlib.closing(io.BytesIO()) as new_file:\n new_file.name = ffile\n try:\n data.writeto(new_file, overwrite=True)\n new_fits = new_file.getvalue()\n\n log.debug(\"Running DS9 command: {}\".format(cmd))\n status = self.run(cmd, buf=[new_fits,\n len(new_fits)])\n except (TypeError, ValueError):\n msg = \"Cannot load image {} \" \\\n \"from memory\".format(ffile)\n log.warning(msg)\n raise ValueError(msg)\n return status", "def load(self, path, shape=(1024, 1024, 35), dtype='uint16'):\n valid_dtypes = ['uint8', 'uint16']\n if dtype not in valid_dtypes:\n raise ValueError('dtype should be either one of %s' % ', '.join(valid_dtypes))\n\n im = io.imread(path)\n im = numpy.rollaxis(im, 0, 3)\n\n if im.shape != shape and shape is not None:\n factors = tuple(map(lambda z: int(z[0] / z[1]), zip(im.shape, shape)))\n if any([f > 1 for f in factors]):\n # im = resize(im, shape, mode='constant')\n im = downscale_local_mean(im, factors=factors).astype(im.dtype)\n # if 'conf' in path.lower():\n else:\n warnings.warn('Target shape is not a multiple below initial shape')\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n if dtype == 'uint8' and im.dtype != numpy.uint8:\n im = img_as_ubyte(im)\n if dtype == 'uint16' and im.dtype != 
numpy.uint16:\n im = img_as_uint(im)\n\n self.image_raw = im\n self.name = path", "def get_image(self, image):\n self.original_image = image\n self.prepare_images()", "def read(self, timestamp):\n\n if timestamp is None:\n raise ValueError(\"No time stamp passed\")\n\n try:\n return_img, return_metadata = self._read_img()\n except IOError:\n warnings.warn('Error loading image for {}, '\n 'generating empty image instead'.format(timestamp.date()))\n return_img, return_metadata = self._read_empty()\n\n if self.flatten:\n self.img = Image(self.grid.activearrlon, self.grid.activearrlat,\n return_img, return_metadata, timestamp)\n\n else:\n try:\n shape = self.grid.subset_shape\n except AttributeError:\n shape = self.grid.shape\n\n rows, cols = shape\n for key in return_img:\n return_img[key] = np.flipud(return_img[key].reshape(rows, cols))\n\n self.img = Image(self.grid.activearrlon.reshape(rows, cols),\n np.flipud(self.grid.activearrlat.reshape(rows, cols)),\n return_img,\n return_metadata,\n timestamp)\n return self.img", "def get_img(self, img=None):\n\n if self.img is None: #No image specified to the ROI object\n\n # If no image is saved, check if an image was passed. If so, return the ROI of that image.\n if img is None:\n print('no image provided')\n else:\n return img[self.coords[0]:self.coords[1], self.coords[2]:self.coords[3]]\n else:\n return self.img", "def load_image(path_to_image, image_name):\n print(\"Loading: \", path_to_image + image_name, \" ...\")\n return Image.open(path_to_image + image_name)", "def load_image(cls, fullname):\n\t\ttry:\n\t\t\timage_stream = open(fullname, 'rb')\n\t\t\timage = pyglet.image.load(fullname, file=image_stream)\n\t\texcept IOError, message:\n\t\t\tprint 'Cannot load image:', fullname\n\t\t\traise ImageLoadFileIOError, message\n\t\treturn image", "def load_image(file_path):\r\n return Image.open(file_path)", "def read_image(self, path):\n patch_img = Image.open(path).convert('RGB')\n tf = transforms.Resize((self.config.patch_size, self.config.patch_size))\n patch_img = tf(patch_img)\n tf = transforms.ToTensor()\n \n adv_patch_cpu = tf(patch_img)\n return adv_patch_cpu", "def image(self, name=None):\n raise NotImplementedError", "def open(self, img_name, size=\"default\"):\n print(\"Openning %s\" % img_name)\n self.img_original = Image.open(img_name, mode='r')\n self.img_name = img_name\n\n if size == \"default\":\n size = self.img_original.size[0]\n\n self.img_debut = self.img_resize(size)\n return self.img_debut", "def get_image(image_path):\r\n\r\n return Image.open(image_path)", "def read_image(img_path):\n got_img = False\n if not osp.exists(img_path):\n raise IOError(\"{} does not exist\".format(img_path))\n while not got_img:\n try:\n img = Image.open(img_path).convert('RGB')\n got_img = True\n except IOError:\n from ipdb import set_trace; set_trace()\n print(\"IOError incurred when reading '{}'. Will redo. Don't worry. 
Just chill.\".format(img_path))\n pass\n return img", "def save_img(self):\r\n self.extract_info_from_file()\r\n path_0 = os.path.join(self.output_path, self.field_id, self.patient_id + self.ext)\r\n path_1 = os.path.join(self.output_path, self.field_id + '_' + self.instance, self.patient_id + self.ext)\r\n if self.shot == '0': # first shot\r\n if os.path.exists(path_0) or os.path.exists(path_1):\r\n print(self.patient_id, 'already done')\r\n pass\r\n else:\r\n if not self.img_computed:\r\n self.compute_img()\r\n if self.instance == '0':\r\n self.img.save(path_0)\r\n else:\r\n self.img.save(path_1)\r\n else: # newer shot\r\n if not self.img_computed:\r\n self.compute_img()\r\n if self.instance == '0':\r\n self.img.save(path_0)\r\n else:\r\n self.img.save(path_1)", "def read_image(img_path):\n got_img = False\n if not osp.exists(img_path):\n raise IOError(\"{} does not exist\".format(img_path))\n while not got_img:\n try:\n img = Image.open(img_path).convert('RGB')\n got_img = True\n except IOError:\n print(\"IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.\".format(img_path))\n pass\n return img", "def read_image(image_path, *args, **kwargs):\n # TODO: Implement the method\n image2 = Image.open(image_path)\n image = num.asarray(image2)\n\n return image", "def load_image(default=True):\n if default:\n print(\"in heres\")\n return self.img\n else:\n img = Image.fromarray(cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB))\n self.size = img.shape\n return img", "def store_image(self, http_client, link_hash, src, config):\r\n # check for a cache hit already on disk\r\n image = self.read_localfile(link_hash, src, config)\r\n if image:\r\n return image\r\n\r\n # no cache found download the image\r\n data = self.fetch(http_client, src)\r\n if data:\r\n image = self.write_localfile(data, link_hash, src, config)\r\n if image:\r\n return image\r\n\r\n return None", "def load_image(self):\n try:\n return Image.open(self._path, 'r')\n except IOError:\n messagebox.showerror(\"Error\", \"Wrong sprite file path!\")", "def read(self, **kwargs):\n\n if not os.path.isfile(self.file_name):\n self.build()\n elif 'clobber' in self.kwargs.keys():\n if self.kwargs['clobber']: \n self.build()\n\n # read dLOS file from parent class\n super(DlosPhotoz, self).read()\n\n self.dlos, self.dlos_photoz = np.loadtxt(\n self.file_name, \n skiprows=1, \n unpack=True, \n usecols=[0, 1]\n )\n return None", "def fromrecord(self, *args, **kwargs):\n return _image.image_fromrecord(self, *args, **kwargs)", "def read_image(fileame, representation):\n validate_representation(representation)\n\n im = imread(fileame)\n if representation == 1 and is_rgb(im):\n # We should convert from Grayscale to RGB\n im = rgb2gray(im)\n return im.astype(np.float32)\n\n return normlized_image(im)", "def newimagefromfile(self, infile):\n return _image.image_newimagefromfile(self, infile)", "def load_image(self, filename, colorkey=None): # -> image\n raise NotImplementedError(u'This should be implemented in a inherited class')", "def load_image(self, filename, colorkey=None): # -> image\n raise NotImplementedError(u'This should be implemented in a inherited class')", "def imread(fname):\r\n return skimage.io.imread(fname)", "def _set_image_reader(self, image_reader):\n\n self.variables.canvas_image_object = CanvasImage(\n image_reader, self.variables.canvas_width, self.variables.canvas_height)\n if self.variables.rescale_image_to_fit_canvas:\n self.set_image_from_numpy_array(self.variables.canvas_image_object.display_image)\n else:\n 
self.set_image_from_numpy_array(self.variables.canvas_image_object.canvas_decimated_image)", "def _read_image_from_file(file_name):\n image_file = open(file_name, 'rb')\n image = image_file.read()\n image_file.close()\n return image", "def image_loader(fileobj):\n if isinstance(fileobj, six.string_types):\n return cv2.imread(fileobj, cv2.IMREAD_COLOR)[..., ::-1] #bgr->rgb\n elif isinstance(fileobj, bytes):\n byte_arr = bytearray(fileobj)\n else:\n byte_arr = bytearray(fileobj.read())\n \n return cv2.imdecode(np.asarray(byte_arr, dtype=np.uint8), cv2.IMREAD_COLOR)[..., ::-1] #bgr->rgb", "def load_image(self):\n if isinstance(self.filename, str):\n self.image = np.asarray(PIL.Image.open(self.filename))\n elif isinstance(self.filename, np.ndarray):\n self.image = np.asarray(self.filename)\n if self.image.ndim < 3:\n self.bw = True\n if self.image.ndim < 2:\n self.image = None\n print(\"file {} is not an appropriate format.\".format(\n self.filename))\n if self.image.ndim == 3:\n if self.image.shape[-1] == 1:\n self.image = np.squeeze(self.image)\n elif self.image.shape[-1] > 3:\n self.image = self.image[..., :-1]\n if (self.image[..., 0] == self.image.mean(-1)).mean() == 1:\n self.image = self.image[..., 0]\n self.bw = True\n return self.image", "def read_image(filename, representation):\n img = imread(filename)\n img = int2float(img)\n if representation == GS_REP:\n img = rgb2gray(img)\n return img", "def read(self, image, file_name, image_offset = 0, read_bytes = None):\n assert isinstance(image, str)\n assert isinstance(file_name, str)\n assert isinstance(image_offset, int)\n assert read_bytes == None or isinstance(read_bytes, int)\n\n assert image_offset >= 0, 'offset value should be positive'\n if read_bytes != None:\n assert read_bytes >= 0, 'bytes value should be positive'\n\n target = self.target\n target.report_info(f\"{image}: reading image\", dlevel = 1)\n\n with io.open(file_name, \"wb+\") as of, \\\n contextlib.closing(self.target.ttbd_iface_call(\"images\",\n \"flash\",\n method = \"GET\",\n stream = True,\n raw = True,\n image=image,\n image_offset=image_offset,\n read_bytes=read_bytes)) as r:\n # http://docs.python-requests.org/en/master/user/quickstart/#response-content\n chunk_size = 4096\n total = 0\n for chunk in r.iter_content(chunk_size):\n of.write(chunk)\n total += len(chunk)\t# not chunk_size, it might be less\n target.report_info(f\"{image}: read image\")\n target.report_info(f\"{image}: image saved to {Path(file_name).resolve()}\")\n return total\n\n return r['result']", "def img_read(name):\n\n img = cv2.imread(name)\n\n return img", "def reader(self, idx):\n # Get the path of input image and groundtruth mask.\n input_path, gtmask_path = self.imgs[idx]\n input_img, gt_img = self.loader(input_path, gtmask_path)\n return input_img, gt_img", "def get_input(path):\n img = imread(path)\n return img", "def set_image(self, image):\n\n # would be better if we only saved if it didn't exist\n if image.data:\n # save the images data\n self._set_image_data(image)\n\n # could be an update, could be new\n image = self._save_to_redis(image)\n\n # let the world know we have added a new image\n self.revent.fire('image_added',{\n 'source_page_url': image.source_page_url,\n 'source_url': image.source_url,\n 'shahash': image.shahash,\n 'vhash': image.vhash,\n 'xdim': image.xdim,\n 'ydim': image.ydim,\n })\n\n return image", "def get_image(self):\n return self.process_raw_image(self.get_raw_image())", "def read_image(img_path):\r\n got_img = False\r\n while not got_img:\r\n try:\r\n 
img = Image.open(img_path).convert('RGB')\r\n got_img = True\r\n except IOError:\r\n print(\"IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.\".format(img_path))\r\n pass\r\n return img", "def load_image(self, image_id):\n # Load image\n image = skimage.io.imread(self.image_info[image_id]['path'])\n # If grayscale. Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n return image", "def loadImage(name, size=0):\n path = os.path.join(PACKAGE_HOME, 'input', name)\n fd = open(path, 'rb')\n data = fd.read()\n fd.close()\n return data", "def open_image(name):\n img_name = 'input/' + name + '.png'\n return cv2.imread(img_name, cv2.IMREAD_UNCHANGED)", "def read_image(path):\n img = ndimage.imread(path, mode=\"RGB\") \n return img", "def read_image_from_disc(image_path,shape=None,dtype=tf.uint8): \n image_raw = tf.read_file(image_path)\n if dtype==tf.uint8:\n image = tf.image.decode_image(image_raw)\n else:\n image = tf.image.decode_png(image_raw,dtype=dtype)\n if shape is None:\n image.set_shape([None,None,3])\n else:\n image.set_shape(shape)\n return tf.cast(image, dtype=tf.float32)", "def read_image(img_path):\n got_img = False\n if not os.path.exists(img_path):\n raise IOError(\"{} does not exist\".format(img_path))\n while not got_img:\n try:\n img = Image.open(img_path).convert('RGB')\n got_img = True\n except IOError:\n print(\"IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.\".format(img_path))\n pass\n return img", "def readImage(self, path, tt=1):\n return cv2.imread( path, tt)", "def load_image(self, image_index):\n\t\t\timage_info = self.coco.loadImgs(self.image_ids[image_index])[0]\n\t\t\tpath = os.path.join(self.data_dir, 'images', self.set_name, image_info['file_name'])\n\t\t\treturn read_image_bgr(path)", "def getImageObject(self, path):\n site = getSite()\n\n img = site.restrictedTraverse(path)\n\n # UGH... the resource based image isn't a real class but some\n # dynamically generated Five metaclass... 
no fucking clue\n # how to do interface checks against those,\n # so we just\n if (\"DirContainedImageResource\" in img.__class__.__name__) or (\"FileResource\" in img.__class__.__name__) :\n # Resource based image, on file system, handle with PIL\n # info = (width, height)\n source = img.context.path\n info = PIL.Image.open(source)\n return info\n elif isinstance(img, FSImage):\n # FSImage at /plone/logo\n # width, height = util.getImageInfo(img)\n # <implementedBy Products.CMFCore.FSImage.FSImage>\n return img\n elif isinstance(img, OFS.Image.Image):\n # image uploaded to a portal_skins/custom\n return img\n elif IATImage.providedBy(img):\n # Image is uploaded image content type\n return img.getImage()\n else:\n\n if callable(img):\n img = img()\n\n if isinstance(img, ATFieldImage):\n return img\n\n raise RuntimeError(\"Unknown image object %s:%s\" % (path, str(img.__class__)))\n\n return info", "def openFile(path_name):\n if os.path.isdir(path_name):\n reader = sitk.ImageSeriesReader()\n dicom_names = reader.GetGDCMSeriesFileNames(path_name)\n reader.SetFileNames(dicom_names)\n image_object = reader.Execute()\n \n elif os.path.isfile(path_name):\n image_object = sitk.ReadImage(path_name)\n\n else:\n print(\"Path name wrong.\")\n return None\n\n return image_object", "def _cache_and_write_image(self, image_info, device, configdrive=None):\n _download_image(image_info)\n self.partition_uuids = _write_image(image_info, device, configdrive)\n self.cached_image_id = image_info['id']", "def load_and_process_image(self, im_path):\n image = Image.open(im_path).convert('RGB')\n image = transforms.ToTensor()(image)\n image = 2 * image - 1\n return image", "def load_image(self, image_id):\n # Load image\n image = skimage.io.imread(self.image_info[image_id]['path'])\n # If grayscale. 
Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n return image", "def read_image(filename, representation):\n im = imread(filename)\n if representation == GS_REP:\n im = rgb2gray(im)\n im = np.divide(im, MAX_VALUE - 1)\n return im", "def read_image_from_fs(name):\n with open(name, \"rb\") as fin:\n return fin.read()", "def image(self):\n return self._image", "def read_img(img_id, data_dir, train_or_test, size):\n img = image.load_img(os.path.join(data_dir, train_or_test, '%s.jpg' % img_id), target_size=size)\n img = image.img_to_array(img)\n return img", "def load(self):\r\n self._open_image()\r\n\r\n # Handle images with palettes\r\n if self.image.palette and self.image.palette.mode == 'RGB':\r\n logger.debug(\"Converting P image to RGB using palette\")\r\n self.image = self.image.convert('RGB', palette=self.image.palette)\r\n\r\n components, data = image_data(self.image)\r\n\r\n texture = self.ctx.texture(\r\n self.image.size,\r\n components,\r\n data,\r\n )\r\n texture.extra = {'meta': self.meta}\r\n\r\n if self.meta.mipmap_levels is not None:\r\n self.meta.mipmap = True\r\n\r\n if self.meta.mipmap:\r\n if isinstance(self.meta.mipmap_levels, tuple):\r\n texture.build_mipmaps(*self.meta.mipmap_levels)\r\n else:\r\n texture.build_mipmaps()\r\n\r\n if self.meta.anisotropy:\r\n texture.anisotropy = self.meta.anisotropy\r\n\r\n self._close_image()\r\n\r\n return texture", "def image_loader(image_name, dev):\n image = Image.open(image_name)\n image = loader(image).float()\n image = Variable(image, requires_grad=True)\n image = image.unsqueeze(0) #this is for VGG, may not be needed for ResNet\n return image.to(dev) #assumes that you're using GPU", "def __init__(self, file, name):\n if isinstance(file, ContentFile):\n image_data = file\n else:\n image_data = ContentFile(file.read(), name=name)\n self.image_data = image_data\n file.close()", "def get_image_from_file(path):\n try:\n img = Image.open(path)\n return img\n except IOError as e:\n print e\n return None", "def read_images_from_disk(input_queue):\n\tlabel = input_queue[1]\n\tfile_contents = tf.read_file(input_queue[0])\n\texample = tf.image.decode_jpeg(file_contents, channels=3)\n\treturn example, label", "def read_image(filename, representation):\n image = imread(filename)\n new_image = image.astype(np.float64)\n new_image /= 255\n if representation == 1:\n new_image = rgb2gray(new_image)\n return new_image", "def _open_image(self, path):\n return cv.imread(path, 1)\n # .astype(float)", "def read_image(path):\n reader = sitk.ImageSeriesReader()\n dicom_filenames = reader.GetGDCMSeriesFileNames(path)\n reader.SetFileNames(dicom_filenames)\n reader.LoadPrivateTagsOn()\n img = reader.Execute()\n img.SetOrigin((0, 0, 0))\n return img", "def newimage(self, infile):\n return _image.image_newimage(self, infile)" ]
[ "0.6418173", "0.6256043", "0.62217027", "0.6114637", "0.5992335", "0.59468126", "0.59468126", "0.58881605", "0.5819111", "0.58140945", "0.58045965", "0.57723343", "0.5748826", "0.5742564", "0.5734244", "0.57320607", "0.5721345", "0.57174456", "0.56965464", "0.56918967", "0.5688705", "0.5661655", "0.5629224", "0.56243324", "0.56212866", "0.56207865", "0.5603207", "0.55910975", "0.55898815", "0.5589065", "0.5564889", "0.55590904", "0.55545354", "0.5545744", "0.5541567", "0.55376494", "0.55298513", "0.5525955", "0.5523773", "0.5519269", "0.5511584", "0.54864657", "0.54843605", "0.54820913", "0.5481073", "0.54804564", "0.54776436", "0.54735655", "0.5470534", "0.5470529", "0.5470459", "0.54679286", "0.54665047", "0.546425", "0.5458502", "0.54584455", "0.5453658", "0.5447805", "0.544264", "0.54373425", "0.5432472", "0.5432472", "0.5429706", "0.54287976", "0.54268074", "0.5426315", "0.54226667", "0.5420298", "0.54140747", "0.5409816", "0.54062253", "0.5401723", "0.5401543", "0.5394775", "0.53920454", "0.5385204", "0.538081", "0.5375938", "0.53732735", "0.53702563", "0.53664416", "0.5358817", "0.5356141", "0.5356131", "0.53521943", "0.53508633", "0.53503436", "0.5349993", "0.53479445", "0.53453535", "0.53331155", "0.53183717", "0.5314725", "0.5312179", "0.5309585", "0.53043824", "0.5299054", "0.5298771", "0.5298367", "0.5296008", "0.52905214" ]
0.0
-1
Read an image from the disk stack, or return im's image from the list of images
def get_im(stackname, im = 0): if type(stackname) == type(""): e = EMData() e.read_image(stackname, im) return e else: return stackname[im].copy()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_image(path):\n img = misc.imread(path)\n return img", "def read_image():\n images = []\n for hand in os.listdir('images'):\n img = cv2.imread(os.path.join('images', hand))\n if img is not None:\n images.append(img)\n return images", "def image(images):\n return images[0]", "def read_image(img_path):\n img = imageio.imread(uri=img_path)\n return img", "def read_img(img_path): \n return sitk.GetArrayFromImage(sitk.ReadImage(img_path))", "def imread(fname):\r\n return skimage.io.imread(fname)", "def im_open(path):\n\n try:\n assert os.path.isdir(path)\n #get file list in directory - glob includes full path\n files = sorted(glob.glob('{}{}*'.format(path,os.sep)), key=sort_key) \n #load the collection\n raw_stack = io.imread_collection(files)\n #turn the collection into a np array and remove extraneous OCT portion from 1025:1083 on x axis. (z,y,x)\n #if .bmp files are open (from pv-oct), the slicing will not affect them, the x-axis is only 540 pixels.\n stack = io.collection.concatenate_images(raw_stack)[:,:,0:1024]\n \n return stack\n\n except AssertionError:\n sys.exit(\"A non-directory object was given to the __open__ function\")", "def read_image(path: str):\n return Image.open(path, mode=\"r\")", "def read_img(img_path):\n return sitk.GetArrayFromImage(sitk.ReadImage(img_path))", "def read_image(img_path):\n got_img = False\n if not osp.exists(img_path):\n raise IOError(\"{} does not exist\".format(img_path))\n while not got_img:\n try:\n img = Image.open(img_path).convert('RGB')\n got_img = True\n except IOError:\n from ipdb import set_trace; set_trace()\n print(\"IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.\".format(img_path))\n pass\n return img", "def get_image(self, imnames, idx):\r\n path = os.path.join(self.img_path, imnames[idx])\r\n return Image.open(path).convert('RGB')", "def read_image(images_root):\n im_array = np.load(images_root)\n return im_array", "def read_image(img_path):\n got_img = False\n if not osp.exists(img_path):\n raise IOError(\"{} does not exist\".format(img_path))\n while not got_img:\n try:\n img = Image.open(img_path).convert('RGB')\n got_img = True\n except IOError:\n print(\"IOError incurred when reading '{}'. Will redo. Don't worry. 
Just chill.\".format(img_path))\n pass\n return img", "def get_images(stage=0):\n return get_files(stage)[0]", "def read_images_from_disk(input_queue):\n\tlabel = input_queue[1]\n\tfile_contents = tf.read_file(input_queue[0])\n\texample = tf.image.decode_jpeg(file_contents, channels=3)\n\treturn example, label", "def read_image(path):\n img = ndimage.imread(path, mode=\"RGB\") \n return img", "def read_images_from_disk(input_queue):\n label = input_queue[1]\n file_contents = tf.read_file(input_queue[0])\n example = tf.image.decode_png(file_contents, channels=3)\n return example, label", "def getimg(filename):\n return np.asarray(Image.open('imgdb/'+filename))", "def read_image(path):\n reader = sitk.ImageSeriesReader()\n dicom_filenames = reader.GetGDCMSeriesFileNames(path)\n reader.SetFileNames(dicom_filenames)\n reader.LoadPrivateTagsOn()\n img = reader.Execute()\n img.SetOrigin((0, 0, 0))\n return img", "def readImage(self, path, tt=1):\n return cv2.imread( path, tt)", "def imread(filename):\n filename = process(filename)\n ext = os.path.splitext(filename)[1]\n if ext.lower() == '.pfm':\n return load_pfm(filename)\n elif ext.lower() == '.dng':\n return load_dng(filename)\n else:\n loaded = cv2.imread(filename, flags=cv2.IMREAD_ANYDEPTH + cv2.IMREAD_COLOR)\n if loaded is None:\n raise IOError('Could not read {0}'.format(filename))\n else:\n return loaded", "def img_read(name):\n\n img = cv2.imread(name)\n\n return img", "def read_image(img_path):\n got_img = False\n if not os.path.exists(img_path):\n raise IOError(\"{} does not exist\".format(img_path))\n while not got_img:\n try:\n img = Image.open(img_path).convert('RGB')\n got_img = True\n except IOError:\n print(\"IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.\".format(img_path))\n pass\n return img", "def read_image(img_path):\r\n got_img = False\r\n while not got_img:\r\n try:\r\n img = Image.open(img_path).convert('RGB')\r\n got_img = True\r\n except IOError:\r\n print(\"IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.\".format(img_path))\r\n pass\r\n return img", "def read_image(image_path):\n if not os.path.exists(image_path):\n raise IOError('File does not exist: %s' % image_path)\n else:\n return Image.open(image_path)", "def Read(image_path):\n # use cv2.imread() to read an images.\n # syntax : cv2.imread(filename, flag=None)\n return cv2.imread(image_path, 0)", "def read_img(img_path:str) -> object:\n img = cv2.imread(img_path)\n return img", "def imread(path):\n with open(path, 'rb') as f:\n with PIL.Image.open(f) as img:\n return img.convert('RGB')", "def image(self, name=None):\n return self.find(self.images(), name=name)", "def image(fname):\n return cv2.imread(fname)", "def read_image(img_path):\n\tgot_img = False\n\twhile not got_img:\n\t\ttry:\n\t\t\timg = Image.open(img_path).convert('RGB')\n\t\t\timg = img.resize((100,100),Image.ANTIALIAS)\n\t\t\tgot_img = True\n\t\texcept IOError:\n\t\t\tprint(\"IOError incurred when reading '{}'. Will redo. Don't worry. 
Just chill.\".format(img_path))\n\t\t\tpass\n\treturn img", "def get_image_from_file(path):\n try:\n img = Image.open(path)\n return img\n except IOError as e:\n print e\n return None", "def load(image_path):\n out = None\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n # Use skimage io.imread\n out = io.imread(image_path)\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return out", "def iread(filename, *args, verbose=True, **kwargs):\n\n # determine if file is valid:\n # assert isinstance(filename, str), 'filename must be a string'\n\n\n # TODO read options for image\n # opt = {\n # 'uint8': False,\n # 'single': False,\n # 'double': False,\n # 'grey': False,\n # 'grey_709': False,\n # 'gamma': 'sRGB',\n # 'reduce': 1.0,\n # 'roi': None\n # }\n\n if isinstance(filename, str) and (filename.startswith(\"http://\") or filename.startswith(\"https://\")):\n # reading from a URL\n\n resp = urllib.request.urlopen(filename)\n array = np.asarray(bytearray(resp.read()), dtype=\"uint8\")\n image = cv.imdecode(array, -1)\n print(image.shape)\n return (image, filename)\n\n elif isinstance(filename, (str, Path)):\n # reading from a file\n\n path = Path(filename).expanduser()\n\n if any([c in \"?*\" for c in str(path)]):\n # contains wildcard characters, glob it\n # recurse and return a list\n # https://stackoverflow.com/questions/51108256/how-to-take-a-pathname-string-with-wildcards-and-resolve-the-glob-with-pathlib\n \n parts = path.parts[1:] if path.is_absolute() else path.parts\n p = Path(path.root).glob(str(Path(\"\").joinpath(*parts)))\n pathlist = list(p)\n\n if len(pathlist) == 0 and not path.is_absolute():\n # look in the toolbox image folder\n path = Path(__file__).parent / \"images\" / path\n parts = path.parts[1:] if path.is_absolute() else path.parts\n p = Path(path.root).glob(str(Path(\"\").joinpath(*parts)))\n pathlist = list(p)\n \n if len(pathlist) == 0:\n raise ValueError(\"can't expand wildcard\")\n\n imlist = []\n pathlist.sort()\n for p in pathlist:\n imlist.append(iread(p, **kwargs))\n return imlist\n\n else:\n # read single file\n\n if not path.exists():\n if path.is_absolute():\n raise ValueError(f\"file {filename} does not exist\")\n # file doesn't exist\n # see if it matches the supplied images\n path = Path(__file__).parent / \"images\" / path\n\n if not path.exists():\n raise ValueError(f\"file {filename} does not exist, and not found in supplied images\")\n\n # read the image\n # TODO not sure the following will work on Windows\n im = cv.imread(path.as_posix(), **kwargs) # default read-in as BGR\n\n if im is None:\n # TODO check ValueError\n raise ValueError(f\"Could not read {filename}\")\n\n return (im, str(path))\n\n elif islistof(filename, (str, Path)):\n # list of filenames or URLs\n # assume none of these are wildcards, TODO should check\n out = []\n for file in filename:\n out.append(iread(file, *args))\n return out\n else:\n raise ValueError(filename, 'invalid filename')", "def imread(filename, *args, **kwargs):\r\n try:\r\n netpbm = NetpbmFile(filename)\r\n image = netpbm.asarray()\r\n finally:\r\n netpbm.close()\r\n return image", "def read_img(path):\r\n if os.path.isfile(path):\r\n return cv2.imread(path)\r\n else:\r\n raise ValueError('hiiiiiiiiii')", "def imread(path):\n img = cv2.imread(path)\n return img", "def get_input(path):\n img = imread(path)\n return img", "def get_image(image_path):\r\n\r\n return Image.open(image_path)", "def 
read(self):\n with self.lock:\n return self.image", "def read_img(img_path):\n img_list=[]\n print('image loading...')\n for _,_,files in os.walk(img_path):\n for f in files:\n if f.find('.dcm')>=0:\n tmp_img=dicom.dcmread(os.path.join(img_path,f))\n tmp_img=tmp_img.pixel_array#[0::2,0::2]\n img_list.append(tmp_img)\n img_data=np.array(img_list)\n print('done')\n return img_data", "def checkForImage(self, PATH=\"./Input/\"):\n files = [ _ for _ in sorted(os.listdir(PATH)) if \".png\" in _ and \"Image\" in _ ]\n if len(files) > 0:\n f = files[0]\n im = cv2.imread(PATH + f)\n os.remove(PATH + f)\n return im\n else:\n return None", "def load_image(imfile):\n\n im = cv2.imread(imfile)\n\n return [im]", "def read_image(image_path, *args, **kwargs):\n # TODO: Implement the method\n image2 = Image.open(image_path)\n image = num.asarray(image2)\n\n return image", "def read(self, index):\n assert type(index) is int\n img = self.db.get_node('/images/img{:04d}'.format(index))\n return np.array(img)", "def read_image(filename, representation):\n im = imread(filename)\n if representation == GS_REP:\n im = rgb2gray(im)\n im = np.divide(im, MAX_VALUE - 1)\n return im", "def read_image(filename):\n img = Image.open(filename)\n im = np.array(img)\n return im", "def load_image(self, index):\n image_path = os.path.join(self.folder_path, self.image_ids[index] + '.jpg')\n img = Image.open(image_path).convert('RGB')\n if debug:\n print(\"Loaded image: \", image_path)\n return img", "def load(path) -> Image:\n return Image.open(path)", "def GetImage(self, which=TreeItemIcon_Normal):\r\n \r\n return self._images[which]", "def read_image(fs, img_path, mode=\"rb\"):\n f = fs.open(img_path, mode)\n pil_img = Image.open(f)\n img_array = np.asarray(pil_img, dtype=np.uint8)\n f.close()\n return img_array", "def get_input(self, idx):\r\n img_filename = self.root / \"images\" / self._image_array[idx]\r\n x = Image.open(img_filename)\r\n return x", "def read_image_from_fs(name):\n with open(name, \"rb\") as fin:\n return fin.read()", "def imread(filename):\n return np.asarray(Image.open(filename), dtype=np.uint8)[..., :3]", "def get_image(inf):\n try:\n x, y = Image.open(inf).size\n except FileNotFoundError:\n print(\"Error: {} file not found.\".format(inf))\n sys.exit(1)\n\n pixels = list(Image.open(inf).getdata())\n return x, y, pixels", "def read_im(im_path):\n im = cv2.imread(im_path)\n return im", "def load_image(self, image_id):\n \n # load image infos\n \n info = self.image_info[image_id]\n patch_path = info['path']\n width = info['width']\n height = info['height']\n impath = os.path.join(patch_path,\"images\")\n file_list = os.listdir(impath) \n channels = info['channels']\n \n image = []\n \n # stack channels to be loaded.\n \n for channel in channels:\n \n if channel == \"none\":\n channel_image = skimage.img_as_ubyte(np.zeros( (height,width) ) )\n \n else:\n channel_image_name = [x for x in file_list if channel in x][0] \n channel_image_path = os.path.join(impath, channel_image_name)\n channel_image = skimage.io.imread(channel_image_path)\n channel_image = skimage.img_as_ubyte(channel_image)\n image.append(channel_image)\n \n image = np.stack(image, axis=2)\n \n return image", "def get_image(name):\r\n return nova.images.find(name=name)", "def read_image(self, filePath):\n if filePath.endswith(\".dcm\"):\n image = sitk.ReadImage(filePath)\n image = sitk.GetArrayFromImage(image).astype(\"int16\")\n image = np.expand_dims(image[0,:,:], -1)\n elif filePath.endswith(\".png\"):\n image = cv2.imread(filePath)\n image 
= np.array(image, dtype = \"int16\")\n elif filePath.endswith(\".mha\"):\n image = sitk.ReadImage(filePath)\n image = sitk.GetArrayFromImage(image).astype(\"int16\")\n image = np.transpose(image,(1,2,0))\n return image", "def load_image(file_path):\r\n return Image.open(file_path)", "def imread(img_path):\n if not os.path.exists(img_path):\n raise ImageNotFoundError(f\"Image {img_path} could'nt be located\")\n\n img = cv2.imread(img_path)\n\n if img is None:\n raise InvalidImageError(f\"Image {img_path} could'nt be loaded\")\n\n return img", "def get_image(path):\n\n # Check if the picture exists or not.\n if not os.path.isfile(path):\n print('Cannot open the image. Please try again!')\n exit(1)\n\n try:\n # Open the image.\n image = Image.open(path)\n\n # If everything is okay return it.\n return image\n # If an error occurred.\n except Exception as err:\n print('Error occurred while trying to open the image:', err, 'Please try again!')\n exit(1)", "def load_image(self, image_index):\n\t\t\timage_info = self.coco.loadImgs(self.image_ids[image_index])[0]\n\t\t\tpath = os.path.join(self.data_dir, 'images', self.set_name, image_info['file_name'])\n\t\t\treturn read_image_bgr(path)", "def img_in(filename):\n temp_img = Image.open(filename)\n img = np.array(temp_img)\n name = filename.split('.')[-2]\n return name, img", "def returnImages(input_data):\r\n if type(input_data) is list:\r\n return [returnImages(element) for element in input_data]\r\n else:\r\n return cv.imread(input_data, 0)", "def read_images(fs, img_path_batch, mode=\"rb\"):\n result = []\n logging.info(\"Start to read images at {}\".format(socket.gethostname()))\n for (label, img_path) in img_path_batch:\n img = read_image(fs, img_path, mode)\n result.append((label, img))\n logging.info(\"Finish the reading of {} images on {}\".format(\n len(result), socket.gethostname()))\n return result", "def img(filename='steine.jpg'):\n\treturn mpimg.imread('/content/drive/My Drive/colab/images/'+filename)", "def read_image(fileame, representation):\n validate_representation(representation)\n\n im = imread(fileame)\n if representation == 1 and is_rgb(im):\n # We should convert from Grayscale to RGB\n im = rgb2gray(im)\n return im.astype(np.float32)\n\n return normlized_image(im)", "def read_image(img_path, show=False):\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n if not img.dtype == np.uint8:\n pass\n\n if show:\n show_image(img)\n\n img = [list(row) for row in img]\n return img", "def read_image_from_path(path: str, num_channels: Optional[int]=None, return_num_bytes=False) ->Union[Optional[torch.Tensor], Tuple[Optional[torch.Tensor], int]]:\n bytes_obj = get_bytes_obj_from_path(path)\n image = read_image_from_bytes_obj(bytes_obj, num_channels)\n if return_num_bytes:\n if bytes_obj is not None:\n num_bytes = len(bytes_obj)\n else:\n num_bytes = None\n return image, num_bytes\n else:\n return image", "def imgLoad(path, gray=False):\n\tif gray:\n\t\treturn to_tensor(Image.open(path).convert('L'))[None,...]\n\treturn to_tensor(Image.open(path))[None,...]", "def imgLoad(path, gray=False):\n\tif gray:\n\t\treturn to_tensor(Image.open(path).convert('L'))[None,...]\n\treturn to_tensor(Image.open(path))[None,...]", "def load_single_image(image_path, dim=100):\n if not isinstance(image_path, str):\n img = Image.open(image_path)\n img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)\n img = preprocess_data(img, dim)\n else:\n img = cv2.imread(image_path, cv2.IMREAD_COLOR)\n img = preprocess_data(img, dim)\n\n img = np.array([img])\n\n return 
img", "def read_image(img_name, grey=False, use_opencv=False, uint8=False):\n data_dir = Path('..') / 'img'\n if use_opencv:\n if grey:\n img = cv_imread(data_dir / img_name, 0)\n else:\n img = cv_imread(data_dir / img_name)\n else:\n img = imread(data_dir / img_name, as_gray=grey)\n if uint8 and img.dtype != 'uint8':\n img = np.uint8(img)\n return img", "def __read_image(self, path):\n path = 'data/' + path\n image = cv2.imread(path)\n\n # Convert greyscale image to BGR\n if image.shape[-1] == 1:\n image = np.dstack([image, image, image])\n\n # Convert BGR image to RGB image\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n return image", "def load(path):\n print(\"path\", path)\n print(Path(path).is_file())\n if Path(path).is_file():\n img = image.imread(path)\n print(f\"Loading image of dimensions {img.shape[0]} x \"\n f\"{img.shape[1]}\")\n return np.array(img)\n raise FileNotFoundError", "def __readImages(self, filename):\n print 'Reading images from %s ...' % filename\n images = []\n with open(filename, 'rb') as infile:\n infile.read(4) # ignore magic number\n count = struct.unpack('>i', infile.read(4))[0]\n rows = struct.unpack('>i', infile.read(4))[0]\n columns = struct.unpack('>i', infile.read(4))[0]\n\n for i in xrange(count):\n data = infile.read(rows*columns)\n image = np.fromstring(data, dtype=np.uint8)\n image = image.reshape((rows, columns))\n image = 255 - image # now black digit on white background\n images.append(image)\n return images", "def read_image(filename, representation):\n img = imread(filename)\n img = int2float(img)\n if representation == GS_REP:\n img = rgb2gray(img)\n return img", "def read_image(image_path: str) -> np.ndarray:\n assert image_path.exists()\n try:\n with Image.open(image_path) as img:\n image = np.array(img)\n except OSError as e:\n raise OSError(e)\n return image", "def load_image(path_to_image, image_name):\n print(\"Loading: \", path_to_image + image_name, \" ...\")\n return Image.open(path_to_image + image_name)", "def reader(self, idx):\n # Get the path of input image and groundtruth mask.\n input_path, gtmask_path = self.imgs[idx]\n input_img, gt_img = self.loader(input_path, gtmask_path)\n return input_img, gt_img", "def image_by_tag(self, tag):\n if not tag:\n return None\n\n return next((image for image in self.images() if tag\n in image['RepoTags']), None)", "def tiffread(f):\n if type(f) is str:\n # single image\n im = tf.imread(f)\n return im\n\n elif type(f) is list and len(f) == 3:\n # return rgb stack\n f.sort(reverse=True) # so r, g, b\n ims = [tf.imread(x) for x in f]\n return np.dstack(ims)\n else:\n raise ValueError(\"f must be a string or list of 3 strings\")", "def read_image(self, item):\n assert item['image_dtype'] == 'uint16'\n\n filename = os.path.join(self.home(item['basename']))\n s = open(filename, 'rb').read()\n assert hashlib.md5(s).hexdigest() == item['md5']\n img = np.fromstring(s, dtype=item['image_dtype']).byteswap()\n img = img.reshape(item['image_shape'])\n return img", "def read_multiple_images(self, rng=None, return_info=False):\n acq_status=self.get_acquisition_status()\n new_images_rng=self.get_new_images_range()\n if rng is None:\n rng=new_images_rng\n if rng is None:\n return [] if return_info else [],[]\n rng=list(rng)\n rng[0]=max(rng[0],acq_status.acquired-len(self._buffers))\n rng[1]=min(rng[1],acq_status.acquired-1)\n if rng is None:\n return [] if return_info else [],[]\n self._next_read_buffer=max(self._next_read_buffer,rng[1]+1)\n frames=[self._read_buffer(i) for i in 
range(rng[0],rng[1]+1)]\n imgs,infos=list(zip(*frames))\n imgs=[image_utils.convert_image_indexing(im,\"rct\",self.image_indexing) for im in imgs]\n if return_info:\n return imgs,infos\n else:\n return imgs", "def imread(fname):\n try:\n fp = open(fname, 'rb')\n im = Image.open(fp)\n except:\n sys.stderr.write('IOException: Invalid input type on '+fname+'\\n')\n sys.exit(1)\n else:\n if im.format not in FILETYPES:\n sys.stderr.write('IOException: Invalid image type\\n')\n sys.exit(1)\n \n fa = np.array(im.convert('F'))\n im = im.convert('RGB')\n wa = np.array(im)\n \n fp.close()\n\n return fa, wa", "def readImages(respository,*rescale):\n record = []\n onlyfiles = [f for f in listdir(respository) if isfile(join(respository, f))]\n for image in onlyfiles:\n record = record+[readImage(join(respository, image),[0,1,2],rescale)]\n return record\n pass", "def loadImage(self, path: str) -> ndarray:\n try:\n self.img = np.asarray(Image.open(path))\n\n except FileNotFoundError:\n\n print(\"NO such File {}\".format(path))\n return None\n return self.img", "def read_image(fname, roi=None, dset_name='default', parallelism=1):\n\n from functools import partial\n from numpy import array, ndarray\n from multiprocessing import Pool, cpu_count\n\n if isinstance(fname, str):\n fmt = fname.split('.')[-1]\n \n if fmt == '.h5' or fmt == '.hdf5':\n reader = partial(readers[fmt], roi=roi, dset_name=dset_name)\n else:\n reader = partial(readers[fmt], roi=roi)\n \n result = reader(fname)\n\n elif isinstance(fname, (tuple, list, ndarray)):\n fmt = fname[0].split('.')[-1]\n if fmt == '.h5' or fmt == '.hdf5':\n reader = partial(readers[fmt], roi=roi, dset_name=dset_name)\n else:\n reader = partial(readers[fmt], roi=roi)\n\n if parallelism == 1:\n result = array([reader(f) for f in fname])\n\n else:\n if parallelism == -1:\n num_cores = cpu_count()\n else:\n num_cores = min(parallelism, cpu_count())\n\n with Pool(num_cores) as pool:\n result = array(pool.map(reader, fname))\n else:\n raise TypeError(\n \"First argument must be string for a one file or (tuple, list, ndarray) for many files\"\n )\n\n return result", "def _read_image_from_file(file_name):\n image_file = open(file_name, 'rb')\n image = image_file.read()\n image_file.close()\n return image", "def read_local(path):\n files = os.listdir(path)\n imgs = []\n for f in files:\n if f.endswith(\".tiff\") or f.endswith(\".tif\"):\n img = Image.open(os.path.join(path, f))\n imgs.append(np.array(img))\n return imgs", "def read_image(image_png_path, image_png_file):\n # Image\n f = os.path.join(image_png_path, image_png_file)\n #image = cv2.imread(f, cv2.IMREAD_GRAYSCALE)*2\n image = cv2.imread(f, -1)\n return image", "def load_images(tags_pict):\n img_data_list = []\n for p in tags_pict.index :\n img_path = tags_pict.full_path[p]\n img = load_img(img_path, target_size= inputShape)\n x = img_to_array(img)\n x = np.expand_dims(img, axis=0)\n # pre-process the image using the appropriate function based on the\n # model that has been loaded (i.e., mean subtraction, scaling, etc.)\n x = preprocess_input(x)\n img_data_list.append(x)\n img_data = np.array(img_data_list)\n img_data=np.rollaxis(img_data,1,0)\n img_data=img_data[0]\n return(img_data)", "def openFile(path_name):\n if os.path.isdir(path_name):\n reader = sitk.ImageSeriesReader()\n dicom_names = reader.GetGDCMSeriesFileNames(path_name)\n reader.SetFileNames(dicom_names)\n image_object = reader.Execute()\n \n elif os.path.isfile(path_name):\n image_object = sitk.ReadImage(path_name)\n\n else:\n print(\"Path name 
wrong.\")\n return None\n\n return image_object", "def load_image(self, image_index):\n image = self.selected[image_index].iloc[0]\n self._download_image(image)\n\n return read_image_bgr(path)", "def _openImage(self, fname):\n image = cv2.imread(fname,0)\n\n if(image != None):\n return image\n else:\n raise IOError, \"Image file can not be opened\"", "def _open_img(self, img_name):\n try:\n img = Image.open(img_name)\n photo = ImageTk.PhotoImage(img)\n return photo\n except IOError:\n Debug.printi(\"Unable to find image \" + img_name, Debug.Level.ERROR)", "def pick_image(images, idx):\r\n if type(images) == list:\r\n return [pick_image(r, idx) for r in images]\r\n if idx is None:\r\n return images[:, 0]\r\n elif type(idx) == int:\r\n return images[:, idx]\r\n \r\n idx = idx.astype('long').numpy()\r\n images = L.stack([images[i][int(idx[i])] for i in range(images.shape[0])])\r\n return images", "def get_image(self, image):\n return self._get(_image.Image, image)", "def read_image(image_path: str):\n\treturn cv.imread(image_path, cv.IMREAD_UNCHANGED)" ]
[ "0.6800794", "0.6722553", "0.66451883", "0.6614246", "0.6607827", "0.6595313", "0.65405977", "0.653579", "0.64773405", "0.64660364", "0.64657694", "0.6447562", "0.6427345", "0.64263606", "0.64116603", "0.64041156", "0.6394776", "0.638916", "0.63824886", "0.6381627", "0.63719296", "0.6351476", "0.63110435", "0.6285904", "0.6283162", "0.6255123", "0.62497747", "0.6245467", "0.62394613", "0.6224843", "0.6218147", "0.6205187", "0.6199719", "0.6197163", "0.6194575", "0.6185378", "0.61471677", "0.6143097", "0.61419874", "0.6138473", "0.610165", "0.6070114", "0.60592586", "0.60534203", "0.6049913", "0.60410035", "0.60409987", "0.6020364", "0.599915", "0.5998534", "0.598903", "0.59808576", "0.59590447", "0.5954638", "0.5941597", "0.5936841", "0.59167385", "0.59152347", "0.59150857", "0.59101766", "0.59085536", "0.58990675", "0.5897365", "0.58844894", "0.5876571", "0.5844663", "0.5839278", "0.583013", "0.58284783", "0.5820599", "0.58194906", "0.58194906", "0.58166414", "0.5815466", "0.58137834", "0.57997173", "0.57954484", "0.57926613", "0.57867414", "0.5786264", "0.5779528", "0.5773845", "0.5773313", "0.57708853", "0.57699096", "0.57675636", "0.5764017", "0.5760257", "0.5752794", "0.57459706", "0.5741736", "0.57399684", "0.5731971", "0.5728787", "0.5728405", "0.5712791", "0.571277", "0.5711759", "0.57071906", "0.5702931" ]
0.6380314
20
Return a NumPy array containing the image data.
def get_image_data(img):
    from EMAN2 import EMNumPy
    return EMNumPy.em2numpy(img)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_array(self) -> numpy.array:\r\n \r\n return self.pic_array", "def array_from_img(image):\n return np.array(image)", "def to_array(self):\n return np.array(self.to_image())", "def buffer(self) -> np.ndarray:\n return np.array(self._image_data, copy=False)", "def __array__(self):\n return np.asarray(self.data)", "def buffer_data_numpy(self) -> np.ndarray:\n # mask the last 4 bytes to reduce pixel format to mono/color mode and bit width info\n pixel_format = self.data.pixelFormat & 0xFFFF0000\n try:\n arr_dtype, arr_channels = PIXELFORMAT_TO_DTYPE_CHANNELS[pixel_format]\n except KeyError as ex:\n raise NotImplementedError('Pixel format not supported!') from ex\n\n arr_shape = (self.data.height, self.data.width, arr_channels) if arr_channels > 1 \\\n else (self.data.height, self.data.width)\n\n return np.ndarray(buffer=self.buffer_data(),\n dtype=arr_dtype,\n shape=arr_shape)", "def makearray(self, *args, **kwargs):\n return _image.image_makearray(self, *args, **kwargs)", "def image_to_array(self, img):\n x = np.asarray(img, dtype=self.dtype)\n if len(x.shape) == 3:\n if self.channels_first:\n x = x.transpose(2, 0, 1)\n elif len(x.shape) == 2:\n if self.channels_first:\n x = x.reshape((1, x.shape[0], x.shape[1]))\n else:\n x = x.reshape((x.shape[0], x.shape[1], 1))\n else:\n raise ValueError('Unsupported image shape: ', x.shape)\n return x", "def load_image_into_numpy_array(self, path):\n \n return np.array(Image.open(path))", "def data(self):\n return self._img", "def get_image(self):\n image = np.frombuffer(self.image, dtype=np.uint8)\n return image.reshape(*self.size, self.channels)", "def img_to_array(img, path=True):\n global width, height\n\n if path:\n img = Image.open(img)\n img_arr = np.array(img) / 255.0\n img_arr = img_arr.reshape(width, height, channels)\n \n return img_arr", "def load_image_as_array(filename):\n im = Image.open(filename)\n arr = np.asarray(im)\n return arr", "def get_img():\n\timg = camera.Capture()\n\tarray = jetson.utils.cudaToNumpy(img)\n\n\treturn(array)", "def load_image_into_numpy_array(img):\n img_data = tf.io.gfile.GFile(img, 'rb').read()\n image = Image.open(BytesIO(img_data)).resize(INPUT_SIZE)\n (im_width, im_height) = image.size\n arr = np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n return arr", "def image(self):\n return self.pixels.get_array()", "def numpy(self):\n return self.data", "def read_image(image_path):\n im = Image.open(image_path, 'r')\n return np.array(im)", "def array(self):\n return np.asarray(self)", "def image2array(im):\n\n arr = numpy.zeros(im.size)\n\n for x in xrange(im.size[0]):\n for y in xrange(im.size[1]):\n arr[x,y] = im.getpixel((x,y))\n\n return arr", "def bytes_to_np(img: bytes) -> np.ndarray:\n im = Image.open(BytesIO(img))\n im = im.convert(\"RGB\")\n return np.array(im)", "def np_image_matrix(self):\n return np.array(self.crop_image())", "def data_array(self):\n return self._data_array", "def convert_canvas_image_to_array(image_data):\n # Split the image data from the metadata\n split_str = b'base64,'\n index = image_data.find(split_str) + len(split_str)\n # Decode the image data from base64\n png_data = base64.b64decode(image_data[index:])\n # Create a PIL image from the bytes\n image = PIL.Image.open(BytesIO(png_data))\n # Resize image\n image = image.resize([IMAGE_SIZE, IMAGE_SIZE], PIL.Image.LANCZOS)\n # Reshape into a 4D numpy tensor\n pix = np.array(image, dtype='float32')\n pix = pix[..., 3]\n pix = pix.reshape(1, 28, 28, 1)\n # Normalize from [0-255] -> [0,1]\n pix 
/= 255\n return pix", "def data(self) -> np.ndarray:\n return self._data", "def carla_rgb_image_to_ndarray(image: carla.Image) -> np.ndarray: # pylint: disable=no-member\n image.convert(carla.ColorConverter.Raw) # pylint: disable=no-member\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = array.astype(np.float32) / 255\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1]\n return array", "def read_img(path):\n img = Image.open(path)\n img_arr = np.array(img, dtype='int32')\n img.close()\n return img_arr", "def data(self) -> List[ndarray]:\n return self._data", "def get_array(self):\n return numpy.array(self._ar)", "def read_image_to_array(image_name='default.png'):\n\timg = Image.open(image_name)\n\tres_arr = []\n\n\tfor y in range(img.height):\n\t\tres_arr.append([])\n\n\t\tfor x in range(img.width):\n\t\t\tpixel = img.getpixel((x,y))\n\t\t\tres_arr[y].append(is_there_life(pixel))\n\treturn res_arr", "def load_image_into_numpy_array(path):\n img_data = tf.io.gfile.GFile(path, 'rb').read()\n image = Image.open(BytesIO(img_data))\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)", "def load_image_into_numpy_array(path):\r\n \r\n return np.array(Image.open(path))", "def load_image_into_numpy_array(path):\n return np.array(Image.open(path))", "def load_image_into_numpy_array(path):\n return np.array(Image.open(path))", "def AsNumpy(self):\n return np.array([\n self._center[0], self._center[1], self._width, self._length, self._angle\n ])", "def vtk_image_to_numpy(image):\n data = vtk_to_numpy(image.GetPointData().GetScalars())\n data.shape = get_vtk_image_shape(image)\n return data", "def _load(self) -> np.ndarray:\n with self._fs.open(self._filepath, mode=\"r\") as f:\n image = Image.open(f).convert(\"RGBA\")\n return np.asarray(image)", "def imagefile_to_array(imagefname):\n with Image.open(imagefname) as image: \n im_arr = np.fromstring(image.tobytes(), dtype=np.uint8)\n rows = image.size[1]\n cols = image.size[0]\n no_channels = int(len(im_arr)/rows/cols)\n im_arr = im_arr.reshape((rows, cols, no_channels))\n im_arr = np.rollaxis(im_arr,-1)\n return im_arr", "def as_numpy_array(self):\n return self.frame", "def data(self) -> List[JpegImageFile]:\n return self._data", "def imageToArray(i):\r\n a=gdalnumeric.numpy.fromstring(i.tostring(),'b')\r\n a.shape=i.im.size[1], i.im.size[0]\r\n return a", "def get_image_array(self):\n with picamera.array.PiRGBArray(self.camera) as output:\n self.camera.resolution = (640, 480)\n self.camera.capture(output, 'rgb')\n logging.info(\"Captured image of size {0}x{1}x{2}\".format(\n output.array.shape[0], output.array.shape[1], output.array.shape[2]))\n output.truncate(0)\n return output.array\n # self.camera.capture_continuous(self.stream, format='jpeg', use_video_port=True)\n # self.stream.seek(0)\n # image = Image.open(self.stream).convert('RGB').resize((self._input_width, self._input_height), Image.ANTIALIAS)\n # self.stream.seek(0)\n # self.stream.truncate()\n # self.camera.close()", "def getData(self):\n return self._array", "def makeArray(imagePath):\r\n array = None\r\n imageExists = (imagePath and len(imagePath)>0 and len(glob(imagePath))>0)\r\n if (imageExists):\r\n try:\r\n image = gdal.Open(imagePath)\r\n array = image.ReadAsArray()\r\n except:\r\n print \"Could not open/convert src land use image\"\r\n else:\r\n print \"File: %s successfully read and converted to array.\" % 
(imagePath)\r\n else:\r\n print \"LandUse.makeArray says that the imagePath: %s is not correct\" % (imagePath)\r\n\r\n DTYPE = numpy.uint8\r\n array.dtype = DTYPE\r\n return array", "def features_to_np_array(self, images):\n \n images = list(images)\n \n images = np.stack(images, axis=0)\n \n return images", "def to_fits_array(self):\n return self.data", "def get_image_array_from_example(example):\n features = example.features.feature\n img = features['image/encoded'].bytes_list.value[0]\n shape = features['image/shape'].int64_list.value[0:3]\n return np.frombuffer(img, np.uint8).reshape(shape)", "def data(self):\n return self.image", "def data():\n return RaggedArray(\n [[0, 1], [1, 2, 3, 4], [], [-1, -2], []]*20, dtype='float64')", "def array(self):\n return self.get_array()", "def data(self):\n self._data: np.ndarray\n return self._data", "def toNpArray(row):\n image = row[0]\n height = image.height\n width = image.width\n nChannels = image.nChannels\n\n return np.ndarray(\n shape=(height, width, nChannels),\n dtype=np.uint8,\n buffer=image.data,\n strides=(width * nChannels, nChannels, 1))", "def readArray(input):\n data = gdal.Open(input)\n band = data.GetRasterBand(1)\n \n return band.ReadAsArray()", "def read(self) -> np.array:\n return self._stream.read(self._frame_size)", "def to_numpy(self) -> np.ndarray:\n return self.frame", "def get_data(self):\n oshape = (ctypes.c_uint * 4)()\n ostride = ctypes.c_uint()\n ret = cxnlib.CXNIOGetData(self.handle,\n oshape, ctypes.byref(ostride))\n return ctypes2numpyT(ret, [x for x in oshape], 'float32', ostride.value)", "def raw_image(self):\n return self.data16.transpose()", "def load_image_into_numpy_array(img_path, mode=\"int32\"):\n try:\n img = Image.open(img_path)\n img.load()\n data = np.asarray(img, dtype=mode)\n return data\n except PIL.UnidentifiedImageError:\n logging.warning(\"Can't load file! Deleting this file...\")\n os.remove(img_path)\n return None", "def array(self):\n return np.array([self.w, self.x, self.y, self.z])", "def imgsz(self) -> np.ndarray:\n return self._vector[6:8].astype(int)", "def _image_data(self):\n if self.header['Image']['bytes per pixel'] == 2:\n # 16-bit unsigned integers, short\n dt = np.dtype(self._bo + 'H')\n elif self.header['Image']['bytes per pixel'] == 4:\n # 32-bit unsigned integers, int\n dt = np.dtype(self._bo + 'I')\n\n shape = [self.header['Image']['planes'],\n self.header['Image']['masses'],\n self.header['Image']['height'],\n self.header['Image']['width']]\n\n self.fh.seek(self.header['header size'])\n\n compressedfiles = (\n gzip.GzipFile,\n bz2.BZ2File,\n tarfile.ExFileObject,\n lzma.LZMAFile,\n io.BytesIO\n )\n\n # fromfile is about 2x faster than frombuffer(fh.read())\n if isinstance(self.fh, compressedfiles):\n data = np.frombuffer(self.fh.read(), dtype=dt).reshape(shape)\n else:\n data = np.fromfile(self.fh, dtype=dt).reshape(shape)\n\n # We want to have a cube of contiguous data (stacked images) for each\n # mass. Swap axes 0 and 1. 
Returns a view, so make full copy to make\n # data access faster.\n data = data.swapaxes(0, 1).copy()\n\n self.data = xarray.DataArray(data,\n dims=('species', 'frame', 'y', 'x'),\n coords={'species': ('species', list(self.header['label list']))},\n attrs={'unit': 'counts'})", "def readData():\n\tN = 800\n\tD = 28*28\n\tX = np.zeros((N, D), dtype=np.uint8)\n\n\tf = open(\"data/a012_images.dat\", 'rb')\n\n\tfor i in range(0, N):\n\t\tX[i, :] = np.fromstring(f.read(D), dtype='uint8')\n\n\tf.close()\n\n\treturn X", "def img2np_pillow(filename):\n with Image.open(filename) as image:\n nparr = np.fromstring(image.tobytes(), dtype=np.uint8)\n nparr = im_arr.reshape((image.size[1], image.size[0], 3))\n return nparr", "def array(self):\n aa = list(map(np.asarray, self.loader_array.flat))\n return np.stack(aa, axis=0).reshape(self.output_shape)", "def convert_image_to_array(image):\n w, h = image.size\n\n # we support black and white images only.\n if \"P\" in image.getbands():\n #a = image.split()\n #a = image.tostring()\n img_array = np.fromstring(image.tostring(), dtype=np.uint8)\n return img_array.reshape((w, h))\n else:\n raise TypeError(\"Wrong Image type. Only B&W images are supported\")", "def _get_data(path):\n archive = np.load(path)\n images = archive['faceData']\n return images", "def read(self, index):\n assert type(index) is int\n img = self.db.get_node('/images/img{:04d}'.format(index))\n return np.array(img)", "def to_numpy(self) -> np.ndarray:\n log_advice(\n \"`to_numpy` loads all data into the driver's memory. \"\n \"It should only be used if the resulting NumPy ndarray is expected to be small.\"\n )\n return cast(np.ndarray, self._to_pandas().values)", "def get_array(self, scale=1):\n array = cv2.imread(str(self.path), self.read_type)\n\n # resize original image so it can be be scaled without fractions\n x_extra = array.shape[0] % self.scaling\n y_extra = array.shape[1] % self.scaling\n\n x_extra = self.scaling - x_extra if x_extra != 0 else x_extra\n y_extra = self.scaling - y_extra if y_extra != 0 else y_extra\n\n padded_array = cv2.resize(\n array, (int(array.shape[1] + y_extra), int(array.shape[0] + x_extra))\n )\n\n # scale image\n resized_array = cv2.resize(\n padded_array,\n (int(padded_array.shape[1] * scale), int(padded_array.shape[0] * scale)),\n )\n\n # cv2 reads in array as BGR, tensorboard shows as RGB\n if not self.greyscale:\n x = np.copy(resized_array)\n resized_array[:, :, 0] = x[:, :, 2]\n resized_array[:, :, 2] = x[:, :, 0]\n\n # cv2.imshow('image',array)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n if self.greyscale:\n resized_array = np.expand_dims(resized_array, 2)\n return resized_array", "def ndarray(self):\n if self._coord_format != constants.MatrixCoordinateDefault:\n self._logger.error(\"invalid coordinate format\")\n raise NotImplementedError(\"invalid coordinate format\")\n\n data = self.clear().data.collect()\n\n result = np.zeros(self._shape, dtype=self._dtype)\n\n for e in data:\n result[e[0], e[1]] = e[2]\n\n return result", "def numpy(self) -> np.ndarray:\n return self.tensor.numpy()", "def loadImage(img_path):\n\n img = Image.open(img_path)\n np_img = np.array(img)\n return (np_img)", "def get_raw_data(self):\n if self._img and self.is_4d():\n temp = self._img.get_data(caching='unchanged')\n temp = np.rot90(temp)\n for tp in self._loaded_time_list:\n temp[..., tp] = self._data[..., tp]\n else:\n temp = self._data.copy()\n\n return np.rot90(temp, 3)", "def read_image(filename):\n\n from matplotlib.image import pil_to_array\n\n with 
Image.open(filename) as image:\n return pil_to_array(image)", "def get_data ( self ):\n return self._data_pntr.ReadAsArray()", "def get_data ( self ):\n return self._data_pntr.ReadAsArray()", "def _colored_img_to_arr(image, verbose=False):\n height, width = image.size\n arr = np.array(image.getdata())\n arr = arr.reshape(3, height, width)\n r = arr[0]\n g = arr[1]\n b = arr[2]\n return r, g, b", "def tiff_to_ndarray(fn):\n return tifffile.imread(fn)", "def get_arr_from_nii(nii_path: str) -> np.ndarray:\n nii = nib.load(nii_path)\n return nii.get_data()", "def asarray(self):\n from numpy import asarray\n return asarray(self)", "def get_raw(self) -> bytearray:\n img_bytes = bytearray()\n for i in range(self.grid_size[0]):\n if self.grid[i] is not None:\n for j in range(self.grid_size[1]):\n if self.grid[i][j] is not None:\n color = self.grid[i][j]\n color = color.get_byte_representation()\n for k in range(len(color)):\n img_bytes.append(color[k])\n return img_bytes", "def get_correction_array(self):\n import numpy\n\n # Select the first datablock and rewind all the categories\n self.cbf_handle.select_datablock(0)\n self.cbf_handle.select_category(0)\n self.cbf_handle.select_column(2)\n self.cbf_handle.select_row(0)\n\n # Check the type of the element to ensure it's a binary\n # otherwise raise an exception\n type = self.cbf_handle.get_typeofvalue()\n if type.find('bnry') > -1:\n\n # Read the image data into an array\n image_string = self.cbf_handle.get_integerarray_as_string()\n image = numpy.fromstring(image_string, numpy.int32)\n\n # Get the array parameters\n parameters = self.cbf_handle.get_integerarrayparameters_wdims()\n image_size = (parameters[10], parameters[9])\n\n # Resize the image\n image.shape = (image_size)\n\n else:\n raise TypeError('Can\\'t find image')\n\n # Return the image\n return image", "def img_to_array(img, data_format=None, dtype=None):\n if data_format is None:\n data_format = settings.DEFAULT_IMAGE_DATA_FORMAT\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format: ', data_format)\n # Numpy array x has format (height, width, channel)\n # or (channel, height, width)\n # but original PIL image has format (width, height, channel)\n if dtype is None:\n x = np.asarray(img, dtype=settings.DEFAULT_NUMPY_FLOAT_DTYPE)\n else:\n x = np.asarray(img, dtype=dtype)\n\n if len(x.shape) == 3:\n if data_format == 'channels_first':\n x = x.transpose(2, 0, 1)\n elif len(x.shape) == 2:\n if data_format == 'channels_first':\n x = x.reshape((1, x.shape[0], x.shape[1]))\n else:\n x = x.reshape((x.shape[0], x.shape[1], 1))\n else:\n raise ValueError('Unsupported image shape: ', x.shape)\n return x", "def to_numpy(self):\n return numpy.vstack((self.mz, self.intensities)).T", "def load_data() -> np.ndarray:\n \n # Create a data directory if it doesn't exist.\n data_dir_path = find_or_create_dir(\"data\")\n \n # Download the data file if it doesn't exist.\n data_file_path = os.path.join(data_dir_path, \"Testdata.mat\")\n if not os.path.exists(data_file_path):\n print(\"Downloading data file...\")\n data_url = \"https://bea-portfolio.s3-us-west-2.amazonaws.com/denoising-3D-scans/Testdata.mat\"\n with urlopen(data_url) as response:\n with open(data_file_path, \"wb\") as data_file:\n shutil.copyfileobj(response, data_file)\n print(\"Done downloading data file.\")\n\n # Load data into memory.\n data_file = loadmat(data_file_path, struct_as_record=False)\n data = data_file['Undata']\n # data.shape is 20 x 262144\n\n return data", "def get_data():\n 
data = [np.array([32.,595.]),\n np.array([30.,599.]),\n np.array([18.,622.]),\n np.array([51.,606.]),\n np.array([38.,578.])]\n return data", "def to_array(self) -> np.ndarray:\n return self.A", "def get_itk_array(path_or_image):\n\n if isinstance(path_or_image, str):\n image = get_itk_image(path_or_image)\n else:\n image = path_or_image\n\n arr = itk.GetArrayFromImage(image)\n\n return arr", "def carla_cityscapes_image_to_ndarray(image: carla.Image) -> np.ndarray: # pylint: disable=no-member\n image.convert(carla.ColorConverter.CityScapesPalette) # pylint: disable=no-member\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = array.astype(np.float32) / 255\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1]\n return array", "def get_y(img):\n\theight = img.shape[0]\n\twidth = img.shape[1]\n\ty_data = np.empty([height,width])\n\tfor i in np.arange(height):\n\t\tfor j in np.arange(width):\n\t\t\ty_data[i][j] = img[i][j][0]\n\treturn y_data", "def arr(self):\n return self._arr", "def __array__(self, dtype=None) -> np.ndarray:\n return self.values", "def array(self):\n return stack_loader_array(self.loader_array).reshape(self.output_shape)", "def get_numpy_array(self):\r\n\r\n # This holds the obect's spectral data, and will be passed to\r\n # numpy.array() to create a numpy array (matrix) for the matrix math\r\n # that will be done during the conversion to XYZ.\r\n values = []\r\n\r\n # Use the required value list to build this dynamically. Default to\r\n # 0.0, since that ultimately won't affect the outcome due to the math\r\n # involved.\r\n for val in self.VALUES:\r\n values.append(getattr(self, val, 0.0))\r\n\r\n # Create and the actual numpy array/matrix from the spectral list.\r\n color_array = numpy.array([values])\r\n return color_array", "def load_img(path: str) -> np.ndarray:\n \n return np.array(Image.open(path))", "def fig2array(fig):\n fig.canvas.draw()\n w, h = fig.canvas.get_width_height()\n buf = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)\n buf.shape = (w, h, 3)\n return buf", "def _to_numpy_ndarray(cls, data):\n if isinstance(data, np.ndarray):\n return data\n arr = np.array(data, dtype=np.float)\n if len(arr.shape) == 1:\n arr = np.reshape(arr, newshape=(1, arr.shape[0]))\n return arr", "def getimg(filename):\n return np.asarray(Image.open('imgdb/'+filename))", "def __array__(self, *args, **kwargs):\n\n return self.data", "def load_image_into_numpy_array(path):\n MAX_SIZE = (1440, 1080)\n # img_data = tf.io.gfile.GFile(path, 'rb').read()\n image = Image.open(path)\n # print(\"Image size before: {}\".format(image.size))\n image.thumbnail(MAX_SIZE)\n # print(\"Image size after resized: {}\".format(image.size))\n (im_width, im_height) = image.size\n return np.array(image).astype(np.uint8)" ]
[ "0.78545177", "0.78410083", "0.77582353", "0.75947237", "0.7353946", "0.7335702", "0.7164609", "0.7154952", "0.7121806", "0.7093889", "0.7029067", "0.7018716", "0.7007183", "0.7005928", "0.7005361", "0.69826096", "0.6962062", "0.6913088", "0.69027466", "0.6895016", "0.68866277", "0.6886136", "0.68799126", "0.6878591", "0.6844954", "0.681627", "0.6811979", "0.6805302", "0.6795753", "0.6781611", "0.6757598", "0.6741874", "0.67374206", "0.67374206", "0.6694875", "0.6686584", "0.6671571", "0.6666458", "0.66656226", "0.6654805", "0.6600016", "0.65951", "0.65938383", "0.65705943", "0.656358", "0.6549874", "0.6535568", "0.6524627", "0.6519505", "0.6511918", "0.6510487", "0.6506209", "0.6498737", "0.6493321", "0.6488225", "0.6480754", "0.64704657", "0.6467341", "0.6465266", "0.64398444", "0.6433447", "0.6426784", "0.6421605", "0.639767", "0.63955253", "0.6380657", "0.63785946", "0.63574356", "0.63486385", "0.6338367", "0.633455", "0.6334295", "0.6333234", "0.63300735", "0.6326275", "0.6326275", "0.6323831", "0.63200325", "0.62972564", "0.6296652", "0.6294092", "0.6286552", "0.6286162", "0.62808275", "0.62782556", "0.6275793", "0.625156", "0.62506914", "0.62448716", "0.62361246", "0.6235492", "0.6233427", "0.6223751", "0.6223743", "0.62222284", "0.62182754", "0.6217405", "0.6215582", "0.6207915", "0.6204349" ]
0.7611293
3
Get the in-plane angle from two images and output the cross-correlation value. The function does not destroy the two input images. This is the angle that rotates the first image, ima, into the second image, ref. The sense of the rotation is clockwise. center=1 means the image is first centered, then the rotation angle is found.
def get_inplane_angle(ima, ref, iring=1, fring=-1, ringstep=1, xtransSearch=0, ytransSearch=0, stp=1, center=1):
    from alignment import Numrinit, ringwe, Applyws, ormq
    from filter import fshift

    first_ring=int(iring); last_ring=int(fring); rstep=int(ringstep); xrng=int(xtransSearch); yrng=int(ytransSearch); step=int(stp)
    nx = ima.get_xsize()
    if(last_ring == -1): last_ring = int(nx/2)-2
    cnx = int(nx/2)+1
    cny = cnx
    mode = "F"
    #precalculate rings
    numr = Numrinit(first_ring, last_ring, rstep, mode)
    wr = ringwe(numr, mode)
    if(center == 1):
        cs = [0.0]*2  # additio
        cs = ref.phase_cog()
        ref1 = fshift(ref, -cs[0], -cs[1])
        cimage = Util.Polar2Dm(ref1, cnx, cny, numr, mode)
        cs = ima.phase_cog()
        ima1 = fshift(ima, -cs[0], -cs[1])
    else:
        ima1 = ima.copy()
        cimage = Util.Polar2Dm(ref, cnx, cny, numr, mode)
    Util.Frngs(cimage, numr)
    Applyws(cimage, numr, wr)
    [angt, sxst, syst, mirrort, peakt] = ormq(ima1, cimage, xrng, yrng, step, mode, numr, cnx, cny)
    return angt, sxst, syst, mirrort, peakt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_opt_rotate(obj_img, back_img,\n back_center_x, back_center_y,\n obj_center_x, obj_center_y,\n prev_rot_angle=0.,\n is_erosion=False):\n width = obj_img.shape[0]\n rot_img = ndimage.rotate(obj_img, prev_rot_angle, reshape=False)\n induce_x, induce_y = int(back_center_x - obj_center_x), int(back_center_y - obj_center_y)\n combine_img = back_img.copy()\n combine_img[induce_y:induce_y + width, induce_x:induce_x + width] -= rot_img\n neg_count = len(np.argwhere(combine_img < 0))\n if is_erosion:\n angle_amount = 4.\n else:\n angle_amount = 16.\n # check combine_img.dtype; rot_img.dtype; back_img\n curr_angle = prev_rot_angle\n while angle_amount > 0.5:\n angle_amount /= 2.\n\n rotate_1 = ndimage.rotate(obj_img, curr_angle + angle_amount, reshape=False)\n combine_img = back_img.copy()\n combine_img[induce_y:induce_y+width, induce_x:induce_x+width] -= rotate_1\n neg_count_1 = len(np.argwhere(combine_img < 0))\n\n rotate_2 = ndimage.rotate(obj_img, curr_angle - angle_amount, reshape=False)\n combine_img = back_img.copy()\n combine_img[induce_y:induce_y + width, induce_x:induce_x + width] -= rotate_2\n neg_count_2 = len(np.argwhere(combine_img < 0))\n\n if neg_count_1 < neg_count_2:\n if neg_count_1 < neg_count:\n neg_count = neg_count_1\n curr_angle = curr_angle + angle_amount\n else:\n if neg_count_2 < neg_count:\n neg_count = neg_count_2\n curr_angle = curr_angle - angle_amount\n # print(curr_angle)\n # print(neg_count, neg_count_1, neg_count_2)\n # print('Negative Pix Count Rotation: %d.' % neg_count)\n # print('Optimal Rotation: ', curr_angle)\n return curr_angle, neg_count", "def get_rotation_angle(prev_image, curr_image, size_of_cropped_image):\n max_value = np.amax(prev_image)\n\n if prev_image.dtype == 'float' and max_value <= 1:\n prev_image = np.uint8(prev_image * 255)\n curr_image = np.uint8(curr_image * 255)\n\n if prev_image.dtype == 'float' and max_value > 1:\n prev_image = np.uint8(prev_image)\n curr_image = np.uint8(curr_image)\n\n prev_image = cv.equalizeHist(prev_image)\n curr_image = cv.equalizeHist(curr_image)\n\n # Initiate ORB detector\n orb = cv.ORB_create(nfeatures=200)\n\n # find the keypoints and descriptors with ORB\n kp1, des1 = orb.detectAndCompute(prev_image, None)\n kp2, des2 = orb.detectAndCompute(curr_image, None)\n\n # do feature matching\n bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)\n matches = bf.match(des1, des2)\n matches = sorted(matches, key=lambda x: x.distance)\n\n # calculate perspective transform matrix\n src_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)\n dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)\n transform_matrix, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)\n\n if transform_matrix is None:\n transform_matrix, mask = cv.findHomography(src_pts, dst_pts, 0)\n\n if transform_matrix is None:\n transform_matrix, mask = cv.findHomography(src_pts, dst_pts, 0)\n\n vector_along_x_axis_from_center = \\\n np.float32([[size_of_cropped_image / 2, size_of_cropped_image / 2],\n [size_of_cropped_image, size_of_cropped_image / 2]]).reshape(-1, 1, 2)\n vector_transformed = cv.perspectiveTransform(vector_along_x_axis_from_center, transform_matrix)\n\n theta = - np.arctan2(vector_transformed[1, 0, 1] - vector_transformed[0, 0, 1],\n vector_transformed[1, 0, 0] - vector_transformed[0, 0, 0]) * 180 / np.pi\n # negative sign is to make the sign of the angle to correspond to one in a right-handed coordinate system\n return theta", "def 
calculate_translation(reference_im:np.ndarray, \n target_im:np.ndarray,\n ref_to_tar_rotation:np.ndarray=None,\n use_autocorr:bool=True,\n alignment_kwargs:dict={},\n verbose:bool=True,\n ):\n from math import pi\n import cv2\n ## quality check\n # images\n if np.shape(reference_im) != np.shape(target_im):\n raise IndexError(f\"two images should be of the same shape\")\n # rotation matrix\n if ref_to_tar_rotation is None:\n ref_to_tar_rotation = np.diag([1,1])\n elif np.shape(ref_to_tar_rotation) != tuple([2,2]):\n raise IndexError(f\"wrong shape for rotation matrix, should be 2x2. \")\n # get dimensions\n _dz,_dx,_dy = np.shape(reference_im)\n # calculate angle\n if verbose:\n print(f\"-- start calculating drift with rotation between images\")\n _rotation_angle = np.arcsin(ref_to_tar_rotation[0,1])/pi*180\n _temp_new_rotation_M = cv2.getRotationMatrix2D((_dx/2, _dy/2), _rotation_angle, 1) # temporary rotation angle\n # rotate image\n if _rotation_angle != 0:\n _rot_target_im = np.array([cv2.warpAffine(_lyr, _temp_new_rotation_M, \n _lyr.shape, borderMode=cv2.BORDER_DEFAULT) \n for _lyr in target_im], dtype=reference_im.dtype)\n else:\n _rot_target_im = target_im\n # calculate drift \n _drift, _drift_flag = align_image(\n _rot_target_im,\n reference_im,\n precision_fold=10,\n use_autocorr=use_autocorr,\n verbose=verbose,\n #detailed_verbose=verbose,\n **alignment_kwargs,)\n\n if verbose:\n print(f\"--- drift: {np.round(_drift,2)} pixels\")\n \n return _rot_target_im, ref_to_tar_rotation, _drift", "def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0", "def find_rotation(a, b):\n a.shape = (3,)\n b.shape = (3,)\n\n a /= np.linalg.norm(a)\n b /= np.linalg.norm(b)\n \n v = np.cross(a, b)\n \n angle_AB = -1*vector_angle(a, b) \n \n print(angle_AB)\n s = np.linalg.norm(v) * np.sin(angle_AB)\n \n c = np.dot(a, b) * np.cos(angle_AB)\n \n # Rotation matrix, R = I + Vx + Vx^2 * (1-c)/s^2\n I = np.identity(3)\n Vx = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])\n \n R = I + Vx + np.linalg.matrix_power(Vx, 2) / (1+c)\n return R", "def angle(o1,o2):\n\n o1 = np.array(o1)\n o2 = 
np.array(o2)\n\n o1a = o1[0:3]\n o1b = o1[3:6]\n \n o2a = o2[0:3]\n o2b = o2[3:6]\n\n norm_a = np.linalg.norm(o1a) * np.linalg.norm(o2a)\n norm_b = np.linalg.norm(o1b) * np.linalg.norm(o2b)\n\n dot_a = np.dot(o1a,o2a) / norm_a\n dot_b = np.dot(o1b,o2b) / norm_b\n \n if dot_a > 1.0 and dot_a - 1.0 <= np.finfo(dot_a.dtype).eps:\n dot_a = 1.0\n \n if dot_b > 1.0 and dot_b - 1.0 <= np.finfo(dot_b.dtype).eps:\n dot_b = 1.0\n\n angle_a = np.arccos(dot_a) * (180.0 / np.pi)\n angle_b = np.arccos(dot_b) * (180.0 / np.pi)\n\n return (angle_a, angle_b)", "def collisionAngle(obj1, obj2):\n vec1 = obj1.vec\n vec2 = obj2.vec\n n1 = np.linalg.norm(vec1)\n n2 = np.linalg.norm(vec2)\n return abs(np.cross(vec1,vec2)/(n1*n2))", "def ICAngles(image, keypoints, half_patch_size, u_max):\n \n kp_position = cv.KeyPoint_convert(keypoints)\n ptsize = len(kp_position)\n \n for ptidx in range(ptsize):\n \n m_01 = 0\n m_10 = 0\n\n for u in range(-half_patch_size, half_patch_size+1):\n m_10 = m_10 + u * image[int(kp_position[ptidx,1]),int(kp_position[ptidx,0])+u]\n \n for v in range(1,half_patch_size+1):\n v_sum = 0\n d = u_max[v]\n for u in range(-d, d-1):\n val_plus = int(image[int(kp_position[ptidx,1])+v, int(kp_position[ptidx,0])+u])\n val_minus = int(image[int(kp_position[ptidx,1])-v, int(kp_position[ptidx,0])+u])\n v_sum = v_sum + (val_plus - val_minus)\n m_10 = m_10 + u * (val_plus + val_minus)\n m_01 = m_01 + v * v_sum\n \n keypoints[ptidx].angle = math.atan2(float(m_01), float(m_10))", "def rotation_alignment(referent_shape, current_shape):\n numerator = 0.\n denominator = 0.\n\n for i in range(len(referent_shape.points)):\n numerator += current_shape.points[i, 0] * referent_shape.points[i, 1] - current_shape.points[i, 1] * referent_shape.points[i, 0]\n denominator += current_shape.points[i, 0] * referent_shape.points[i, 0] + current_shape.points[i, 1] * referent_shape.points[i, 1]\n\n return math.atan2(numerator, denominator)", "def rot_center(image, rect, angle):\n\trot_image = pygame.transform.rotate(image, angle)\n\trot_rect = rot_image.get_rect(center=rect.center)\n\treturn rot_image,rot_rect", "def matrix_angle( B, A ):\n Aflat = A.reshape(-1)\n Aflat = unit_vector(Aflat)\n Bflat = B.reshape(-1)\n Bflat = unit_vector(Bflat)\n #return np.arccos((np.dot( Aflat, Bflat ) / max( np.linalg.norm(Aflat) * np.linalg.norm(Bflat), 1e-10 )))\n return np.arccos(np.clip(np.dot(Aflat, Bflat), -1.0, 1.0))", "def rot_center(image,rect,angle):\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = rot_image.get_rect(center=rect.center)\n return rot_image, rot_rect", "def get_angle(v1, v2):\n return np.arccos(np.dot(v1, v2))", "def get_intersect_angle(self, p0, p1, p2):\n u, v = p1-p0, p2-p0\n costheta = u.dot(v) / math.sqrt(u.dot(u) * v.dot(v))\n return math.degrees(math.acos(costheta))", "def angle_between_vectors(vect_ref, vect):\n\n c = np.dot(vect_ref.T, vect) / (np.linalg.norm(vect_ref) * np.linalg.norm(vect))\n angle = np.arccos(np.clip(c, -1, 1))\n\n return angle", "def pairwise_iou_rotated(boxes1, boxes2):\n return torch.ops.detectron2.box_iou_rotated(boxes1, boxes2)", "def image_correlation(image1, image2):\n im1=im_to_coord(image1)\n im2=im_to_coord(image2)\n z1=im1[:,2]\n z2=im2[:,2]\n mu_z1 = z1.mean()\n mu_z2 = z2.mean()\n n = z1.shape[0]\n s_z1 = z1.std(0, ddof=n - 1)\n s_z2 = z2.std(0, ddof=n - 1)\n cov = np.dot(z1,\n z2.T) - n * np.dot(mu_z1,\n mu_z2)\n return cov / np.dot(s_z1, s_z2)", "def get_angle(v1,v2) :\n\n if (np.linalg.norm(v1)*np.linalg.norm(v2)) != 0 : \n cosangle = 
np.dot(v1,v2)/(np.linalg.norm(v1)*np.linalg.norm(v2))\n cosangle = np.maximum(-1,np.minimum(1, cosangle))\n angle = np.arccos(cosangle) \n if np.cross(v1,v2) < 0 :\n angle = 2*np.pi - angle \n return angle\n return None", "def gonio_axis_align():\n \n # Invert camera image, so dark pin on light image becomes a peak\n cam_7.proc1.scale.put(-1)\n cam_8.proc1.scale.put(-1)\n \n # High threshold, so AD centroid doesn't interpret background\n cam_8ThresholdOld = cam_8.stats4.centroid_threshold.get()\n cam_8.stats4.centroid_threshold.put(150)\n cam_7ThresholdOld = cam_7.stats4.centroid_threshold.get()\n cam_7.stats4.centroid_threshold.put(150)\n \n # HiMag\n # Copy ROI2 geometry (HiMag Mag3) to ROI4 and use ROI4 centroid plugin\n cam_8.roi4.min_xyz.min_x.put(cam_8.roi2.min_xyz.min_x.get())\n cam_8.roi4.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get())\n cam_8.roi4.size.x.put(cam_8.roi2.size.x.get() * 0.20)\n cam_8.roi4.size.y.put(cam_8.roi2.size.y.get())\n cam_8.roi4.min_xyz.min_x.put(cam_8.roi2.min_xyz.min_x.get() + cam_8.roi2.size.x.get()/2 - cam_8.roi4.size.x.get()/2)\n \n # LoMag\n # Copy ROI2 geometry (LoMag Mag1) to ROI4 and use ROI4 centroid plugin\n cam_7.roi4.min_xyz.min_x.put(cam_7.roi2.min_xyz.min_x.get())\n cam_7.roi4.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get())\n cam_7.roi4.size.x.put(cam_7.roi2.size.x.get() * 0.05)\n cam_7.roi4.size.y.put(cam_7.roi2.size.y.get())\n cam_7.roi4.min_xyz.min_x.put(cam_7.roi2.min_xyz.min_x.get() + cam_7.roi2.size.x.get()/2 - cam_7.roi4.size.x.get()/2)\n \n centerPinYHiMag0 = centroid_avg(cam_8.stats4)[1]\n centerPinYLoMag0 = centroid_avg(cam_7.stats4)[1]\n yield from bps.mvr(gonio.o,180)\n time.sleep(2)\n centerPinYHiMag180 = centroid_avg(cam_8.stats4)[1]\n centerPinYLoMag180 = centroid_avg(cam_7.stats4)[1]\n centerPinYHiMag = (centerPinYHiMag0 + centerPinYHiMag180)/2\n centerPinYLoMag = (centerPinYLoMag0 + centerPinYLoMag180)/2\n\n centerPinOffsYHiMag = centerPinYHiMag - cam_8.roi4.size.y.get() / 2\n centerPinOffsYLoMag = centerPinYLoMag - cam_7.roi4.size.y.get() / 2\n \n # Correct Mag 3 (cam_8 ROI2)\n cam_8.roi2.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get() + centerPinOffsYHiMag)\n # Correct Mag 4 (cam_8 ROI1)\n cam_8.roi1.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get() + (cam_8.roi2.size.y.get()-cam_8.roi1.size.y.get())/2)\n \n # Correct Mag 1 (cam_7 ROI2)\n cam_7.roi2.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get() + centerPinOffsYLoMag)\n # Correct Mag 2 (cam_7 ROI3)\n cam_7.roi3.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get() + (cam_7.roi2.size.y.get()-cam_7.roi3.size.y.get())/2)\n\n # De-invert image\n cam_7.proc1.scale.put(-1)\n cam_8.proc1.scale.put(-1)\n \n # Set thresold to previous value\n cam_8.stats4.centroid_threshold.put(cam_8ThresholdOld)\n cam_7.stats4.centroid_threshold.put(cam_7ThresholdOld)\n \n return", "def rot_center(self, image, position, angle):\n rect = image.get_rect().move(*position)\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = rot_image.get_rect(center=rect.center)\n return rot_image, rot_rect", "def img_rotate(img, angle, center, fillval=0):\n rows, cols = img.shape[:2]\n M = cv2.getRotationMatrix2D(center, angle, 1)\n return cv2.warpAffine(img, M, (cols, rows), borderValue=fillval)", "def angle(a,b):\n return acos(np.dot(a,b)/np.linalg.norm(a)/np.linalg.norm(b))", "def compute_angle(v1, v2):\n cosang = np.dot(v1, v2)\n sinang = la.norm(np.cross(v1, v2))\n angle = np.arctan2(sinang, cosang)\n return angle", "def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = 
np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right rotation\n if octantA2 in [2, 3, 5, 8]: \n theta = -theta \n print \"theta =\" , np.rad2deg(theta)\n RotY = np.matrix( [ [ np.cos(theta), 0.0, np.sin(theta) ],\n [ 0.0 , 1.0, 0.0 ],\n [ -np.sin(theta), 0.0, np.cos(theta) ]\n ]) \n \n psi = np.arctan( abs( adir[1] / adir[0] ) )\n #[2, 4, 6, 8 ] => left rotation\n #[1, 3, 5, 7 ] => right rotation\n if octantA2 in [1, 3, 5, 7]:\n psi = -psi\n print \"psi =\" , np.rad2deg(psi)\n RotZ = np.matrix( [ [ np.cos(psi), -np.sin(psi), 0.0 ],\n [ np.sin(psi), np.cos(psi), 0.0 ],\n [ 0.0 , 0.0 , 1.0 ]\n ])\n return np.asarray( RotY * RotZ )", "def orientToXYZR( a, b ):\n if allclose(a,b):\n return (0,1,0,0)\n an,bn = normalise( (a,b) )\n angle = arccos(dot(an,bn))\n x,y,z = crossProduct( a, b )[0]\n if allclose( (x,y,z), 0.0):\n y = 1.0\n return (x,y,z,angle)", "def angle_between(vecs, baseline):\n vecs = CA_coords(vecs)\n baseline = CA_coords(baseline)\n return np.arccos(np.clip(vecs @ baseline.T, -1.0, 1.0))", "def find_rotation(a, b):\n if not np:\n raise PysimmError('pysimm.calc.find_rotation function requires numpy')\n a = np.array(a)\n b = np.array(b)\n\n a_x_b = np.cross(a, b)\n axis = a_x_b / np.linalg.norm(a_x_b)\n theta = acos(np.dot(a, b) / np.linalg.norm(a) / np.linalg.norm(b))\n\n skew = np.matrix([[0, -axis[2], axis[1]],\n [axis[2], 0, -axis[0]],\n [-axis[1], axis[0], 0]])\n\n rot_matrix = np.identity(3) + sin(theta) * skew + (1 - cos(theta)) * skew * skew\n return rot_matrix", "def icp_step(Points1,Points2):\r\n #get the correspondences\r\n S1,S2 = get_correspondences(Points1,Points2)\r\n\r\n # Center the resulting pairs substracting their means\r\n S1_shift, mean1 = subtract_mean(S1)\r\n S2_shift, mean2 = subtract_mean(S2)\r\n\r\n #calculate the error-minimizing rotation\r\n R = compute_error_minimizing_rotation(S1_shift,S2_shift)\r\n #find the t such that R*p+t = R*(p-mean2)+mean1\r\n Rmean2 = [R[0][0]*mean2[0]+R[0][1]*mean2[1],\r\n R[1][0]*mean2[0]+R[1][1]*mean2[1]]\r\n\r\n return R,[-(mean1[0]-Rmean2[0]),-(mean1[1]-Rmean2[1])]", "def orientation(p0, p1, p2):\n\n angle = (p1[1] - p0[1])*(p2[0] - p1[0]) - (p2[1] - p1[1])*(p1[0] - p0[0])\n if angle == 0.0:\n return 0\n elif angle < 0.0:\n return -1\n elif angle > 0.0:\n return 1", "def vrrotvec(a,b):\n a = normalize(a)\n b = normalize(b)\n ax = normalize(np.cross(a,b))\n angle = np.arccos(np.minimum(np.dot(a,b),[1]))\n if not np.any(ax):\n absa = np.abs(a)\n mind = np.argmin(absa)\n c = np.zeros((1,3))\n c[mind] = 0\n ax = normalize(np.cross(a,c))\n r = np.concatenate((ax,angle))\n return r", "def einsum_angle_between (vector_array_1, vector_array_2 ):\r\n\r\n # diagonal of dot product\r\n diag = np.clip (np.einsum ('ij,ij->i', vector_array_1, vector_array_2 ), -1, 1 )\r\n\r\n return np.arccos (diag )", "def find_centre_of_rotation(x1, x2, y1, y2):\n\n # chords of rotation of x, y\n\n cx = x2 - x1\n cy = y2 - y1\n\n # know axis is perpendicular to both of these -> is cross product\n\n axis = cx.cross(cy).normalize()\n\n # normal vector to y chord\n\n ny = component(cy, axis).normalize().cross(axis)\n\n # origin of normal vectors, centre of x, y chords\n\n ox = component(x1 + 0.5 * cx, axis)\n oy = component(y1 + 0.5 * cy, axis)\n\n # determine true origin of rotation - normal vector of x chord, construct\n # right-angle-triangle with hypotenuse from unknown origin of rotation\n # to central point of y 
chord oy, and adjacent the vector parallel to\n # reversed x chord => opposite is on vector from unknown origin of rotation\n # to ox\n\n ncx = cx.normalize()\n h = (oy - ox).dot(ncx)\n d = h / (ny).dot(-ncx)\n return oy + d * ny, axis", "def angle(self, v1, v2):\r\n cosang = np.dot(v1, v2)\r\n sinang = np.linalg.norm(np.cross(v1, v2))\r\n return np.arctan2(sinang, cosang)", "def rot_center(image, angle):\r\n orig_rect = image.get_rect()\r\n rot_image = transform.rotate(image, angle)\r\n rot_rect = orig_rect.copy()\r\n rot_rect.center = rot_image.get_rect().center\r\n rot_image = rot_image.subsurface(rot_rect).copy()\r\n return rot_image", "def find_angle(p1, p2, p3):\n\n BAx = p1[0] - p2[0]\n BAy = p1[1] - p2[1]\n\n BCx = p3[0] - p2[0]\n BCy = p3[1] - p2[1]\n\n a = [BAx, BAy]\n b = [BCx, BCy]\n a_mag = np.linalg.norm(a)\n b_mag = np.linalg.norm(b)\n\n theta = np.arccos(np.dot(a, b) / (a_mag * b_mag))\n\n return math.degrees(theta)", "def _correct_rotation(img: np.ndarray) -> np.ndarray:\n edges = cv2.Canny(img, 50, 150, apertureSize=3)\n lines = cv2.HoughLinesP(\n edges, 1, np.pi / 180, 100, minLineLength=100, maxLineGap=10\n )\n avg_slope = 0\n cnt = 0\n try:\n for line in lines:\n rise = line[0][3] - line[0][1]\n run = line[0][2] - line[0][0]\n if run != 0:\n if rise / run < 0.5:\n avg_slope += rise / run\n cnt += 1\n avg_slope = avg_slope / cnt\n\n image_center = tuple(np.array(img.shape[1::-1]) / 2)\n rot_mat = cv2.getRotationMatrix2D(\n image_center, np.arctan(avg_slope) * 180 / np.pi, 1.0\n )\n result = cv2.warpAffine(img, rot_mat, img.shape[1::-1], flags=cv2.INTER_LINEAR)\n return result\n except TypeError:\n return img", "def cross_correlation_align_single_image(im, ref_im, precision_fold=100,\n all_channels=_allowed_colors, \n ref_all_channels=None, drift_channel='488',\n single_im_size=_image_size,\n num_buffer_frames=_num_buffer_frames,\n num_empty_frames=_num_empty_frames,\n correction_folder=_correction_folder,\n correction_args={},\n return_all=False,\n verbose=True, detailed_verbose=False, \n ):\n \n if verbose:\n print(f\"-- aligning image\", end=' ')\n if isinstance(im, str):\n print(os.path.join(os.path.basename(os.path.dirname(im))), os.path.basename(im), end=' ')\n if isinstance(ref_im, str):\n print('to '+os.path.join(os.path.basename(os.path.dirname(ref_im))), os.path.basename(ref_im), end=' ')\n \n # set default correction args\n _correction_args = {\n 'hot_pixel_corr':True,\n 'hot_pixel_th':4,\n 'z_shift_corr':False,\n 'illumination_corr':True,\n 'illumination_profile':None,\n 'bleed_corr':False,\n 'chromatic_corr':False,\n 'normalization':False,\n }\n _correction_args.update(correction_args)\n \n # check im file type \n if isinstance(im, np.ndarray):\n if verbose:\n print(f\"-> directly use image\")\n _im = im.copy()\n if np.shape(_im) != tuple(np.array(single_im_size)):\n raise IndexError(f\"shape of im:{np.shape(_im)} and single_im_size:{single_im_size} doesn't match!\")\n elif isinstance(im, str):\n if 'correct_fov_image' not in locals():\n from ..io_tools.load import correct_fov_image\n # load image\n _im = correct_fov_image(im, [drift_channel], \n single_im_size=single_im_size, all_channels=all_channels,\n num_buffer_frames=num_buffer_frames, num_empty_frames=num_empty_frames, \n drift=[0,0,0], calculate_drift=False, drift_channel=drift_channel, \n correction_folder=correction_folder, warp_image=False, verbose=detailed_verbose, \n **_correction_args)[0][0]\n # check ref_im file type \n if isinstance(ref_im, np.ndarray):\n if verbose:\n print(f\"-- directly 
use ref_image\")\n _ref_im = ref_im\n if np.shape(_ref_im) != tuple(np.array(single_im_size)):\n raise IndexError(f\"shape of ref_im:{np.shape(_ref_im)} and single_im_size:{single_im_size} doesn't match!\")\n elif isinstance(ref_im, str):\n if 'correct_fov_ref_image' not in locals():\n from ..io_tools.load import correct_fov_image\n _ref_im = correct_fov_image(ref_im, [drift_channel], \n single_im_size=single_im_size, all_channels=all_channels,\n num_buffer_frames=num_buffer_frames, num_empty_frames=num_empty_frames, \n drift=[0,0,0], calculate_drift=False, drift_channel=drift_channel, \n correction_folder=correction_folder, warp_image=False, verbose=detailed_verbose, \n **_correction_args)[0][0]\n \n # align by cross-correlation\n from skimage.registration import phase_cross_correlation\n _start_time = time.time()\n _drift, _error, _phasediff = phase_cross_correlation(_ref_im, _im, \n upsample_factor=precision_fold)\n \n # return\n if return_all:\n return _drift, _error, _phasediff\n else:\n return _drift", "def lookup_rotation(source_frame, target_frame, tf_listener = None):\n\n # Check the tf_listener and create new one if None\n if tf_listener is None:\n tf_listener = tf.TransformListener()\n\n # Get the transforamtion from baselink to frame\n (trans,rot) = tf_listener.lookupTransform(source_frame, target_frame, rospy.Time(0))\n\n # Compute dot product\n d = sum([a * b for (a,b) in zip([0,-1],trans)])\n d = d / math.sqrt(sum([a ** 2 for a in trans[0:2]]))\n\n return math.acos(d)", "def align(img, left_eye, right_eye):\n left_eye_x, left_eye_y = left_eye\n right_eye_x, right_eye_y = right_eye\n point_3rd, direction = (left_eye, -1) if left_eye_y > right_eye_y else (right_eye, 1)\n\n # np.linalg.norm is being used for euclidean distance\n a = np.linalg.norm(np.array(left_eye) - np.array(point_3rd))\n b = np.linalg.norm(np.array(right_eye) - np.array(point_3rd))\n c = np.linalg.norm(np.array(right_eye) - np.array(left_eye))\n\n if b != 0 and c != 0:\n angle = np.arccos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c))\n angle = (angle * 180) / math.pi\n if direction == -1:\n angle = 90 - angle\n img = Image.fromarray(img)\n img = np.array(img.rotate(direction * angle))\n\n return img", "def angle_vecs(vec1,vec2):\n angle=np.arccos(np.dot(vec1,vec2)/(np.linalg.norm(vec1)*np.linalg.norm(vec2)))\n return angle", "def circular_cross_correlation(x, y):\n return tf.real(tf.ifft(tf.multiply(tf.conj(tf.fft(tf.cast(x, tf.complex64))) , tf.fft(tf.cast(y, tf.complex64)))))", "def getOrientationVect(self, a,b):\r\n return np.array(a)-np.array(b)", "def calculate_angle(centre, prev_centre):\n o = centre[1] - prev_centre[1]\n a = centre[0] - prev_centre[0]\n return round(math.degrees(math.atan2(o, a)))", "def analyze_orbit_corrector(OC1, OC2, beamline, phase_beg):\n\n M = np.identity(4)\n OC_parameters = np.zeros(4)\n\n for element in beamline:\n M = np.dot(element.M1, M)\n\n # Since the X and Y are decoupled, we can treat them separately.\n M_x = M[0:2, 0:2]\n M_y = M[2:4, 2:4]\n\n L1 = [[OC1.length/2], [1]]\n L2 = [[OC2.length/2], [1]]\n\n M_OC1 = np.array(OC1.M1)[0:2, 0:2]\n M_OC2 = np.array(OC2.M1)[0:2, 0:2]\n\n # The following part solve the cx_1 and cx_2\n M1_x = np.linalg.multi_dot([M_OC2, M_x, L1])\n M2_x = np.linalg.multi_dot([M_OC2, M_x, M_OC1])\n M_OC_x = np.hstack((M1_x, L2))\n\n OC_parameters[0:2] = -np.linalg.multi_dot([np.linalg.inv(M_OC_x), M2_x, phase_beg[0:2]])\n # The end of the X-part\n\n # The following part solve the cy_1 and cy_2\n M1_y = np.linalg.multi_dot([M_OC2, M_y, L1])\n M2_y = 
np.linalg.multi_dot([M_OC2, M_y, M_OC1])\n M_OC_y = np.hstack((M1_y, L2))\n\n OC_parameters[2:4] = -np.linalg.multi_dot([np.linalg.inv(M_OC_y), M2_y, phase_beg[2:4]])\n # The end of the Y-part\n\n\n return OC_parameters", "def orient(ps, origin, v1, v2):\r\n \r\n ps = np.vstack((v1, v2, ps))\r\n ps -= origin\r\n if ps[0][1] == 0:\r\n a = 0\r\n else:\r\n a = np.arcsin(np.fabs(ps[0][1]) / np.sqrt(ps[0][1] ** 2 + ps[0][2] ** 2))\r\n if (ps[0][1] < 0 <= ps[0][2]) or (ps[0][1] > 0 > ps[0][2]):\r\n a = 2 * np.pi - a\r\n if (ps[0][1] * np.sin(a) + ps[0][2] * np.cos(a)) < 0:\r\n a = np.pi + a \r\n ps = rotate(a, ps, 0)\r\n if ps[0][0] == 0:\r\n b = 0\r\n else:\r\n b = np.arcsin(np.fabs(ps[0][0]) / np.sqrt(ps[0][0] ** 2 + ps[0][2] ** 2))\r\n if (ps[0][0] < 0 and ps[0][2] < 0) or (ps[0][0] > 0 and ps[0][2] > 0):\r\n b = 2 * np.pi - b\r\n if (ps[0][2] * np.cos(b) - ps[0][0] * np.sin(b)) < 0:\r\n b = np.pi + b\r\n ps = rotate(b, ps, 1)\r\n if ps[1][1] == 0:\r\n c = 0\r\n else:\r\n c = np.arcsin(np.fabs(ps[1][1]) / np.sqrt(ps[1][0]**2 + ps[1][1]**2))\r\n if (ps[1][0] < 0 and ps[1][1] < 0) or (ps[1][0] > 0 and ps[1][1] > 0):\r\n c = 2 * np.pi - c\r\n if (ps[1][0] * np.cos(c) - ps[1][1] * np.sin(c)) < 0:\r\n c = np.pi + c\r\n ps = rotate(c, ps, 2)\r\n return ps[2:]", "def getangle(p1, p2):\n\treturn atan2( p2[1]-p1[1], p2[0]-p1[0] )", "def PCA_subtraction(im, ref_lib, num_PCA_modes):\n print('Performing PCA background subtraction using {} modes'.format(num_PCA_modes))\n #concatenate input image into 1-D array\n im_x = im.shape[1]\n im_y = im.shape[0]\n \n im = im.ravel()\n\n num_PCA_modes = np.array(num_PCA_modes)\n \n # reads list of reference frames into data matrix by first concatenating the 2-D .fits images\n # into 1-D arrays and then row stacking these images into a 2-D np.array\n try:\n ref_frames = np.stack([fits.getdata(ref_lib[i]).ravel() for i in range(len(ref_lib))], axis=0)\n except:\n ref_frames = np.stack([ref_lib[i].ravel() for i in range(len(ref_lib))], axis=0)\n\n # subtracts the mean of each reference frame from each reference frame \n ref_frames_mean_sub = ref_frames - np.nanmean(ref_frames, axis=1)[:, None]\n ref_frames_mean_sub[np.where(np.isnan(ref_frames_mean_sub))] = 0\n \n # import pdb; pdb.set_trace()\n # creates covariance matrix from mean subtracted reference frames \n covar_psfs = np.cov(ref_frames_mean_sub)\n tot_basis = covar_psfs.shape[0]\n \n num_PCA_modes = np.clip(num_PCA_modes - 1, 0, tot_basis-1) # clip values, for output consistency we'll keep duplicates\n max_basis = np.max(num_PCA_modes) + 1 # maximum number of eigenvectors/KL basis we actually need to use/calculate\n \n # calculates eigenvalues and eigenvectors of the covariance matrix, but only the ones we need (up to max basis)\n evals, evecs = la.eigh(covar_psfs, eigvals=(tot_basis-max_basis, tot_basis-1))\n \n evals = np.copy(evals[::-1])\n evecs = np.copy(evecs[:,::-1], order='F') \n \n # calculates the PCA basis vectors\n basis_vecs = np.dot(ref_frames_mean_sub.T, evecs)\n basis_vecs = basis_vecs * (1. 
/ np.sqrt(evals * (np.size(im) - 1)))[None, :] #multiply a value for each row\n \n #subtract off the mean of the input frame\n im_mean_sub = im - np.nanmean(im)\n \n # duplicate science image by the max_basis to do simultaneous calculation for different number of PCA modes\n im_mean_sub_rows = np.tile(im_mean_sub, (max_basis, 1))\n im_rows_selected = np.tile(im_mean_sub, (np.size(num_PCA_modes), 1)) # this is the output image which has less rows\n \n # bad pixel mask\n # do it first for the image we're just doing computations on but don't care about the output\n im_nanpix = np.where(np.isnan(im_mean_sub_rows))\n im_mean_sub_rows[im_nanpix] = 0\n # now do it for the output image\n im_nanpix = np.where(np.isnan(im_rows_selected))\n im_rows_selected[im_nanpix] = 0\n \n inner_products = np.dot(im_mean_sub_rows, np.require(basis_vecs, requirements=['F']))\n # select the KLIP modes we want for each level of KLIP by multiplying by lower diagonal matrix\n lower_tri = np.tril(np.ones([max_basis, max_basis]))\n inner_products = inner_products * lower_tri\n \n # make a model background for each number of basis vectors we actually output\n model = np.dot(inner_products[num_PCA_modes,:], basis_vecs.T)\n \n # subtract model from input frame for each number of PCA modes chosen\n PCA_sub_images = (im_rows_selected - model).reshape(np.size(num_PCA_modes), im_y, im_x)\n\n #Adding back in the mean to the model so that the model can be subtracted from the original image later. \n if type(num_PCA_modes) is np.int64:\n return PCA_sub_images[0], model.reshape(im_y, im_x)+np.nanmean(im)\n elif type(num_PCA_modes) is np.ndarray:\n return PCA_sub_images, model.reshape(np.size(num_PCA_modes), im_y, im_x)+np.nanmean(im)\n \n else:\n print('Unsupported datatype for variable: num_PCA_modes. 
Variable must be either int or 1-D np.ndarray')", "def get_rotation_angle(self, image):\n \n # TODO: Make real functionality\n return 0", "def angle(v1, v2, acute=True):\n angle = np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))\n if acute == True:\n return angle\n else:\n return 2 * np.pi - angle", "def az_zen_dist(p0,p1):\n #formula comes from translating points into cartesian coordinates\n #taking the dot product to get the cosine between the two vectors\n #then arccos to return to angle, and simplify everything assuming real inputs\n a0,z0 = p0[0], p0[1]\n a1,z1 = p1[...,0], p1[...,1]\n return np.arccos(np.cos(z0)*np.cos(z1)+np.cos(a0-a1)*np.sin(z0)*np.sin(z1))", "def PPBCorr (inImage, pntImage, outImage, err,\n inPlane=[1,1,1,1,1], outPlane=[1,1,1,1,1], antSize=25.0):\n ################################################################\n # Checks\n if not Image.PIsA(inImage):\n raise TypeError,\"inImage MUST be a Python Obit Image\"\n if not Image.PIsA(pntImage):\n print \"Actually \",pntImage.__class__\n raise TypeError,\"pntImage MUST be a Python Obit Image\"\n if not Image.PIsA(outImage):\n print \"Actually \",outImage.__class__\n raise TypeError,\"outImage MUST be a Python Obit Image\"\n if not OErr.OErrIsA(err):\n raise TypeError,\"err MUST be an OErr\"\n if len(inPlane) != 5:\n raise TypeError,\"inPlane must have 5 elements\"\n if len(outPlane) != 5:\n raise TypeError,\"outPlane must have 5 elements\"\n #\n Obit.ImageUtilPBCorr(inImage.me, pntImage.me, outImage.me,\n inPlane, outPlane, antSize, err.me)\n if err.isErr:\n OErr.printErrMsg(err, \"Error making primary beam correction\")\n # end PPBCorr", "def align(image):\n angle = find_angle(image)\n image = rotate(image, angle)\n return image", "def _arcArcIntersectXY(c1,c2,inside=True,params=False):\n x1=c1[0]\n x2=c2[0]\n r1=c1[1][0]\n r2=c2[1][0]\n\n # check for sample reverse condition\n sr1 = c1[1][3]==-2\n sr2 = c2[1][3]==-2\n\n ## first check for non-intersection due to distance between the\n ## centers of the arcs, treating both arcs as circles for the moment\n\n d=dist(x1,x2) #calculate the distance d between circle centers\n\n if d > r1+r2:\n return False # too far, no possiblity of intersection\n\n if ( r1> r2 and d < r1-r2) or (r2 >= r1 and d < r2-r1):\n return False # too close, little arc is fully inside bigger arc\n\n if d < epsilon:\n return False # circle centers too close for stable calculation\n\n ## OK, we are in the goldilocks zone of intersection. this means\n ## that if boh arcs are cicles or if inside=False we are\n ## guaranteed one or two intersections. Calculate those\n ## intersections and then test to see if they fall between start\n ## and end of the respective arcs\n\n ## we start by calculating the distance id of the intersection plane\n ## from the center of arc 1, knowing that by definition id <= r1\n\n ## Math: consider the triangle with side lengths r1, r2, and d,\n ## where d is the previously calculated distance between arc\n ## centers. Consider the two right triangles with side lengths\n ## r1, id, h, and r2, h, (d-id). 
We know that:\n ## id^2 + h^2 = r1^2, (d-id)^2 + h^2 = r2^2\n ## solving both for h2 and then substituting, this means:\n ## r1^2 - id^2 = r2^2 - (d-id)^2\n ## collecting terms and solving for id produces:\n ## id = (r1^2-r2^2 + d^2)/2d\n\n id = (r1*r1 - r2*r2 + d*d)/(2 * d)\n\n ## compute the point on the line connecting the two arc centers\n ## that is id away from the first arc\n\n v1 = scale3(sub(x2,x1),1.0/d) # unitary direction vector pointing\n # from x1 to x2\n v2 = scale3(v1,id) # point on line between two circles in\n # coordinate space centered at x1\n\n ## compute direction vector o orthgonal to v1 -- the line that\n ## intersects point v2 and v2+o will pass through our intersection\n ## points\n\n o = orthoXY(v1)\n \n ## now, transform back into world coordinates and calculate the\n ## intersection of this line with either of our arcs, treating\n ## them as circles for now\n\n l = [add(v2,x1),add(add(v2,o),x1)]\n\n s = _lineArcIntersectXY(l,c1,False)\n\n ## as a sanity check, do the same with the other arc. Results\n ## should be within epsilon\n #ss = _lineArcIntersectXY(l,c2,False)\n #foo = list(map(lambda x, y: dist(x,y) < epsilon,s,ss))\n #print(\"sanity check: \" , foo)\n\n if not s or len(s) == 0:\n raise ValueError('no computed intersections, something is wrong')\n\n if not inside and not params:\n return s\n \n ## jump back to arc1 and arc2 space and check angles\n\n s1 = list(map(lambda x: sub(x,x1),s))\n s2 = list(map(lambda x: sub(x,x2),s))\n\n ## compute start and end angles for arcs\n start1=c1[1][1]\n end1=c1[1][2]\n if not (start1 == 0 and end1 == 360):\n start1 = start1 % 360.0\n end1 = end1 % 360.0\n if end1 < start1:\n end1 = end1 + 360.0\n \n start2=c2[1][1]\n end2=c2[1][2]\n \n if not (start2 == 0 and end2 == 360):\n start2 = start2 % 360.0\n end2 = end2 % 360.0\n if end2 < start2:\n end2 = end2 + 360.0\n \n\n ## check each intersection against angles for each arc. 
\n ss = []\n uparam1 = []\n uparam2 = []\n for i in range(len(s)):\n p1 =s1[i]\n p2 =s2[i]\n ang1 = (atan2(p1[1],p1[0]) % pi2)*360.0/pi2\n ang2 = (atan2(p2[1],p2[0]) % pi2)*360.0/pi2\n\n if params:\n u1 = 0\n u2 = 0\n if end1 <= 360.0 or ang1 >= start1 or \\\n ( end1 > 360.0 and ang1 > end1-360.0):\n u1 = (ang1-start1)/(end1-start1)\n if sr1:\n u1 = 1.0-u1\n elif end1 > 360.0:\n u1 = (ang1+360.0-start1)/(end1-start1)\n if sr1:\n u1 = 1.0-u1\n uparam1 = uparam1 + [ u1 ]\n \n if end2 <= 360.0 or ang2 >= start2 or \\\n ( end2 > 360.0 and ang2 > end1-360.0):\n u2 = (ang2-start2)/(end2-start2)\n if sr2:\n u2 = 1.0-u2\n elif end2 > 360.0:\n u2 = (ang2+360.0-start2)/(end2-start2)\n if sr2:\n u2 = 1.0-u2\n uparam2 = uparam2 + [ u2]\n \n else:\n good = False\n ## check angle against first arc\n if end1 <= 360.0 and ang1 >= start1 and ang1 <= end1:\n good = True\n elif end1 > 360.0 and (ang1 >= start1 or ang1<= end1-360.0):\n good = True\n\n ## check angle against second arc\n if end2 <= 360.0 and ang2 >= start2 and ang2 <= end2:\n good = good and True\n elif end2 > 360.0 and (ang2 >= start2 or ang2<= end2-360.0):\n good = good and True\n else:\n good = False\n\n ## only add instersection to the list if both checks were passed\n if good:\n ss = ss + [ s[i] ]\n \n if not params and len(ss) == 0:\n return False\n else:\n if params:\n return [uparam1,uparam2]\n else:\n return ss", "def angle(*args):\n if len(args) < 1:\n return 0.0\n elif len(args) == 1:\n return np.arctan2(args[0][1], args[0][0])\n else:\n v1 = args[0].flatten()\n v2 = args[1].flatten()\n return np.arccos(np.dot(v1, v2) / (norm(v1) * norm(v2)))", "def circumcenter(coil1, coil2, coil3):\n N = coil1.shape[1]\n cc = np.zeros((6, N)) * np.nan\n # x-, y-, and z-coordinates of the circumcenter\n # use coordinates relative to point `a' of the triangle\n xba = coil2[0, :] - coil1[0, :]\n yba = coil2[1, :] - coil1[1, :]\n zba = coil2[2, :] - coil1[2, :]\n xca = coil3[0, :] - coil1[0, :]\n yca = coil3[1, :] - coil1[1, :]\n zca = coil3[2, :] - coil1[2, :]\n\n # squares of lengths of the edges incident to `a'\n balength = xba * xba + yba * yba + zba * zba\n calength = xca * xca + yca * yca + zca * zca\n\n # cross product of these edges\n xcrossbc = yba * zca - yca * zba\n ycrossbc = zba * xca - zca * xba\n zcrossbc = xba * yca - xca * yba\n\n # calculate the denominator of the formulae\n denominator = 0.5 / (xcrossbc * xcrossbc + ycrossbc * ycrossbc\n + zcrossbc * zcrossbc)\n\n # calculate offset (from `a') of circumcenter\n xcirca = ((balength * yca - calength * yba) * zcrossbc -\n (balength * zca - calength * zba) * ycrossbc) * denominator\n ycirca = ((balength * zca - calength * zba) * xcrossbc -\n (balength * xca - calength * xba) * zcrossbc) * denominator\n zcirca = ((balength * xca - calength * xba) * ycrossbc -\n (balength * yca - calength * yba) * xcrossbc) * denominator\n\n cc[0, :] = xcirca + coil1[0, :]\n cc[1, :] = ycirca + coil1[1, :]\n cc[2, :] = zcirca + coil1[2, :]\n # orientation of the circumcenter with respect to the x-, y-, and z-axis\n # coordinates\n v = np.stack([cc[0, :].T, cc[1, :].T, cc[2, :].T]).T\n vx = np.stack([np.zeros((N,)).T, cc[1, :].T, cc[2, :].T]).T\n # on the x - axis\n vy = np.stack([cc[0, :].T, np.zeros((N,)).T, cc[2, :].T]).T\n # on the y - axis\n vz = np.stack([cc[0, :].T, cc[1, :].T, np.zeros((N,)).T]).T\n # on the z - axis\n thetax, thetay = np.zeros((N,)) * np.nan, np.zeros((N,)) * np.nan\n thetaz = np.zeros((N,)) * np.nan\n for j in range(N):\n\n # find the angles of two vectors opposing the axes\n 
thetax[j] = np.arccos(np.dot(v[j, :], vx[j, :]) /\n (np.linalg.norm(v[j, :]) * np.linalg.norm(vx[j, :])))\n thetay[j] = np.arccos(np.dot(v[j, :], vy[j, :]) /\n (np.linalg.norm(v[j, :]) * np.linalg.norm(vy[j, :])))\n thetaz[j] = np.arccos(np.dot(v[j, :], vz[j, :]) /\n (np.linalg.norm(v[j, :]) * np.linalg.norm(vz[j, :])))\n\n # convert to degrees\n cc[3, j] = (thetax[j] * (180 / np.pi))\n cc[4, j] = (thetay[j] * (180 / np.pi))\n cc[5, j] = (thetaz[j] * (180 / np.pi))\n return cc", "def calculate_polar_angle(p1, p2):\n # note the negative sign before the first component, which is y component\n # the y in scikit-image is flipped.\n # it is to convert the angle into right-handed coordinate\n # the range is from -pi to pi\n angle = np.arctan2(-(p2[1] - p1[1]), (p2[0] - p1[0])) * 180 / np.pi\n\n return angle", "def rot_center(self, image, angle):\n orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image", "def rot_center(self, image, angle):\n orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image", "def concat_3dimages_corners(imga, imgb, xoffset=0, yoffset=0, zoffset=0,\n transpose=True, ontop=True, center_offset=True,\n adjust_z=(0, 1)):\n print(\"Concating images with reference point being the lower left corner\")\n if transpose:\n print(\"Transpose images\")\n imga = np.transpose(imga, axes=(0, 2, 1))\n imgb = np.transpose(imgb, axes=(0, 2, 1))\n\n offset = (abs(zoffset), abs(yoffset), abs(xoffset))\n max_dim = np.maximum.reduce([imga.shape, np.add(imgb.shape, offset)])\n\n # center_a = np.array(np.divide(imga.shape, 2), dtype=int)\n # center_b = np.array(np.divide(imgb.shape, 2), dtype=int)\n\n # if (max_dim == imgb.shape).all():\n # tmp = np.copy(imgb)\n # imgb = np.copy(imga)\n # imga = np.copy(tmp)\n # ontop = toggle(ontop)\n # xoffset *= -1\n # yoffset *= -1\n # zoffset *= -1\n\n # tmp_offset = np.array(offset)\n # tmp_offset[tmp_offset > 0] = 0\n # new_img = np.full(np.add(max_dim, np.abs(offset)), np.nan)\n new_img = np.full(max_dim, np.nan)\n\n Sa0 = slice(0, imga.shape[0])\n Sa1 = slice(0, imga.shape[1])\n Sa2 = slice(0, imga.shape[2])\n Sb0 = slice(abs(zoffset), abs(zoffset) + imgb.shape[0])\n Sb1 = slice(abs(yoffset), abs(yoffset) + imgb.shape[1])\n Sb2 = slice(abs(xoffset), abs(xoffset) + imgb.shape[2])\n\n xdir = np.sign(xoffset)\n ydir = np.sign(yoffset)\n zdir = np.sign(zoffset)\n\n if ydir == 0:\n ydir = 1\n if xdir == 0:\n xdir = 1\n if zdir == 0:\n zdir = 1\n\n imga = imga[::zdir, ::ydir, ::xdir]\n imgb = imgb[::zdir, ::ydir, ::xdir]\n\n if adjust_z:\n for ix in adjust_z:\n top_img = 1 * new_img[ix]\n top_img[Sa1, Sa2] = imga[ix]\n top_img[Sb1, Sb2] = imgb[ix]\n low_img = 1 * new_img[ix]\n low_img[Sb1, Sb2] = imgb[ix]\n low_img[Sa1, Sa2] = imga[ix]\n\n diff = top_img - low_img\n m = np.nanmean(diff)\n s = np.nanstd(diff)\n mask = np.abs(diff) < m + s\n diff[mask] = np.nan\n add = np.nanmean(diff)\n\n print(add)\n\n imgb[ix] -= add\n\n print(\"new_img shape: \", new_img.shape)\n\n if ontop:\n new_img[Sa0, Sa1, Sa2] = imga\n new_img[Sb0, Sb1, Sb2] = imgb\n else:\n new_img[Sb0, Sb1, Sb2] = imgb\n new_img[Sa0, Sa1, Sa2] = imga\n\n new_img\n\n if transpose:\n print(\"Transpose back\")\n return np.transpose(new_img[::zdir, ::ydir, 
::xdir], axes=(0, 2, 1))\n else:\n return new_img[::zdir, ::ydir, ::xdir]", "def rotate(surface, angle, pivot, offset):\n rotated_image = pg.transform.rotozoom(surface, -angle, 1) # Rotate the image.\n rotated_offset = offset.rotate(angle) # Rotate the offset vector.\n # Add the offset vector to the center/pivot point to shift the rect.\n rect = rotated_image.get_rect(center=pivot+rotated_offset)\n return rotated_image, rect # Return the rotated image and shifted rect.", "def angle(self, other):\n return acosd(np.clip(self.uv().dot(other.uv()), -1, 1))", "def getAngle(p1, p2, p3):\n\tv1 = p1 - p2\n\tv2 = p3 - p2\n\tmag = la.norm(v1) * la.norm(v2)\n\tc = np.dot(v1, v2) / mag\n\tcross = np.cross(v1,v2)\n\ts = la.norm(cross)/mag\n\tatang = math.atan2(s,c)\n\tang = atang * 180 / math.pi\n\treturn ang", "def get_angle(a, b, c):\n\n ba = a - b\n cb = c - b\n\n ba_mod = mod(ba)\n cb_mod = mod(cb)\n val = dot(ba, cb) / (ba_mod * cb_mod)\n # better fix?\n if val > 1:\n val = 1\n elif val < -1:\n val = -1\n\n return np.arccos(val)", "def rotate_and_wrap_image(self, image, degree_of_rotation):\n\n image_center = tuple(np.array(image.shape[1::-1]) / 2)\n rot_mat = cv2.getRotationMatrix2D(image_center, degree_of_rotation, 1.0)\n # borderMode (constant) and borderValue are important for maintaiing consistency \n ri = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR, borderMode = cv2.BORDER_CONSTANT,borderValue = (255,255,255))\n return ri", "def getCrossingAngleIC (self):\n \n Axes = []\n \n Axes = [[tmhelix.ICAxis_X,tmhelix.ICAxis_Y,tmhelix.ICAxis_Z] for tmhelix in self.tmhelixmodel_set]\n \n CrossingAngleIC = SetOfVectors([Axes[0], Axes[1] ]) .AngleDEG ()\n \n return", "def align_image(\n src_im:np.ndarray, \n ref_im:np.ndarray, \n crop_list=None,\n use_autocorr=True, precision_fold=100, \n min_good_drifts=3, drift_diff_th=1.,\n all_channels=_allowed_colors, \n ref_all_channels=None, \n drift_channel='488',\n correction_args={},\n fitting_args={},\n match_distance_th=2.,\n verbose=True, \n detailed_verbose=False, \n ):\n \n from ..io_tools.load import correct_fov_image\n from ..spot_tools.fitting import fit_fov_image\n from ..spot_tools.fitting import select_sparse_centers\n from skimage.registration import phase_cross_correlation\n #print(\"**\", type(src_im), type(ref_im))\n ## check inputs\n # correciton keywords\n _correction_args = {_k:_v for _k,_v in _default_align_corr_args.items()}\n _correction_args.update(correction_args)\n # fitting keywords\n _fitting_args = {_k:_v for _k,_v in _default_align_fitting_args.items()}\n _fitting_args.update(fitting_args)\n \n # check crop_list:\n if crop_list is None:\n crop_list = generate_drift_crops(_correction_args['single_im_size'])\n for _crop in crop_list:\n if np.shape(np.array(_crop)) != (3,2):\n raise IndexError(f\"crop should be 3x2 np.ndarray.\")\n # check channels\n _all_channels = [str(_ch) for _ch in all_channels]\n # check bead_channel\n _drift_channel = str(drift_channel)\n if _drift_channel not in all_channels:\n raise ValueError(f\"bead channel {_drift_channel} not exist in all channels given:{_all_channels}\")\n # check ref_all_channels\n if ref_all_channels is None:\n _ref_all_channels = _all_channels\n else:\n _ref_all_channels = [str(_ch) for _ch in ref_all_channels]\n \n ## process source image\n # define result flag\n _result_flag = 0\n # process image\n if isinstance(src_im, np.ndarray):\n if verbose:\n print(f\"-- start aligning given source image to\", end=' ')\n _src_im = src_im\n elif isinstance(src_im, 
str):\n if verbose:\n print(f\"-- start aligning file {src_im}.\", end=' ')\n if not os.path.isfile(src_im) or src_im.split('.')[-1] != 'dax':\n raise IOError(f\"input src_im: {src_im} should be a .dax file!\")\n _src_im = correct_fov_image(src_im, [_drift_channel], \n all_channels=_all_channels,\n calculate_drift=False, \n return_drift=False, verbose=detailed_verbose,\n **_correction_args)[0]\n else:\n raise IOError(f\"Wrong input file type, {type(src_im)} should be .dax file or np.ndarray\")\n \n ## process reference image\n if isinstance(ref_im, np.ndarray):\n if verbose:\n print(f\"given reference image.\")\n _ref_im = ref_im\n elif isinstance(ref_im, str):\n if verbose:\n print(f\"reference file:{ref_im}.\")\n if not os.path.isfile(ref_im) or ref_im.split('.')[-1] != 'dax':\n raise IOError(f\"input ref_im: {ref_im} should be a .dax file!\")\n _ref_im = correct_fov_image(ref_im, [_drift_channel], \n all_channels=_ref_all_channels,\n calculate_drift=False, \n return_drift=False, verbose=detailed_verbose,\n **_correction_args)[0][0]\n else:\n raise IOError(f\"Wrong input ref file type, {type(ref_im)} should be .dax file or np.ndarray\")\n\n if np.shape(_src_im) != np.shape(_ref_im):\n raise IndexError(f\"shape of target image:{np.shape(_src_im)} and reference image:{np.shape(_ref_im)} doesnt match!\")\n\n ## crop images\n _crop_src_ims, _crop_ref_ims = [], []\n for _crop in crop_list:\n _s = tuple([slice(*np.array(_c,dtype=np.int)) for _c in _crop])\n _crop_src_ims.append(_src_im[_s])\n _crop_ref_ims.append(_ref_im[_s])\n ## align two images\n _drifts = []\n for _i, (_sim, _rim) in enumerate(zip(_crop_src_ims, _crop_ref_ims)):\n _start_time = time.time()\n if use_autocorr:\n if detailed_verbose:\n print(\"--- use auto correlation to calculate drift.\")\n # calculate drift with autocorr\n _dft, _error, _phasediff = phase_cross_correlation(_rim, _sim, \n upsample_factor=precision_fold)\n else:\n if detailed_verbose:\n print(\"--- use beads fitting to calculate drift.\")\n # source\n _src_spots = fit_fov_image(_sim, _drift_channel, \n verbose=detailed_verbose,\n **_fitting_args) # fit source spots\n _sp_src_cts = select_sparse_centers(_src_spots[:,1:4], match_distance_th) # select sparse source spots\n # reference\n _ref_spots = fit_fov_image(_rim, _drift_channel, \n verbose=detailed_verbose,\n **_fitting_args)\n _sp_ref_cts = select_sparse_centers(_ref_spots[:,1:4], match_distance_th, \n verbose=detailed_verbose) # select sparse ref spots\n #print(_sp_src_cts, _sp_ref_cts)\n \n # align\n _dft, _paired_src_cts, _paired_ref_cts = align_beads(\n _sp_src_cts, _sp_ref_cts,\n _sim, _rim,\n use_fft=True,\n match_distance_th=match_distance_th, \n return_paired_cts=True,\n verbose=detailed_verbose,\n )\n _dft = _dft * -1 # beads center is the opposite as cross correlation\n # append \n _drifts.append(_dft) \n if verbose:\n print(f\"-- drift {_i}: {np.around(_dft, 2)} in {time.time()-_start_time:.3f}s.\")\n\n # detect variance within existing drifts\n _mean_dft = np.nanmean(_drifts, axis=0)\n if len(_drifts) >= min_good_drifts:\n _dists = np.linalg.norm(_drifts-_mean_dft, axis=1)\n _kept_drift_inds = np.where(_dists <= drift_diff_th)[0]\n if len(_kept_drift_inds) >= min_good_drifts:\n _updated_mean_dft = np.nanmean(np.array(_drifts)[_kept_drift_inds], axis=0)\n _result_flag += 0\n if verbose:\n print(f\"--- drifts for crops:{_kept_drift_inds} pass the thresold, exit cycle.\")\n break\n \n if '_updated_mean_dft' not in locals():\n if verbose:\n print(f\"-- return a sub-optimal drift\")\n _drifts = 
np.array(_drifts)\n # select top 3 drifts\n from scipy.spatial.distance import pdist, squareform\n _dist_mat = squareform(pdist(_drifts))\n np.fill_diagonal(_dist_mat, np.inf)\n # select closest pair\n _sel_inds = np.array(np.unravel_index(np.argmin(_dist_mat), np.shape(_dist_mat)))\n _sel_drifts = list(_drifts[_sel_inds])\n # select closest 3rd drift\n _sel_drifts.append(_drifts[np.argmin(_dist_mat[:, _sel_inds].sum(1))])\n if detailed_verbose:\n print(f\"--- select drifts: {np.round(_sel_drifts, 2)}\")\n # return mean\n _updated_mean_dft = np.nanmean(_sel_drifts, axis=0)\n _result_flag += 1\n\n return _updated_mean_dft, _result_flag", "def _angle(self, a, b, c):\n divid = (a ** 2 + b ** 2 - c ** 2)\n divis = (2 * a * b)\n if (divis) > 0:\n result = float(divid) / divis\n if result <= 1.0 and result >= -1.0:\n return acos(result)\n return 0\n else:\n return 0", "def get_angle_between(self, other):\n cross = self.x*other[1] - self.y*other[0]\n dot = self.x*other[0] + self.y*other[1]\n return math.atan2(cross, dot)", "def _get_angle(point1, point2):\n ydelta = point2[0] - point1[0]\n xdelta = point2[1] - point1[1]\n if xdelta == 0:\n hypot = np.sqrt(xdelta ** 2 + ydelta ** 2)\n theta = np.arcsin(ydelta / hypot)\n elif ydelta == 0:\n hypot = np.sqrt(xdelta ** 2 + ydelta ** 2)\n theta = np.arccos(xdelta / hypot)\n else:\n theta = np.arctan(ydelta / xdelta)\n return theta", "def rotoreflection(axis, angle, origin=(0, 0, 0)):\n rot = SymmOp.from_origin_axis_angle(origin, axis, angle)\n refl = SymmOp.reflection(axis, origin)\n m = np.dot(rot.affine_matrix, refl.affine_matrix)\n return SymmOp(m)", "def corr(a, b):\n ma = np.mean(a)\n mb = np.mean(b)\n\n a_ = a - ma\n b_ = b - mb\n\n norma = np.sqrt(np.sum(a_ ** 2, axis=0))\n normb = np.sqrt(np.sum(b_ ** 2, axis=0))\n\n norma[norma < TOLERANCE] = 1.0\n normb[normb < TOLERANCE] = 1.0\n\n a_ *= 1.0 / norma\n b_ *= 1.0 / normb\n\n ip = np.dot(a_.T, b_)\n\n if ip.shape == (1, 1):\n return ip[0, 0]\n else:\n return ip", "def _calculate_angle(x0, y0, x1, y1):\n if x0 == y0 == x1 == y1 == 0:\n return 0\n\n if x1 - x0 > 0: # pointing to the right semi-plane\n angle = atan((y1 - y0) / (x1 - x0))\n elif x1 - x0 < 0 and y1 - y0 >= 0: # adding pi if pointing to the left-bottom quart\n angle = pi + atan((y1 - y0) / (x1 - x0))\n elif x1 - x0 < 0 and y1 - y0 < 0: # subtract pi if pointing to the left-upper quart\n angle = -pi + atan((y1 - y0) / (x1 - x0))\n else: # zerodevision handle\n if y1 - y0 > 0: # pointing down\n angle = pi / 2\n else: # pointing up\n angle = -pi / 2\n\n return angle", "def angle_between_vectors(vector1,vector2):\n value = np.sum(np.multiply(vector1, vector2)) / (np.linalg.norm(vector1) * np.linalg.norm(vector2))\n if (value<-1) | (value>1):\n value = np.sign(value)\n angle = np.arccos(value)\n return angle", "def angle_hkls(self, h1, h2):\n h1v = norm_vec((vec(*h1).T * self.Bmat)).T\n h2v = norm_vec((vec(*h2).T * self.Bmat)).T\n return np.around(np.arccos(h1v.T*h2v)[0, 0] * degrees, 3)", "def rotate_phasor(r, r1, r2):\n return (r - r2) / (r1 - r2)", "def circ_dist(azimuth1, azimuth2, radius=1.0):\n return np.arccos(np.cos(azimuth1 - azimuth2))", "def SVD_rotate(m1, m2):\n assert m1.shape[0] == m2.shape[0]\n\n # Find the centroids of m1, m2\n centroid1 = np.mean(m1, axis=0)\n centroid2 = np.mean(m2, axis=0)\n\n # Build the covariance matrix\n H = np.dot((m1 - centroid1).T, (m2 - centroid2))\n\n U, S, V = np.linalg.svd(H)\n\n # Middle matrix is to ensure that matrix yields a rotation, not reflection\n R = np.dot(V.T, np.array([ [1,0,0] , 
[0,1,0], [0,0, np.linalg.det(np.dot(V.T,U.T))] ]) ) \n R = np.dot(R, U.T)\n\n # Find translation \n t = -np.dot(R, centroid1) + centroid2\n \n return (R, t)", "def angle_between(i1, j1, i2, j2):\n\n dot_product = i1 * i2 + j1 * j2\n magnitude1 = np.sqrt(i1 ** 2 + j1 ** 2)\n magnitude2 = np.sqrt(i2 ** 2 + j2 ** 2)\n\n theta = np.arccos(dot_product / (magnitude1 * magnitude2))\n\n return np.rad2deg(theta).round(3)", "def angle_between_vectors(a, b):\n return math.acos(dot_product(a, b) / (length(a) * length(b)))", "def find_plane_angles(self, roof_motor_position):\n\n # Calcolo il punto mediano tra i vertici 2 e 3\n pc_x = (self.roof_vertex_x[1] + self.roof_vertex_x[2]) / 2\n pc_y = (self.roof_vertex_y[1] + self.roof_vertex_y[2]) / 2\n pc_z = (self.roof_vertex_z[1] + self.roof_vertex_z[2]) / 2\n\n # Questa non so cosa sia\n base_r = [[self.roof_vertex_x[0] - pc_x, self.roof_vertex_y[0] - pc_y, self.roof_vertex_z[0] - pc_z],\n [self.roof_vertex_x[1] - pc_x, self.roof_vertex_y[1] - pc_y, self.roof_vertex_z[1] - pc_z],\n [0.0, 0.0, 0.0]]\n\n # Questa e' la costruzione di una matrice\n mat_rot = [[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0]]\n\n # Non so quale operazione è implementata, ma a me servono solo tre elementi, j=2, i=0,1, j=1, i=0\n # Primo elemento, j=1, i=0\n mr = math.sqrt((base_r[0][0] ** 2) + (base_r[0][1] ** 2) + (base_r[0][2] ** 2))\n mat_rot[1][0] = base_r[0][1] / mr\n # Secondo elemento, j=2, i=0\n mat_rot[2][0] = base_r[0][2] / mr\n # Terzo elemento, j=2, i=1\n mr = math.sqrt((base_r[1][0] ** 2) + (base_r[1][1] ** 2) + (base_r[1][2] ** 2))\n mat_rot[2][1] = base_r[1][2] / mr\n\n # In alternativa posso calcolare tutti gli elementi della matrice\n # for i in range(2):\n # mr = math.sqrt((base_r[i][0] ** 2) + (base_r[i][1] ** 2) + (base_r[i][2] ** 2))\n # for j in range(3):\n # base_r[i][j] /= mr\n # mat_rot[j][i] = base_r[i][j]\n\n # Sono elementi della matrice non utilizzati\n # base_r[2][0] = +base_r[1][1] * base_r[0][2] - base_r[0][1] * base_r[1][2]\n # base_r[2][1] = -base_r[1][0] * base_r[0][2] + base_r[0][0] * base_r[1][2]\n # base_r[2][2] = +base_r[1][0] * base_r[0][1] - base_r[0][0] * base_r[1][1]\n # for i in range(3):\n # mat_rot[i][2] = base_r[2][i]\n\n # Qui estraggo la terna di Tait-Bryan angles usata internamente, la Z1Y2X3\n k17 = mat_rot[2][0]\n k16 = mat_rot[1][0]\n l17 = mat_rot[2][1]\n m20 = math.asin(k17)\n i23 = math.cos(m20)\n i24 = k16 / i23\n i25 = l17 / i23\n m19 = math.asin(i24)\n self.zyx1_r = m19 + roof_motor_position\n self.zyx2_r = math.asin(k17)\n self.zyx3_r = math.asin(i25)\n self.zyx3 = self.zyx3_r / Kinematic.M_TO_RAD\n self.zyx2 = self.zyx2_r / Kinematic.M_TO_RAD\n self.zyx1 = self.zyx1_r / Kinematic.M_TO_RAD\n angles = self.zyx_r_to_xyz(self.zyx3_r, self.zyx2_r, self.zyx1_r)\n self.xyz1 = angles[2]\n self.xyz2 = angles[0]\n self.xyz3 = angles[1]\n self.xyz1_r = angles[5]\n self.xyz2_r = angles[3]\n self.xyz3_r = angles[4]", "def rot_center(image, angle):\r\n orig_rect = image.get_rect()\r\n rot_image = pygame.transform.rotate(image, angle)\r\n rot_rect = orig_rect.copy()\r\n rot_rect.center = rot_image.get_rect().center\r\n rot_image = rot_image.subsurface(rot_rect).copy()\r\n return rot_image", "def angle_between(a, b):\n from math import acos\n return acos( dot_product(a, b) / (magnitude(a) * magnitude(b)) )", "def get_angle(input_img):\n # Tuning parameters\n min_square_area = (float)(input_img.shape[1] * 0.05)\n\n # Creates copy of image to avoid modifying original.\n img = numpy.array(input_img, copy=True)\n\n # Scale pixel 
values from 0-1 to 0-255\n img *= 255\n img = img.astype(numpy.uint8)\n\n thresh = cv2.adaptiveThreshold(\n img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 201, 2)\n\n # Find all contours\n contours = []\n cv2_version = cv2.__version__\n if cv2_version.startswith('2.4.'):\n contours, _ = cv2.findContours(\n thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n elif cv2_version.startswith('3.2.'):\n _, contours, _ = cv2.findContours(\n thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n # Filter contours to squares only.\n square_contours = []\n\n for contour in contours:\n rect = cv2.minAreaRect(contour)\n _, (width, height), angle = rect\n\n # Skip non-squares (with 0.1 tolerance)\n tolerance = 0.1\n if width < height * (1 - tolerance) or width > height * (1 + tolerance):\n continue\n\n # Remove very small contours.\n # These are usually just tiny dots due to noise.\n area = cv2.contourArea(contour)\n if area < min_square_area:\n continue\n\n if cv2_version.startswith('2.4.'):\n box = numpy.int0(cv2.cv.BoxPoints(rect))\n elif cv2_version.startswith('3.2.'):\n box = numpy.int0(cv2.boxPoints(rect))\n square_contours.append(contour)\n\n areas = []\n for contour in square_contours:\n area = cv2.contourArea(contour)\n areas.append(area)\n\n median_area = numpy.median(areas)\n\n filtered_squares = []\n filtered_angles = []\n for square in square_contours:\n area = cv2.contourArea(square)\n if area < median_area * 0.90 or area > median_area * 1.10:\n continue\n\n filtered_squares.append(square)\n _, (width, height), angle = cv2.minAreaRect(square)\n filtered_angles.append(angle)\n\n if len(filtered_angles) < 10:\n return None\n\n return numpy.median(filtered_angles)", "def concat_images_corner(imga, imgb, xoffset=0, yoffset=0, direction='horizontal',\n ontop=True, adjust_z=False):\n if direction == 'horizontal':\n max_dim = np.maximum.reduce([imga.shape, imgb.shape])\n\n offset = (abs(yoffset), abs(xoffset))\n tmp_offset = np.array(offset)\n\n # if (max_dim == imgb.shape).all():\n # tmp = np.copy(imgb)\n # imgb = np.copy(imga)\n # imga = np.copy(tmp)\n # ontop = toggle(ontop)\n # xoffset *= -1\n # yoffset *= -1\n\n # center_new = np.array(np.divide(max_dim, 2), dtype=int)\n new_img = np.full(np.add(max_dim, np.abs(offset)), np.nan)\n\n Sa0 = slice(0, imga.shape[0])\n Sa1 = slice(0, imga.shape[1])\n Sb0 = slice(abs(yoffset), abs(yoffset) + imgb.shape[0])\n Sb1 = slice(abs(xoffset), abs(xoffset) + imgb.shape[1])\n\n xdir = np.sign(xoffset)\n ydir = np.sign(yoffset)\n\n if ydir == 0:\n ydir = 1\n if xdir == 0:\n xdir = 1\n\n imga = imga[::ydir, ::xdir]\n imgb = imgb[::ydir, ::xdir]\n\n if adjust_z:\n top_img = 1 * new_img\n top_img[Sa0, Sa1] = imga\n top_img[Sb0, Sb1] = imgb\n low_img = 1 * new_img\n low_img[Sb0, Sb1] = imgb\n low_img[Sa0, Sa1] = imga\n\n diff = top_img - low_img\n m = np.nanmean(diff)\n s = np.nanstd(diff)\n mask = np.abs(diff) < m + s\n diff[mask] = np.nan\n add = np.nanmean(diff)\n\n print(add)\n\n imgb -= add\n\n if ontop:\n new_img[Sa0, Sa1] = imga\n new_img[Sb0, Sb1] = imgb\n else:\n new_img[Sb0, Sb1] = imgb\n new_img[Sa0, Sa1] = imga\n\n return new_img[::ydir, ::xdir]", "def cpvrotate(self, other):\n return Vec2d(self.x*other.x - self.y*other.y, self.x*other.y + self.y*other.x)", "def angle(vec1, vec2):\n assert vec1.shape == vec2.shape\n \n cos_vec = np.inner(vec1, vec2)/(np.linalg.norm(vec1)*np.linalg.norm(vec2))\n angle = math.acos(cos_vec)\n in_deg = math.degrees(angle)\n if in_deg >= 90:\n return (180-in_deg)\n return in_deg", "def rot_center(image, angle):\n 
orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image", "def rot_center(image, angle):\n orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image", "def rot_center(image, angle):\n orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image", "def angle( nt1, nt2, nt3 ):\n if vector(nt1, nt2) == [0,0]:\n print(\"nt1\", nt1.seqpos, \" at \", nt1.x, nt1.y, \" is at the same position as nt2\", nt2.seqpos)\n if vector(nt2, nt3) == [0,0]:\n print(\"nt2\", nt2.seqpos, \" at \", nt2.x, nt2.y, \" is at the same position as nt3\", nt3.seqpos)\n #print(vector(nt1, nt2), vector(nt2, nt3))\n if vectors_close(vector(nt1, nt2), vector(nt2, nt3)):\n # These vectors are identical and that is messing with the ability to call two things parallel?\n return 180.0\n return 180.0 - math.degrees(math.acos(dot(vector(nt1, nt2), vector(nt2, nt3)) / (mod(vector(nt1, nt2)) * mod(vector(nt2, nt3)))))", "def rotate_shape(shape, xy_center, angle_degrees):", "def get_opt_translate(obj_img,\n back_img,\n back_center_x,\n back_center_y,\n obj_center_x,\n obj_center_y,\n prev_row_trans=0,\n prev_col_trans=0,\n is_erosion=False):\n width = obj_img.shape[0]\n obj_center_x = int(obj_center_x)\n obj_center_y = int(obj_center_y)\n curr_row_trans, curr_col_trans = prev_row_trans, prev_col_trans\n induce_x = int(back_center_x - obj_center_x + curr_col_trans)\n induce_y = int(back_center_y - obj_center_y + curr_row_trans)\n combine_img = back_img.copy()\n combine_img[induce_y:induce_y + width, induce_x:induce_x + width] -= obj_img\n neg_count = len(np.argwhere(combine_img < 0))\n if is_erosion:\n trans_amount = 4\n else:\n trans_amount = 8\n while trans_amount > 1:\n trans_amount = trans_amount / 2\n neg_count_1 = compute_img_diff(obj_img,\n back_img,\n induce_x,\n induce_y,\n trans_row=trans_amount,\n trans_col=0)\n neg_count_2 = compute_img_diff(obj_img,\n back_img,\n induce_x,\n induce_y,\n trans_row=(-trans_amount),\n trans_col=0)\n if neg_count_1 < neg_count_2:\n if neg_count_1 < neg_count:\n neg_count = neg_count_1\n curr_row_trans += trans_amount\n else:\n if neg_count_2 < neg_count:\n neg_count = neg_count_2\n curr_row_trans -= trans_amount\n\n induce_y = back_center_y - obj_center_y + curr_row_trans\n if is_erosion:\n trans_amount = 4\n else:\n trans_amount = 16\n while trans_amount > 1:\n trans_amount = trans_amount / 2\n neg_count_1 = compute_img_diff(obj_img,\n back_img,\n induce_x,\n induce_y,\n trans_row=0,\n trans_col=trans_amount)\n neg_count_2 = compute_img_diff(obj_img,\n back_img,\n induce_x,\n induce_y,\n trans_row=0,\n trans_col=(-trans_amount))\n if neg_count_1 < neg_count_2:\n if neg_count_1 < neg_count:\n neg_count = neg_count_1\n curr_col_trans += trans_amount\n else:\n if neg_count_2 < neg_count:\n neg_count = neg_count_2\n curr_col_trans -= trans_amount\n # print('Negative Pix Count Translation: %d.' 
% neg_count)\n # print(curr_row_trans, curr_col_trans)\n return curr_row_trans, curr_col_trans, neg_count", "def angle(self, other):\n n1 = self.norm()\n n2 = other.norm()\n c = (self * other) / (n1 * n2)\n # Take care of roundoff errors\n c = min(c, 1)\n c = max(-1, c)\n return numpy.arccos(c)", "def orientation(pointA, pointB, target):\n if target in (pointA, pointB):\n return -1\n buf = np.array([1, pointA.X, pointA.Y, 1, pointB.X, pointB.Y, 1, target.X, target.Y]).reshape(3,-1)\n buf = np.linalg.det(buf)\n if abs(buf) < Drawable._comparisonLimit:\n return 0\n if buf < 0:\n return -1\n return 1", "def angle_between_vectors(x, y):\n first_step = abs(x[0] * y[0] + x[1] * y[1] + x[2] * y[2]) / (\n np.sqrt(x[0]**2 + x[1]**2 + x[2]**2) *\n np.sqrt(y[0]**2 + y[1]**2 + y[2]**2))\n second_step = np.arccos(first_step)\n return (second_step)", "def ioa(boxes1, boxes2):\n intersect = intersection(boxes1, boxes2)\n areas = np.expand_dims(area(boxes2), axis=0)\n return intersect / areas", "def _adi_rdi_pca(cube, cube_ref, angle_list, ncomp, scaling, mask_center_px,\n debug, svd_mode, imlib, interpolation, collapse, verbose,\n full_output, start_time):\n n, y, x = cube.shape\n if not cube_ref.ndim == 3:\n msg = 'Input reference array is not a cube or 3d array'\n raise ValueError(msg)\n if not cube_ref.shape[1] == y:\n msg = 'Reference and target frames have different shape'\n raise TypeError(msg)\n\n if ncomp > n:\n ncomp = min(ncomp, n)\n msg = 'Number of PCs too high (max PCs={}), using {} PCs instead.'\n print(msg.format(n, ncomp))\n residuals_result = _subtr_proj_fullfr(cube, cube_ref, ncomp, scaling,\n mask_center_px, debug, svd_mode,\n verbose, full_output)\n if full_output:\n residuals_cube = residuals_result[0]\n reconstructed = residuals_result[1]\n V = residuals_result[2]\n pcs = reshape_matrix(V, y, x)\n recon = reshape_matrix(reconstructed, y, x)\n else:\n residuals_cube = residuals_result\n residuals_cube_ = cube_derotate(residuals_cube, angle_list, imlib=imlib,\n interpolation=interpolation)\n frame = cube_collapse(residuals_cube_, mode=collapse)\n\n if verbose:\n print('Done de-rotating and combining')\n timing(start_time)\n return pcs, recon, residuals_cube, residuals_cube_, frame", "def angle(v1, v2):\n return acos(np.clip(v1.dot(v2) / (length(v1) * length(v2)), -1.0, 1.0))", "def compute_angle(self, a, b, c):\n\n ba = a - b\n bc = c - b\n\n cosine_angle = np.dot(ba, bc) / \\\n (np.linalg.norm(ba) * np.linalg.norm(bc))\n\n # because of precision issues, sometimes cosine_angle is something linke -1.000000001\n # we make sure we only pass the correct arguments to np.arccos()\n if cosine_angle > 1:\n cosine_angle = 1\n elif cosine_angle < -1:\n cosine_angle = -1\n\n angle = np.arccos(cosine_angle)\n\n return np.degrees(angle)", "def coord_space(\n a0: numpy.ndarray, a1: numpy.ndarray, a2: numpy.ndarray, rev: bool = False\n) -> Tuple[numpy.ndarray, Optional[numpy.ndarray]]:\n # dbg = False\n # if dbg:\n # print(a0.transpose())\n # print(a1.transpose())\n # print(a2.transpose())\n\n # a0 = acs[0]\n # a1 = acs[1]\n # a2 = acs[2]\n\n global gtm\n global gmry\n global gmrz, gmrz2\n\n tm = gtm\n mry = gmry\n mrz = gmrz\n mrz2 = gmrz2\n\n # tx acs[1] to origin\n # tm = homog_trans_mtx(-a1[0][0], -a1[1][0], -a1[2][0])\n set_homog_trans_mtx(-a1[0], -a1[1], -a1[2], tm)\n\n # directly translate a2 using a1\n p = a2 - a1\n sc = get_spherical_coordinates(p)\n\n # if dbg:\n # print(\"p\", p.transpose())\n # print(\"sc\", sc)\n\n # mrz = homog_rot_mtx(-sc[1], \"z\") # rotate translated a2 -azimuth 
about Z\n set_Z_homog_rot_mtx(-sc[1], mrz)\n # mry = homog_rot_mtx(-sc[2], \"y\") # rotate translated a2 -polar_angle about Y\n set_Y_homog_rot_mtx(-sc[2], mry)\n\n # mt completes a1-a2 on Z-axis, still need to align a0 with XZ plane\n # mt = mry @ mrz @ tm # python 3.5 and later\n mt = gmry.dot(gmrz.dot(gtm))\n\n # if dbg:\n # print(\"tm:\\n\", tm)\n # print(\"mrz:\\n\", mrz)\n # print(\"mry:\\n\", mry)\n # # print(\"mt \", mt)\n\n p = mt.dot(a0)\n\n # if dbg:\n # print(\"mt:\\n\", mt, \"\\na0:\\n\", a0, \"\\np:\\n\", p)\n\n # need azimuth of translated a0\n # sc2 = get_spherical_coordinates(p)\n # print(sc2)\n azimuth2 = _get_azimuth(p[0], p[1])\n\n # rotate a0 -azimuth2 about Z to align with X\n # mrz2 = homog_rot_mtx(-azimuth2, \"z\")\n set_Z_homog_rot_mtx(-azimuth2, mrz2)\n\n # mt = mrz2 @ mt\n mt = gmrz2.dot(mt)\n\n # if dbg:\n # print(\"mt:\", mt, \"\\na0:\", a0, \"\\np:\", p)\n # # print(p, \"\\n\", azimuth2, \"\\n\", mrz2, \"\\n\", mt)\n\n # if dbg:\n # print(\"mt:\\n\", mt)\n # print(\"<<<<<<==============================\")\n\n if not rev:\n return mt, None\n\n # rev=True, so generate the reverse transformation\n\n # rotate a0 theta about Z, reversing alignment with X\n # mrz2 = homog_rot_mtx(azimuth2, \"z\")\n set_Z_homog_rot_mtx(azimuth2, mrz2)\n # rotate a2 phi about Y\n # mry = homog_rot_mtx(sc[2], \"y\")\n set_Y_homog_rot_mtx(sc[2], mry)\n # rotate a2 theta about Z\n # mrz = homog_rot_mtx(sc[1], \"z\")\n set_Z_homog_rot_mtx(sc[1], mrz)\n # translation matrix origin to a1\n # tm = homog_trans_mtx(a1[0][0], a1[1][0], a1[2][0])\n set_homog_trans_mtx(a1[0], a1[1], a1[2], tm)\n\n # mr = tm @ mrz @ mry @ mrz2\n mr = gtm.dot(gmrz.dot(gmry.dot(gmrz2)))\n # mr = numpy.dot(tm, numpy.dot(mrz, numpy.dot(mry, mrz2)))\n\n return mt, mr" ]
[ "0.6499765", "0.64251083", "0.629712", "0.61103505", "0.594957", "0.58397025", "0.58203536", "0.57837665", "0.5770846", "0.57563686", "0.5699609", "0.5689523", "0.56460613", "0.56327623", "0.561697", "0.5562077", "0.5553344", "0.5546299", "0.5523779", "0.55019164", "0.54991394", "0.5494375", "0.5482069", "0.5479521", "0.54536027", "0.5439109", "0.5434319", "0.5432098", "0.5386073", "0.53753316", "0.5367587", "0.5339729", "0.532968", "0.52952707", "0.5294694", "0.52885866", "0.5286744", "0.52704644", "0.52696395", "0.526192", "0.52560693", "0.52548176", "0.5253055", "0.52485985", "0.52455753", "0.52444696", "0.52427924", "0.5242343", "0.523937", "0.5236717", "0.5232353", "0.5231977", "0.523122", "0.5223444", "0.52143985", "0.5209632", "0.5205117", "0.5205117", "0.52035326", "0.5199059", "0.51935863", "0.5190369", "0.51889825", "0.51840544", "0.5174515", "0.5164039", "0.5162914", "0.51618767", "0.51570016", "0.51560724", "0.51550114", "0.51527035", "0.5149321", "0.51421165", "0.514173", "0.51378495", "0.5136769", "0.51328444", "0.51136154", "0.51095504", "0.5106502", "0.5105145", "0.510129", "0.51008177", "0.509949", "0.5092551", "0.5085775", "0.5085775", "0.5085775", "0.5084266", "0.5084246", "0.5078077", "0.5069468", "0.5065538", "0.50619984", "0.5057336", "0.5052917", "0.5052307", "0.50514644", "0.5050967" ]
0.69551015
0
Return an image created from a text file. The first line of the file should contain "nx ny nz" (separated by whitespace). All subsequent lines contain "ix iy iz val", where ix, iy, and iz are the integer x, y, and z coordinates of the point and val is the floating point value of that point. All points not explicitly listed are set to zero.
def get_textimage(fname):
    from string import atoi,atof
    infile = open(fname)
    lines = infile.readlines()
    infile.close()
    data = lines[0].split()
    nx = atoi(data[0])
    ny = atoi(data[1])
    nz = atoi(data[2])
    e = EMData()
    e.set_size(nx, ny, nz)
    e.to_zero()
    for line in lines[1:]:
        data = line.split()
        ix = atoi(data[0])
        iy = atoi(data[1])
        iz = atoi(data[2])
        val = atof(data[3])
        e[ix,iy,iz] = val
    return e
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_image(filename):\n try:\n fi = open(filename,\"r\")\n lines = fi.readlines()\n n = int(lines[0]);\n img = create_zeroed_image(n)\n for i,line in enumerate(lines[1:]):\n clean_line = line.strip() # remove whitespace and newlines\n for j,char in enumerate(clean_line):\n # your code here\n \n img[i][j]=char\n # end your code here\n return img\n except IOError:\n raise Exception(\"Cannot find file \" + filename);\n finally:\n fi.close()", "def read_from_file(self,fn):\n fh = open(fn,\"r\")\n labels = []\n xyz = []\n sizes = []\n colors = []\n for line in fh.readlines():\n try:\n if not line.startswith(\"#\"):\n label,x,y,z,size,r,g,b = line.split(\",\")\n labels.append(label)\n xyz.append([x,y,z])\n sizes.append(size)\n colors.append((float(r),float(g),float(b)))\n except IOError, ioe:\n print \"IOError:\", ioe\n self._labels = np.array(labels)\n self._xyz = np.array(xyz).astype(\"f\")\n self._sizes = np.array(sizes).astype(\"f\")\n self._colors = np.array(colors)", "def read_from_grid(filename):\n\n x=[]\n y=[]\n z=[]\n\n fid=open(filename,'r')\n\n for point in fid:\n x.append(float(point.split()[0]))\n y.append(float(point.split()[1]))\n z.append(float(point.split()[2]))\n\n fid.close()\n\n return x, y, z", "def tag_parser(file_path: str):\n with open(file_path) as f:\n t = f.read()\n t = t.split(\"Points =\\n\")[1]\n t = t.replace(\" 0.1 1 1 \\\"Marker\\\"\", \"\")\n t = t.replace(\";\", \"\")\n t = t.replace(\" \\n\", \"\\n\")\n t = t[1:]\n t = StringIO(t)\n\n return np.genfromtxt(t, delimiter=' ')", "def inithr(_filename):\n # Open file provided\n _file = open(_filename)\n # Create empty array to hold data\n _data = np.zeros((1, 3), dtype=float)\n\n # Iterate through the file line by line\n for _line in _file:\n # Split each line into constituent values\n _x = _line.split()\n # Append data array with each value, converted to float, convert parallax angle to distance\n _data = np.append(_data, np.array([float(_x[1]), float(_x[2]), (1 / float(_x[3]))], ndmin=2), axis=0)\n\n # Iterate through data array\n for _row in _data:\n np.seterr(divide='ignore')\n # Convert magnitude to luminosity\n _row[0] = _row[0] - 5 * (np.log10(_row[2]) - 1)\n # Convert B-V colour to temperature\n _row[1] = 4600 * ((1 / (0.92 * _row[1] + 1.7)) + 1 / (0.92 * _row[1] + 0.62))\n\n # Delete first empty row\n _data = np.delete(_data, 0, axis=0)\n\n # Return parsed data\n return _data", "def file_parser(file_name):\n h = 480\n w = 640\n out = []\n with open(file_name, 'r') as f:\n line_num = 1\n for line in f:\n if line_num < 17:\n # Read to where data starts\n line_num += 1\n continue\n elif line_num > 74:\n break\n # print(list(map(int, line.strip().split(\" \"))))\n vals = line.split()\n # print(list(\"\".join(line)))\n # print(line.split())\n assert(float(vals[2]) < 640)\n assert(float(vals[3]) < 480)\n point = [float(vals[2]) * w, float(vals[3]) * h]\n # print(point)\n out.append(point)\n line_num += 1\n\n out.append([0,0])\n out.append([w-1, 0])\n out.append([0, h-1])\n out.append([w-1, h-2])\n return out", "def read_from(self, filename):\n self.x, self.y = np.loadtxt(filename, unpack=True, usecols=(0, 1))", "def init_from_obj_file(cls, f, scale=1, density=1):\n lines = [line.strip() for line in f.readlines()]\n vertices = []\n indexes = []\n for line in lines:\n if line.startswith(\"v\"): # vertex\n nums = list(map(float, string_to_list(line[2:])))\n vertices.append(scale * np.array(nums[:3]))\n # x.append(nums[0] * scale)\n # y.append(nums[1] * scale)\n # z.append(nums[2] * scale)\n elif 
line.startswith(\"f\"): # face\n nums = list(map(lambda a: int(a) - 1, string_to_list(line[2:])))\n indexes.append(nums)\n return cls(vertices, indexes, density)", "def readogle(filename, **kw):\n \n # 2008-12-21 18:53 IJC: Created\n\n f = open(filename, 'r')\n raw = f.readlines()\n f.close()\n\n nstars = len(raw)\n\n raw2 = array([line.split() for line in raw])\n ra = raw2[:,1]\n dec = raw2[:,2]\n xref = raw2[:,3]\n yref = raw2[:,4]\n vmag = raw2[:,5]\n imag = raw2[:,7]\n \n xref = [map(float, [x]) for x in xref]\n yref = [map(float, [y]) for y in yref]\n vmag = [map(float, [v]) for v in vmag]\n imag = [map(float, [i]) for i in imag]\n\n return (ra, dec, xref, yref, vmag, imag)", "def add_to_matrix_from_file(filename):\n\tif not os.path.exists(filename):\n\t\treturn []\n\n\tpng_image = Image.open(filename)\n\tpng_image.thumbnail((32, 32), Image.ANTIALIAS)\n\n\tcol = 0\n\timage = []\n\tfor pixel in list(png_image.getdata()):\n\t\t# TODO: something better than taking the red value\n\t\tif png_image.mode == 'L':\n\t\t\tpixelr = pixel\n\t\telif png_image.mode == 'RGB':\n\t\t\tpixelr, pixelg, pixelb = pixel\n\t\telif png_image.mode == 'RGBA':\n\t\t\tpixelr, pixelg, pixelb, pixela = pixel\n\t\timage.append(int(pixelr))\n\t\tcol += 1\n\treturn image", "def create_from_file(cls, file_name: str) -> \"TensorImage\":\n image_data = image_utils.decode_image_from_file(file_name)\n return cls(image_data, is_from_numpy_array=False)", "def format_data(filename,dummy=False):\n data = np.matrix(np.loadtxt(filename))\n Y = data[:,-1]\n X = data[:,0:-1]\n if dummy:\n X = np.concatenate((np.ones((len(X), 1)), X), 1)\n return(X,Y)", "def load_grd(filename):\n with open(filename, 'r') as f:\n meta = {}\n meta['header'] = []\n meta['header'].append(f.readline().rstrip('\\n'))\n while meta['header'][-1] != '++++':\n meta['header'].append(f.readline().rstrip('\\n'))\n # These determine the type of grid and the field format.\n meta['KTYPE'] = int(f.readline().split()[0])\n if meta['KTYPE'] != 1:\n raise ValueError(\"Not implemented.\")\n meta['NSET'], meta['ICOMP'], meta['NCOMP'], meta['IGRID'] = [int(s) for s in f.readline().split()]\n # The grid center in units of the x and y grid spacing.\n meta['IX'], meta['IY'] = [int(s) for s in f.readline().split()]\n # These are the x and y grid limits: S is lower, and E is upper.\n meta['XS'], meta['YS'], meta['XE'], meta['YE'] = [float(s) for s in f.readline().split()]\n # These are the numbers of grid points in x and y.\n meta['NX'], meta['NY'], meta['KLIMIT'] = [int(s) for s in f.readline().split()]\n # Implement this to read elliptically truncated grids.\n if meta['KLIMIT'] != 0:\n raise ValueError(\"Not implemented.\")\n # Load the field data. 
This returns an array with shape (NX * NY, 2 * NCOMP).\n conv = dict([(column, string_to_float) for column in range(2 * meta['NCOMP'])])\n data = np.loadtxt(f, dtype=float, converters=conv)\n # Determine the grid spacing and center values.\n meta['DX'] = (meta['XE'] - meta['XS']) / (meta['NX'] - 1)\n meta['DY'] = (meta['YE'] - meta['YS']) / (meta['NY'] - 1)\n meta['XCEN'] = meta['DX'] * meta['IX']\n meta['YCEN'] = meta['DY'] * meta['IY']\n # Reshape the data.\n map = np.empty((meta['NX'], meta['NY'], meta['NCOMP']),\n dtype=np.complex)\n for component in range(meta['NCOMP']):\n column = data[:, 2 * component] + 1j * data[:, 2 * component + 1]\n map[:, :, component] = column.reshape(meta['NX'], meta['NY'], order='F')\n return meta, map", "def parse_txt_file(txtfile):\n array = np.genfromtxt(txtfile)\n return array", "def build_from_file(path):\n with open(path) as obj:\n raw_file = obj.read()\n file_lines = [line.split(\" \") for line in raw_file.split(\"\\n\")]\n\n vertices = {}\n faces = []\n for number, line in enumerate(file_lines):\n if line[0] == \"v\":\n vertices[number + 1] = tuple(map(float, line[1:]))\n if line[0] == \"f\":\n face = []\n for index in line[1:]:\n face.append(vertices[int(index)])\n face.append(vertices[int(line[1])])\n faces.append(face)\n return Object(points=faces)", "def read_file(file):\n if opts.input_type == 'fits':\n data = fileio.read_fits(file)\n else:\n data = fileio.read_ascii(file)\n c_id = data[0,:]\n g_num = np.array(range(len(c_id)), dtype = 'int')\n g_id = data[3,:]\n g_ra = np.array(data[4,:], dtype = 'float')\n g_dec = np.array(data[5,:], dtype = 'float')\n g_z = np.array(data[6,:], dtype = 'float')\n return c_id, g_num, g_id, g_ra, g_dec, g_z", "def read_2d_analysis_data(f):\n \n data = np.transpose(np.loadtxt(f, dtype=np.float64))\n x = data[0]\n y = data[1]\n\n return x, y", "def _parse_txt(path, n_channels):\n f = open(path)\n lines = f.readlines()\n f.close()\n\n geom = np.zeros((0, 2))\n\n for i, line in zip(range(n_channels), lines):\n line = line.replace('\\r', '')\n line = line.replace('\\n', '')\n row = line.split(' ')\n geom = np.vstack((geom, row[:2])).astype('float')\n\n return geom", "def get_preprocessed_image(file_name):\n\n im = np.array(Image.open(file_name)).astype(np.float32)\n assert im.ndim == 3, 'Only RGB images are supported.'\n im = im - _IMAGENET_MEANS\n im = im[:, :, ::-1] # Convert to BGR\n img_h, img_w, img_c = im.shape\n assert img_c == 3, 'Only RGB images are supported.'\n if img_h > 500 or img_w > 500:\n raise ValueError('Please resize your images to be not bigger than 500 x 500.')\n\n pad_h = 500 - img_h\n pad_w = 500 - img_w\n im = np.pad(im, pad_width=((0, pad_h), (0, pad_w), (0, 0)), mode='constant', constant_values=0)\n return np.expand_dims(im.astype(np.float32), 0), img_h, img_w", "def loadtext(infile):\n warr, farr, earr=np.loadtxt(infile, usecols=(0,1,2), unpack=True)\n return create_spectrum(warr, farr, earr)", "def read_from(self, filename):\n\n lon, lat, field, weight = [], [], [], []\n\n if os.path.exists(filename):\n logger.info(\"Reading data from file {0}\".format(filename))\n with open(filename, 'r') as f:\n line = f.readline()\n ncols = len(line.split())\n while ncols >= 3:\n lon.append(float(line.split()[0]))\n lat.append(float(line.split()[1]))\n field.append(float(line.split()[2]))\n if ncols >= 4:\n weight.append(float(line.split()[3]))\n else:\n weight.append(1.)\n line = f.readline()\n ncols = len(line.split())\n\n self.x = np.array(lon)\n self.y = np.array(lat)\n self.field = 
np.array(field)\n self.weight = np.array(weight)\n return self\n else:\n logger.error(\"File {0} does not exist\".format(filename))\n raise FileNotFoundError('File does not exist')", "def read_file ( filename ):\r\n\t# lecture de l'en-tete\r\n\tinfile = open ( filename, \"r\" ) \r\n\tnb_classes, nb_features = [ int( x ) for x in infile.readline().split() ]\r\n\r\n\t# creation de la structure de donnees pour sauver les images :\r\n\t# c'est un tableau de listes (1 par classe)\r\n\tdata = np.empty ( 10, dtype=object ) \r\n\tfiller = np.frompyfunc(lambda x: list(), 1, 1)\r\n\tfiller( data, data )\r\n\r\n\t# lecture des images du fichier et tri, classe par classe\r\n\tfor ligne in infile:\r\n\t\tchamps = ligne.split ()\r\n\t\tif len ( champs ) == nb_features + 1:\r\n\t\t\tclasse = int ( champs.pop ( 0 ) )\r\n\t\t\tdata[classe].append ( map ( lambda x: float(x), champs ) ) \r\n\tinfile.close ()\r\n\r\n\t# transformation des list en array\r\n\toutput = np.empty ( 10, dtype=object )\r\n\tfiller2 = np.frompyfunc(lambda x: np.asarray (x), 1, 1)\r\n\tfiller2 ( data, output )\r\n\r\n\treturn output", "def loadtext(infile):\n warrsn, farrsn =np.loadtxt(infile, usecols=(0, 1), unpack=True)\n return create_spectrum(warrsn, farrsn)", "def read_data(infile):\n extension = os.path.splitext(infile)[1]\n h = read_header(infile)\n nx = int(h['num_x_pts'])\n ny = int(h['num_y_pts'])\n nt = int(h['num_t_pts'])\n fid = open(infile, 'rb')\n fid.seek(512) #skip header\n if extension == '.aps' or extension == '.a3daps':\n if(h['word_type']==7): #float32\n data = np.fromfile(fid, dtype = np.float32, count = nx * ny * nt)\n elif(h['word_type']==4): #uint16\n data = np.fromfile(fid, dtype = np.uint16, count = nx * ny * nt)\n data = data * h['data_scale_factor'] #scaling factor\n data = data.reshape(nx, ny, nt, order='F').copy() #make N-d image\n elif extension == '.a3d':\n if(h['word_type']==7): #float32\n data = np.fromfile(fid, dtype = np.float32, count = nx * ny * nt)\n elif(h['word_type']==4): #uint16\n data = np.fromfile(fid, dtype = np.uint16, count = nx * ny * nt)\n data = data * h['data_scale_factor'] #scaling factor\n data = data.reshape(nx, nt, ny, order='F').copy() #make N-d image\n elif extension == '.ahi':\n data = np.fromfile(fid, dtype = np.float32, count = 2* nx * ny * nt)\n data = data.reshape(2, ny, nx, nt, order='F').copy()\n real = data[0,:,:,:].copy()\n imag = data[1,:,:,:].copy()\n fid.close()\n if extension != '.ahi':\n return data\n else:\n return real, imag", "def read_data(filename):\n data = np.genfromtxt(filename, delimiter=',', dtype=str)\n X = data[1:,2:].astype(np.float)\n y = data[1:,0]\n y[y==label0]='0'\n y[y==label1]='1'\n y[y==label2]='2'\n y=y.astype(np.float)\n return X, y", "def _openFlt(self, fname):\n image = np.loadtxt(fname)\n\n if(image !=None):\n M,N=(int(image[0]), int(image[1]))\n image = image[2:image.shape[0]]\n image = image.reshape((M,N))\n else:\n raise IOError, \"Image file can not be opened\"\n\n return image", "def img2vector(filename):\n img_vector = zeros((1, 32*32))\n with open(filename, 'r') as fr:\n for row in xrange(32):\n line = fr.readline()\n for column in xrange(32):\n img_vector[0, 32*row+column] = int(line[column])\n\n return img_vector", "def open_image(infile):\n with fits.open(infile) as f:\n header = f[0].header\n data = f[0].data\n if data.ndim == 2:\n # NAXIS=2: [Y, X]\n image = data\n elif data.ndim == 3 and data.shape[0] == 1:\n # NAXIS=3: [FREQ=1, Y, X]\n image = data[0, :, :]\n elif data.ndim == 4 and data.shape[0] == 1 and data.shape[1] == 
1:\n # NAXIS=4: [STOKES=1, FREQ=1, Y, X]\n image = data[0, 0, :, :]\n else:\n raise ValueError(\"Slice '{0}' has invalid dimensions: {1}\".format(\n infile, data.shape))\n return (header, image)", "def read_data(filename):\n data = np.genfromtxt(filename, delimiter=',', dtype = str)\n X = data[1:,2:].astype(np.float)\n y = data[1:,0]\n y[y==label0]='0' \n y[y==label1]='1' \n y[y==label2]='2'\n y.astype(np.float) \n return X, y", "def f2image(infile, maskit=True):\n global master_mask\n\n tab = Table.read(infile, format=\"ascii\", names=(\"quadrant\", \"detx\", \"dety\", \"area\"), comment=\"#\")\n pixel_edges = np.arange(-0.5, 63.6)\n image = np.zeros((128,128))\n im_sub = np.zeros((64,64))\n\n data = tab[tab[\"quadrant\"] == 0]\n im_sub[data['detx'], data['dety']] = data['area']\n im_sub = np.transpose(im_sub)\n image[64:128,0:64] = np.copy(im_sub)\n\n data = tab[tab[\"quadrant\"] == 1]\n im_sub[data['detx'], data['dety']] = data['area']\n im_sub = np.transpose(im_sub)\n image[64:128,64:128] = np.copy(im_sub)\n\n data = tab[tab[\"quadrant\"] == 2]\n im_sub[data['detx'], data['dety']] = data['area']\n im_sub = np.transpose(im_sub)\n image[0:64,64:128] = np.copy(im_sub)\n\n data = tab[tab[\"quadrant\"] == 3]\n im_sub[data['detx'], data['dety']] = data['area']\n im_sub = np.transpose(im_sub)\n image[0:64,0:64] = np.copy(im_sub)\n\n if maskit:\n image[master_mask] = 0\n\n return image", "def from_file(self, file, num):\n l = file.readline()\n\n name = None\n left = None\n right = None\n center = None\n order = None\n shift = None\n svs = None\n\n while l:\n if l.find('%d=' % num) > -1:\n if name is None: name = parse_name(l, 'kernel_name%d' % num)\n if left is None: left = parse_int(l, 'kernel_left%d' % num)\n if right is None: right = parse_int(l, 'kernel_right%d' % num)\n if center is None: center = parse_int(l, 'kernel_center%d' % num)\n if order is None: order = parse_int(l, 'kernel_order%d' % num)\n if shift is None: shift = parse_int(l, 'kernel_shift%d' % num)\n if svs is None: svs = parse_string(l, file, 'kernel_svs%d' % num)\n else:\n self.window = (left, center, right)\n return self.init_sensor({ 'name' : name, 'order': order, 'shift' : shift}, svs)\n\n l = file.readline()", "def read_KNN_dataFile(file):\n A = np.genfromtxt(file)\n return A", "def read( self, Filename ):\n try:\n self.name = Filename\n Filedata = open(self.name,'r').readlines()\n self.ncols = string.atoi( Filedata[0].strip().split()[-1] )\n self.nrows = string.atoi( Filedata[1].strip().split()[-1] )\n self.xllcorner = string.atof( Filedata[2].strip().split()[-1] )\n self.yllcorner = string.atof( Filedata[3].strip().split()[-1] )\n self.cellsize = string.atof( Filedata[4].strip().split()[-1] )\n self.nodata = string.atof( Filedata[5].strip().split()[-1] )\n self.data = numpy.ones( (self.nrows, self.ncols ) ) *1.0\n row = self.nrows\n for t in Filedata[6:]:\n row -= 1\n col = -1\n values = map(string.atof, t.strip().split())\n for x in values:\n col += 1\n self.data[(row,col)] = x\n except:\n print \"Error opening grid ::\", Filename\n raise", "def load_stack(filename):\n data = np.genfromtxt(filename, skip_header=1)\n index_arr = data[:, 2]\n thickness_arr = data[:, 3] / 1e9\n stack = Stack(index_arr, thickness_arr)\n return stack", "def read_info(file, shape):\n for line in file:\n if line.startswith('mid'):\n shape.set_id(int(line.split()[-1])) \n\n if line.startswith('avg_depth'):\n shape.set_avg_depth(float(line.split()[-1]))\n if line.startswith('center'):\n pattern = 'center: 
\\((?P<x>.*),(?P<y>.*),(?P<z>.*)\\)'\n matches = re.match(pattern, line)\n shape.set_center((float(matches.group('x')),\n float(matches.group('y')),\n float(matches.group('z'))))\n if line.startswith('scale'):\n shape.set_scale(float(line.split()[-1]))\n\n return shape", "def readCubeFile(self, filename):\n\n inputfile = open(filename, \"r\")\n header = \"\".join([inputfile.readline(), inputfile.readline()])\n\n temp = inputfile.readline().strip().split()\n self.numAtoms = int(temp[0])\n self.origin = list(map(float, temp[1:]))\n\n self.numPoints = [0] * 3\n self.spacing = [0] * 3\n for i in range(3):\n line = inputfile.readline().strip().split()\n self.numPoints[i] = int(line[0])\n temp = list(map(float, line[1:]))\n self.spacing[i] = temp[i]\n assert sum(temp[:i] + temp[i + 1:]) == 0\n\n # Read in the lines with atom data\n for i in range(self.numAtoms):\n line = inputfile.readline()\n\n self.data = np.zeros((self.numPoints[1], self.numPoints[0], self.numPoints[2]), \"float\")\n i = j = k = 0\n while i < self.numPoints[1]:\n line = next(inputfile)\n temp = list(map(float, line.strip().split()))\n for x in range(0, len(temp)):\n self.data[j, i, x + k] = temp[x]\n\n k += len(temp)\n if k == self.numPoints[2]:\n j += 1\n k = 0\n if j == self.numPoints[1]:\n i += 1\n j = 0\n\n inputfile.close()", "def file_reader(image_file, label_file):\n\n image = im.imread(image_file)\n\n with open(label_file, \"r\") as file:\n label = float(file.read())\n\n return image, label", "def loadtext2(infile):\n warrsn, farrsn =np.loadtxt(infile, usecols=(0, 1), unpack=True)\n return create_spectrum(warrsn, farrsn)", "def read_image(filename, representation):\n img = imread(filename)\n img = int2float(img)\n if representation == GS_REP:\n img = rgb2gray(img)\n return img", "def parse_data(filename, data_path, ground_truths_path):\n with open(filename) as f:\n content = f.readlines()\n\n content = [x.strip() for x in content]\n\n data = []\n for i, item in enumerate(content):\n if i == 0:\n continue\n parametres = item.split(',')\n\n image = cv2.imread(os.path.join(data_path, parametres[0]), -1)\n image_processed = image * np.uint16(65535.0 / max(image.ravel()))\n image_processed = cv2.resize(image_processed, (960, 960), interpolation = cv2.INTER_AREA)\n\n ground_truth = cv2.imread(os.path.join(ground_truths_path, parametres[0][:parametres[0].rfind('.')] + \".png\"), -1)\n ground_truth_processed = np.uint16(np.copy(ground_truth))\n indices = np.where(np.any(ground_truth_processed != [0, 0, 255], axis = -1))\n ground_truth_processed[indices] = [0, 0, 0]\n indices = np.where(np.all(ground_truth_processed == [0, 0, 255], axis = -1))\n ground_truth_processed[indices] = [65535, 65535, 65535]\n ground_truth_processed = cv2.cvtColor(ground_truth_processed, cv2.COLOR_BGR2GRAY)\n ground_truth_processed = cv2.resize(ground_truth_processed, (960, 960), interpolation = cv2.INTER_AREA)\n \n img = Image(image, image_processed, ground_truth, ground_truth_processed,\n parametres[0], parametres[1], parametres[2], parametres[3], \n parametres[4], parametres[5], parametres[6], parametres[7], \n parametres[8])\n data.append(img)\n\n return data", "def parse_file(filename):\n\n f = open(filename, 'r')\n BoardSize = int( f.readline())\n NumVals = int(f.readline())\n\n #initialize a blank board\n board= [ [ 0 for i in range(BoardSize) ] for j in range(BoardSize) ]\n\n #populate the board with initial values\n for i in range(NumVals):\n line = f.readline()\n chars = line.split()\n row = int(chars[0])\n col = int(chars[1])\n val = 
int(chars[2])\n board[row-1][col-1]=val\n\n return board", "def load_velodyne_points(filename):\n points = np.fromfile(filename, dtype=np.float32).reshape(-1, 4)\n points[:, 3] = 1.0 # homogeneous\n return points", "def imread_float(infile):\n return img_as_float(imread(infile))", "def __init__(self, f):\n with open(f,'r') as gridfile:\n self.matrix = ([[char for char in row if char != '\\n'] \n for row in gridfile.readlines()])\n # beginning represents the upper most \n self.beginning = Square(0, 0, self)", "def read_from_ascii(self, filename):\n self.ascii_filename = filename\n # read file content into a string\n f=open(filename,'r')\n file_str=f.read()\n f.close()\n # make dictionary with file content\n reg_exp_data_groups=re.compile(r'^#>>(\\w+):.*\\n',re.M)\n file_dict=self.make_data_dict_from_str(reg_exp_data_groups,file_str)\n # read arrays ------------------------------\n self.x=np.loadtxt(StringIO.StringIO(file_dict['x']))\n self.p=np.loadtxt(StringIO.StringIO(file_dict['p']))\n self.fmci_XP=np.loadtxt(StringIO.StringIO(file_dict['XP']))\n # regular expression for extracting parameter=value\n reg_exp_param_val=re.compile(r'\\n*(\\w+)=',re.M)\n # read params_physics -----------------------\n params_physics_dict=self.make_data_dict_from_str(reg_exp_param_val,file_dict['params_physics'])\n self.name=self.__get_particle_name(params_physics_dict['particle'])\n self.time=float(params_physics_dict['time'])\n # read params_TDC ---------------------------\n params_TDC_dict=self.make_data_dict_from_str(reg_exp_param_val,file_dict['params_TDC'])\n self.calc_id=params_TDC_dict['calc_id']\n self.i_ts=int(params_TDC_dict['i_ts'])", "def file_reader(filename = 'conv_params'):\n\n with open(filename) as f:\n info = f.readlines()\n info = [i.strip() for i in info] # each element in info is a string of a line from the file\n info = [i.split() for i in info] # split each whitespace delimited element into a list of lists\n info = [[i.split('-') for i in j] for j in info] # note info is 3 layers deep\n\n info[2] = info[2][0] # makes default E just a single string of the number\n info[3] = info[3][0]\n\n return info", "def get_pict_data(fname):\n with open(fname, 'r') as f:\n return np.asarray(f.read().split(',')).reshape((11, 1024)).astype(int)", "def read_and_interp_psd_from_txt(filename):\n psd = read_psd_from_txt(filename)\n return interpolate_psd(psd)", "def myloadtxt(fname, skiprows = 0):\n fin = file(fname)\n for i in range(skiprows):\n fin.readline()\n ln = fin.readline()\n lns = []\n while (ln != \"\"):\n thisln = []\n ln = ln.strip().split()\n for s in ln:\n try:\n f = float(s)\n except:\n f = None\n thisln.append(f)\n lns.append(thisln)\n ln = fin.readline()\n return np.array(lns)", "def readprimitive(f): \n \n ## read in lines from input file and ignore blank lines and comment lines\n lines = [line.rstrip() for line in f if line.rstrip() if line[0] != '#']\n\n # a1,a2,a3\n A = np.array([[float(lines[0].split()[0]),float(lines[0].split()[1]),float(lines[0].split()[2])],\n [float(lines[1].split()[0]),float(lines[1].split()[1]),float(lines[1].split()[2])],\n [float(lines[2].split()[0]),float(lines[2].split()[1]),float(lines[2].split()[2])]]).T\n \n # number of basis atoms\n num_basis = int(lines[3].split()[0]) \n\n # basis atom positions in unit cell\n unitcell_pos = []\n for i in range(num_basis): \n unitcell_pos.append([float(lines[4+i].split()[0]),float(lines[4+i].split()[1]),float(lines[4+i].split()[2])]) \n \n return (A,unitcell_pos)", "def Read_CCD_image(Path):\n fs = open(Path, 'r')\n 
\n #Compte le nombre de lignes, oblige pr le moment de tout lire\n # la dernière ligne est vide ! attention, j'initialise nb_line à -1 pour compenser\n nb_line = -1\n while 1: \n txt = fs.readline()\n nb_line = nb_line+1\n if ((txt =='')|(txt == '\\r\\n')): \n break\n fs.close()\n \n \n # je lis une ligne, compte le nombre d'espace et en deduit le nombre de colonne de la matrice\n fs = open(Path, 'r')\n txt = fs.readline()\n ii = 0\n index_line = []\n while 1: # on cherche le premier espace qui limite le premier nombre\n ii = ii+1 \n if (txt[ii:ii+1] == '\\t'):\n index_line.append(ii)\n if (txt[ii:ii+4] == '\\r\\n'):\n break\n nb_col = np.array(index_line).size\n fs.close()\n \n image = np.ones((nb_line,nb_col), dtype = float) # Create the image matrix\n # Pour les axes, je reprends les chiffres obtenus lors de la calibration du mouvement de la pointe.... cad 31nm/pixel...\n #axex = np.linspace(0,0.032*nb_line,nb_line) #microns\n #axey = np.linspace(0,0.032*nb_col,nb_col) #microns\n axex = np.linspace(0,nb_line,nb_line) #pixels\n axey = np.linspace(0,nb_col,nb_col) #pixels\n \n fs = open(Path, 'r')\n \n nb_line = 0 # I need to count the lines to fill the matrix\n while 1: \n txt = fs.readline()\n if ((txt =='')|(txt == '\\r\\n')): \n break\n if txt[0] =='#':\n pass\n else:\n #print(txt)\n ii=-1\n index_line=[]\n while 1: # on cherche le premier espace qui limite le premier nombre\n ii = ii+1 \n if (txt[ii:ii+1] == '\\t'):\n index_line.append(ii)\n if (txt[ii:ii+4] == '\\r\\n'):\n break\n # ici j'ai tous mes index d'espace pour une ligne normalement\n line = []\n line.append(txt[:index_line[0]])\n index_line = np.array(index_line) # premier nombre\n for ii in range (index_line.size -1):\n line.append(np.float(txt[index_line[ii]:index_line[ii+1]]))\n # Il me manque le dernier aussi\n #line.append(np.float(txt[index_line[-1]:])) \n image[nb_line,:] = line\n nb_line = nb_line+1\n #flipping up-down with [::-1,...] 
then image appears in Python as in the screen in HiPic \n return axex,axey,image[::-1,...]", "def read_data(self,filename):\n self.x = [] #Input values\n self.t = [] #Target values\n\n with open(filename, \"r\") as infile:\n lines = infile.readlines()\n self.n = len(lines)\n for line in lines:\n words = line.split()\n self.x.append(float(words[0]))\n self.t.append(float(words[1]))\n\n self.x = np.array(self.x)\n self.t = np.array(self.t)\n self.create_design_matrix()", "def read(f):\n \n if isinstance(f, basestring):\n # If the input is a string, treat as file name\n with open(f) as fh: # Ensure file is closed\n return read(fh) # Call again with file object\n \n # First line contains the date\n date = f.readline()\n if not date:\n raise IOError(\"Cannot read from input file \"+str(filename))\n \n # Second is description\n desc = f.readline()\n \n token = file_numbers(f)\n \n # Third contains number of mesh points\n try:\n npsi = int(token.next())\n ntheta = int(token.next())\n isym = int(token.next())\n except StopIteration:\n raise IOError(\"Unexpected end of file while reading grid size\")\n except ValueError:\n raise IOError(\"Third line should contain npsi, ntheta and isym\")\n \n # Check values\n if (isym < 0) or (isym > 1):\n raise IOError(\"isym must be either 0 or 1\")\n if (npsi < 1) or (ntheta < 1):\n raise IOError(\"Invalid npsi=\"+str(npsi)+\" or ntheta=\" + str(ntheta))\n \n # Read normalisation factors\n\n try:\n rcnt = float(token.next())\n xma = float(token.next())\n zma = float(token.next())\n btor = float(token.next())\n curtot = float(token.next())\n eaxe = float(token.next())\n dnorm = float(token.next())\n except:\n raise IOError(\"Couldn't read normalisation factors\")\n \n def read_array(n, name=\"Unknown\"):\n data = np.zeros([n])\n try:\n for i in np.arange(n):\n data[i] = float(token.next())\n except:\n raise IOError(\"Failed reading array '\"+name+\"' of size \", n)\n return data\n\n def read_2d(nx, ny, name=\"Unknown\"):\n data = np.zeros([nx, ny])\n for i in np.arange(nx):\n data[i,:] = read_array(ny, name+\"[\"+str(i)+\"]\")\n return data\n\n # Read 1D arrays\n psiflux = read_array(npsi, \"psiflux\")\n fnorm = read_array(npsi, \"fnorm\")\n ffpnorm = read_array(npsi, \"ffpnorm\")\n ponly = read_array(npsi, \"ponly\")\n pponly = read_array(npsi, \"pponly\")\n qsf = read_array(npsi, \"qsf\")\n d = read_array(npsi, \"d\")\n \n dpdz = read_array(ntheta, \"dpdz\")\n dpdr = read_array(ntheta, \"dpdr\")\n \n # 2D arrays\n \n xnorm = read_2d(ntheta, npsi, \"xnorm\")\n znorm = read_2d(ntheta, npsi, \"znorm\")\n \n # Try to read Br and Bz (may be present)\n try:\n Br = read_2d(ntheta, npsi, \"Br\")\n Bz = read_2d(ntheta, npsi, \"Bz\")\n except:\n Br = Bz = None\n \n ny = ntheta\n\n if isym == 1:\n # Fill in values for up-down symmetric case\n print(\"Grid is up-down symmetric. 
Reflecting grid about midplane\")\n ny = tsize = 2*(ntheta - 1) + 1\n \n def reflect(data, mapfunc = lambda x:x):\n \"\"\" Reflect a variable about midplane\n Optionally supply a mapping function\"\"\"\n data2 = np.zeros([tsize, npsi])\n # Copy the original data\n for i in np.arange(ntheta):\n data2[i,:] = data[i,:]\n # Now fill in the remainder\n for i in np.arange(ntheta, tsize):\n t0 = tsize - 1 - i\n data2[i,:] = mapfunc(data[t0,:])\n return data2\n \n xnorm = reflect(xnorm)\n znorm = reflect(znorm, lambda x: 2.*zma - x) # Reflect about zma\n if Br != None:\n Br = reflect(Br, lambda x:-x) # Br reverses\n if Bz != None:\n Bz = reflect(Bz) # Bz remains the same\n theta = tsize\n\n # Make sure we have Br, Bz and Bpol\n\n if (Br == None) or (Bz == None):\n # Calculate Bpol from psi then Br and Bz from Bpol\n # Use dpsi = R*Bp dx (for now)\n Bpol = np.zeros([ny, npsi])\n \n def deriv(f):\n n = np.size(f)\n dfdi = np.zeros(n)\n dfdi[1:-1] = (f[2:n] - f[0:-2])/2. # Central difference in the middle\n dfdi[0] = f[1] - f[0]\n dfdi[-1] = f[-1] - f[-2]\n return dfdi\n \n for i in np.arange(ntheta):\n drdi = deriv(xnorm[i, :])\n dzdi = deriv(znorm[i, :])\n dldi = sqrt(drdi**2 + dzdi**2) # Arc length\n dpsidi = deriv(psiflux)\n \n Bpol[i, :] = dpsidi / (dldi * xnorm[i,:])\n else:\n Bpol = np.sqrt(Br**2 + Bz**2)\n \n # Calculate toroidal field\n Btor = fnorm / xnorm\n \n #########################################\n # Create a dictionary of values to return\n # \n # Need to transpose 2D arrays to [psi, theta] \n # to be consistent with elite inputs\n \n var = {\"npsi\":npsi, \"npol\":ny, # Sizes\n \n \"psi\":psiflux,\n \"f(psi)\":fnorm,\n \"p\":ponly,\n \n \"R\": np.transpose(xnorm),\n \"Z\": np.transpose(znorm),\n\n \"Bp\":np.transpose(Bpol),\n \"Bt\":np.transpose(Btor),\n\n \"q\":qsf,\n\n \"ffprime\":ffpnorm,\n \"pprime\":pponly}\n\n if Br != None:\n var['Br'] = np.transpose(Br)\n if Bz != None:\n var['Bz'] = np.transpose(Bz)\n \n return var", "def read_pattern_file(file_path: str) -> np.ndarray:\n\n # Check if the example file exists\n if not os.path.isfile(file_path):\n return None\n\n rows = 0\n cols = 0\n with open(file_path) as f:\n for i, l in enumerate(f):\n if l[0] != \"!\":\n rows += 1\n if len(l) > cols:\n cols = len(l) - 1 # Exclude the end of line char from the column count\n\n grid = np.zeros((rows, cols), dtype=np.uint8)\n\n skip_rows = 0\n with open(file_path) as f:\n for j, line in enumerate(f):\n for k, c in enumerate(line):\n if c == \"!\" and k == 0:\n skip_rows += 1\n break\n elif c == \"O\":\n grid[j - skip_rows, k] = 1\n\n return grid", "def make_from_file(filehandle):\n lines = filehandle.readlines()\n label = str(lines[0].rstrip('\\n'))\n mass = float(lines[1].rstrip('\\n'))\n position = list(lines[2].rstrip('\\n').split(','))\n velocity = list(lines[3].rstrip('\\n').split(','))\n particle = Particle3D(label=label, mass=mass, position=position, velocity=velocity)\n filehandle.close()\n return particle", "def readmesh(filename):\n f = open(filename, 'rb')\n cells = []\n edges = []\n\n # create each cell and edge, etc\n for line in f:\n \n return cells, edges", "def _read_datafile(self,path):\n \tlabels, images = [], []\n \twith gzip.GzipFile(path) as f:\n \t for line in f:\n \t vals = line.strip().split()\n \t labels.append(float(vals[0]))\n \t images.append([float(val) for val in vals[1:]])\n \tlabels = np.array(labels, dtype=np.int32)\n \tlabels[labels == 10] = 0 # fix weird 0 labels\n \timages = np.array(images, dtype=np.float32).reshape(-1, 16, 16, 1)\n \timages = (images + 
1) / 2\n \treturn images, labels", "def load_pfm(filename):\n filename = process(filename)\n with open(filename, \"r\", encoding=\"ISO-8859-1\") as file:\n nc = 3 if file.readline().rstrip() == \"PF\" else 1\n width, height = [int(x) for x in file.readline().rstrip().split()]\n shape = (height, width, nc)\n img = np.fromfile(file, '{0}{1}'.format(\"<\" if float(file.readline().rstrip()) < 0 else \">\",'f') )\n img = np.reshape(img, shape)\n return np.flip(np.flip(img, 2), 0).copy()", "def load_raster_xyz(self, filename):\n with rasterio.open(filename, 'r') as src:\n ## Alias 'affine' no longer works for 'transform'\n ##matrix = src.affine\n matrix = src.transform\n self.size = (src.width, src.height)\n # read per scan line\n for row in range(0, src.height):\n window = ((row, row+1), (0, src.width))\n data = src.read(window=window)\n this_row = data[0][0]\n for column in range(0, src.width):\n x, y = matrix * (column, row)\n yield x, y, this_row[column]", "def read_image(filename, representation):\n image = scipy.misc.imread(filename)\n if int(representation) == 1:\n image = rgb2gray(image)\n return img_as_float(image)", "def make_image(file):\n image = cv2.imread(file, 0)\n image = cv2.resize(image, (IMG_SIZE, IMG_SIZE))\n return np.array([np.array(image), np.array([0, 0])])", "def _fileToMatrix(file_name):\r\n # TODO: np.loadtxt() might be an alternative\r\n # try:\r\n if 1 < 3:\r\n lres = []\r\n for line in open(file_name, 'r').readlines():\r\n if len(line) > 0 and line[0] not in ('%', '#'):\r\n lres.append(list(map(float, line.split())))\r\n res = lres\r\n else:\r\n fil = open(file_name, 'r')\r\n fil.readline() # rudimentary, assume one comment line\r\n lineToRow = lambda line: list(map(float, line.split()))\r\n res = list(map(lineToRow, fil.readlines()))\r\n fil.close() # close file could be omitted, reference counting should do during garbage collection, but...\r\n\r\n while res != [] and res[0] == []: # remove further leading empty lines\r\n del res[0]\r\n return res\r\n # except:\r\n print('could not read file ' + file_name)", "def read_flow_png(flow_file):\n flow_object = png.Reader(filename=flow_file)\n flow_direct = flow_object.asDirect()\n flow_data = list(flow_direct[2])\n (w, h) = flow_direct[3]['size']\n flow = np.zeros((h, w, 3), dtype=np.float64)\n for i in range(len(flow_data)):\n flow[i, :, 0] = flow_data[i][0::3]\n flow[i, :, 1] = flow_data[i][1::3]\n flow[i, :, 2] = flow_data[i][2::3]\n\n invalid_idx = (flow[:, :, 2] == 0)\n flow[:, :, 0:2] = (flow[:, :, 0:2] - 2 ** 15) / 64.0\n flow[invalid_idx, 0] = 0\n flow[invalid_idx, 1] = 0\n return flow", "def load(filename):\n lines = [l.strip('\\r\\n ') for l in open(filename, 'r').readlines()]\n lines = [l for l in lines if l != '']\n dims = [re.split(r'\\s+', l) for l in lines]\n f = np.array([[float(f) for f in d] for d in dims])\n return f", "def parseMatrix(file):\n array = genfromtxt(file, delimiter=',')\n\n if not np.isnan(array).any():\n raise ValueError(\"This array does not contain any 'nan' values\")\n\n return array", "def load(self, _name):\r\n with open(_name, 'r') as fin:\r\n self.filename = _name\r\n\r\n self.comment_1 = fin.readline() # Save 1st comment\r\n self.comment_2 = fin.readline() # Save 2nd comment\r\n\r\n _str = fin.readline().split() # Number of Atoms and Origin\r\n self.n_atoms = int(_str[0]) # Number of Atoms\r\n self.origin = np.array([float(_str[1]), float(_str[2]), float(_str[3])]) # Position of Origin\r\n\r\n nVoxel = fin.readline().split() # Number of Voxels\r\n self.n_x = int(nVoxel[0])\r\n 
self.x = np.array([float(nVoxel[1]), float(nVoxel[2]), float(nVoxel[3])])\r\n\r\n nVoxel = fin.readline().split() #\r\n self.n_y = int(nVoxel[0])\r\n self.y = np.array([float(nVoxel[1]), float(nVoxel[2]), float(nVoxel[3])])\r\n\r\n nVoxel = fin.readline().split() #\r\n self.n_z = int(nVoxel[0])\r\n self.z = np.array([float(nVoxel[1]), float(nVoxel[2]), float(nVoxel[3])])\r\n\r\n self.atoms = []\r\n self.atoms_xyz = []\r\n for atom in range(self.n_atoms):\r\n line = fin.readline().split()\r\n self.atoms.append(line[0])\r\n self.atoms_xyz.append(list(map(float, [line[2], line[3], line[4]])))\r\n\r\n self.data = np.zeros((self.n_x, self.n_y, self.n_z))\r\n\r\n i = int(0)\r\n for s in fin:\r\n for v in s.split():\r\n self.data[int(i / (self.n_y * self.n_z)), int((i / self.n_z) % self.n_y),\r\n int(i % self.n_z)] = float(v)\r\n i += 1\r\n\r\n return None", "def loadtxt(filename, save=True, stripe=True, blank=False):\n a = []\n f = open(filename, 'r')\n fn = filename.split('.')[0]\n for line in f.readlines():\n line = line.strip()\n elems = [int(i) for i in list(line)]\n a.append(elems)\n b = np.array(a, dtype=np.int)\n np.fill_diagonal(b, 0)\n if blank:\n b = remove_blanks(b)\n if save:\n np.savez('.'.join((fn, 'npz')), a=b)\n else:\n return b", "def read_ascii_raster(ascii_raster_file):\n import numpy as np\n\n with open(ascii_raster_file) as f:\n header_data = [float(f.next().split()[1]) for x in xrange(6)] #read the first 6 lines\n\n raster_data = np.genfromtxt(ascii_raster_file, delimiter=' ', skip_header=6)\n raster_data = raster_data.reshape(header_data[1], header_data[0]) #rows, columns\n\n return raster_data, header_data", "def load_image_patch(filename):\n im = Image.open(filename) # .convert('L')\n width, height = im.size\n pixels = list(im.getdata())\n features = [pixels[i * width:(i + 1) * width] for i in range(height)]\n features = np.asarray(im, dtype=np.float32).flatten()\n features /= 255.0\n return features", "def load_txt(file_path):\n lines = load_lines(file_path)\n\n if 'E' in lines[0]:\n dtype = np.float32\n else:\n dtype = np.int32\n\n data = list(map(str.split, lines))\n array = np.array(data, dtype=dtype)\n return array", "def load_picked_points(filename):\n\n f = open(filename, 'r')\n\n def get_num(string):\n pos1 = string.find('\\\"')\n pos2 = string.find('\\\"', pos1 + 1)\n return float(string[pos1 + 1:pos2])\n\n def get_point(str_array):\n if 'x=' in str_array[0] and 'y=' in str_array[1] and 'z=' in str_array[2]:\n return [get_num(str_array[0]), get_num(str_array[1]), get_num(str_array[2])]\n else:\n return []\n\n pickedPoints = []\n for line in f:\n if 'point' in line:\n str = line.split()\n if len(str) < 4:\n continue\n ix = [i for i, s in enumerate(str) if 'x=' in s][0]\n iy = [i for i, s in enumerate(str) if 'y=' in s][0]\n iz = [i for i, s in enumerate(str) if 'z=' in s][0]\n pickedPoints.append(get_point([str[ix], str[iy], str[iz]]))\n f.close()\n return np.array(pickedPoints)", "def fromfile(self, path):\n\t\tdata = filetools.read_data(path)\n\t\tprint \"File read: %i lines\" % len(data)\n\t\tself.build_matrix(data)", "def load_file(filename):\r\n file =np.genfromtxt(filename, delimiter=',')\r\n return file", "def from_file(cls, filename: str) -> \"NDOptimiser\":\n from autode.opt.coordinates.cartesian import CartesianCoordinates\n\n lines = open(filename, \"r\").readlines()\n n_atoms = int(lines[0].split()[0])\n\n title_line = NumericStringDict(lines[1])\n optimiser = cls(\n maxiter=int(title_line[\"maxiter\"]),\n gtol=GradientRMS(title_line[\"gtol\"]),\n 
etol=PotentialEnergy(title_line[\"etol\"]),\n )\n\n for i in range(0, len(lines), n_atoms + 2):\n raw_coordinates = np.zeros(shape=(n_atoms, 3))\n gradient = np.zeros(shape=(n_atoms, 3))\n\n for j, line in enumerate(lines[i + 2 : i + n_atoms + 2]):\n _, x, y, z, dedx, dedy, dedz = line.split()\n raw_coordinates[j, :] = [float(x), float(y), float(z)]\n gradient[j, :] = [float(dedx), float(dedy), float(dedz)]\n\n coords = CartesianCoordinates(raw_coordinates)\n coords.e = NumericStringDict(lines[i + 1])[\"E\"]\n coords.g = gradient.flatten()\n\n optimiser._history.append(coords)\n\n return optimiser", "def from_text_file(cls, filename):\n raise NotImplementedError()", "def load_data (filename):\n from StringIO import StringIO # StringIO behaves like a file object\n example_data = \"0.0001E-10 1.0E-9\\n0.5E-10 1.2E-9\\n1.0E-10 1.5E-9\\n1.5E-10 1.6E-9\\n2.0E-10 1.7E-9\\n \\\n 2.5E-10 1.8E-9\\n3.0E-10 1.7E-9\\n3.5E-10 1.6E-9\\n4.0E-10 1.5E-9\\n4.5E-10 1.2E-9\\n4.9999E-10 1.0E-9\"\n if (filename==None):\n FILE = StringIO(example_data)\n print \"Using example data\"\n else:\n FILE = open(filename,'r')\n print \"Loading data from %s\" % filename\n x,y = np.loadtxt(FILE, unpack=True)\n FILE.close()\n return(x,y)", "def fill(fname):\n return [[float(line.split()[-3]), float(line.split()[-1])]\\\n for line in open(fname).readlines()\\\n if FITRESRE1.match(line)]", "def read_txt(path):\n mz = []\n i = []\n with open(path) as f:\n for line in f:\n line = line.split()\n mz.append(float(line[0]))\n i.append(float(line[1]))\n return mz, i", "def read_image(filename, representation):\n\n color_flag = True #if RGB image\n image = imread(filename)\n\n float_image = image.astype(np.float64)\n\n if not np.all(image <= 1):\n float_image /= NORMALIZE #Normalized to range [0,1]\n\n if len(float_image.shape) != 3 : #Checks if RGB or Grayscale\n color_flag = False\n\n if color_flag and representation == 1 : #Checks if need RGB to Gray\n return skimage.color.rgb2gray(float_image)\n\n # Same coloring already\n return float_image", "def load_data(file_name):\r\n with open(str(file_name)) as f:\r\n file = f.readlines()\r\n label = file[0].strip().split(\",\")[1:-1]\r\n file = file[1:]\r\n row = len(file)\r\n col = len(file[0].strip().split(\",\"))-1\r\n mat = np.zeros((row, col))\r\n for i in range(len(file)):\r\n row = file[i].strip()\r\n cols = row.split(\",\")[1:]\r\n for j in range(len(cols)):\r\n mat[i][j] = int(cols[j])\r\n np.random.seed(10)\r\n np.random.shuffle(mat)\r\n X = mat[:, 0:-1]\r\n y = mat[:, -1]\r\n\r\n return X, y, label", "def read_ascii(file):\n wvlen, band, mag, emag, fmag, unit, beam, odate, ref = [],[],[],[],[],[],[],[],[]\n with open(file, 'r') as f_in:\n for line in f_in:\n try:\n # ensure line contains data:\n a = float(line[0])\n except ValueError:\n a = 'dummy'\n try:\n # ensure mag or flux entry is not '--'\n m = float(line.split(' ')[2])\n except ValueError:\n m = 'dummy'\n \n if isinstance(a, float) and isinstance(m, float):\n wvlen.append(float(line.strip().split(' ')[0])) # in metres\n band.append(line.strip().split(' ')[1])\n mag.append(float(line.strip().split(' ')[2]))\n emag.append(line.strip().split(' ')[3])\n fmag.append(line.strip().split(' ')[4])\n unit.append(line.strip().split(' ')[5])\n beam.append(line.strip().split(' ')[6])\n odate.append(line.strip().split(' ')[7])\n ref.append(line.strip().split(' ')[8])\n \n return wvlen, band, mag, emag, fmag, unit, beam, odate, ref", "def get_g(file_name):\n \n r,g = np.loadtxt(file_name, dtype = 'float', unpack = 'true')\n \n return 
r,g", "def initFromFile(self,file):\n self.source = file\n file_reader = open(file,\"r\")\n self.isInit = True\n lineCounter = 0\n firstLine = None\n SecondLine = None\n ThirdLine = None\n for line in file_reader:\n if(lineCounter == 0):\n firstLine = line.split()\n self.rowsNumber = int(firstLine[0])\n self.columnsNumber = int(firstLine[1])\n self.routerRangeRadius = int(firstLine[2])\n if(lineCounter == 1):\n SecondLine = line.split()\n self.backBoneCosts = int(SecondLine[0])\n Path.backBoneCost = self.backBoneCosts\n self.routerCosts = int(SecondLine[1])\n self.budget = int(SecondLine[2])\n if(lineCounter == 2):\n ThirdLine = line.split()\n self.firstCell = Cell(int(ThirdLine[0]),int(ThirdLine[1]))\n if(lineCounter>2):\n self.map.append([])\n LINE = line\n columnCounter = 0\n for char in LINE:\n temp = Cell(len(self.map)-1,columnCounter,Cell.getCellType(char))\n self.map[len(self.map)-1].append(temp)\n if(temp.cellType == \"FLOOR\"):\n self.notComputeRouter.append(temp)\n columnCounter += 1\n lineCounter +=1\n self.isInit = True", "def _parse_single(filename, label, image_size=IMAGE_SIZE):\n # Decode and convert image to appropriate type\n image = tf.image.decode_png(tf.read_file(filename), channels=image_size[2])\n image = tf.image.convert_image_dtype(image, tf.float32) # Also scales from [0, 255] to [0, 1)\n # Resize according to module requirements\n image = tf.image.resize_images(image, image_size[:2])\n return image, label", "def open_xy(data):\n twotheta, intensity = [], []\n with open(data) as f:\n for line in f:\n row = line.split()\n twotheta.append(row[0])\n intensity.append(row[1])\n xyarray = list(zip(twotheta, intensity))\n xyarray = np.asarray(xyarray)\n xyarray = xyarray.astype(np.float)\n return xyarray", "def from_file(file_handle, delimiter=\",\"):\r\n # Reading in the data\r\n line = file_handle.readline()\r\n line = line.rstrip(\"\\n\")\r\n line = line.split(delimiter)\r\n\r\n # Assigning the data\r\n label = line[0]\r\n mass = float(line[1])\r\n position = np.array(line[2:5], float)\r\n velocity = np.array(line[5:], float)\r\n\r\n return Particle3D(label, mass, position, velocity)", "def import_data(textfile, uncertainty):\n\tf = open(textfile, 'r')\n\tdata = f.readlines()\n\tx = []\n\ty = []\n\tfor line in data:\n\t\tcoords = line.strip()\n\t\tcoords = coords.split(', ')\n\t\tx.append(coords[0])\n\t\ty.append(coords[1]) \n\t\n\tx = [float(i) for i in x]\n\tx = np.array(x).reshape((10,1))\n\ty = [float(i) for i in y]\n\ty = np.array(y).reshape((10,1))\n\tdata = {'x': x, 'y': y, 'var': uncertainty}\n\n\treturn data", "def _image_set(gt_txt, images_dir):\n with open(gt_txt) as f:\n filename = f.readline().rstrip()\n total = 1\n while filename:\n log.debug(filename)\n image = Image(os.path.join(images_dir, filename), filename)\n face_num = int(f.readline().rstrip())\n\n if face_num == 0:\n log.warning('No faces for {}. 
Ignoring next line {}'.format(image.filename, f.readline().rstrip()))\n\n log.debug(face_num)\n for _ in range(face_num):\n anno = f.readline().rstrip().split()\n log.debug(anno)\n face = Face(anno)\n if face.is_valid():\n image.add_face(face)\n else:\n log.debug('Skipping INVALID %s from %s', face, image)\n filename = f.readline().rstrip()\n total += 1\n yield image", "def getFile(self):\r\n self.file_name=QtGui.QFileDialog.getOpenFileName(self, \"Open Image file\", self.path, \"*tif\")\r\n if self.file_name!='':\r\n \r\n self.img= skimage.io.imread(str(self.file_name), plugin='tifffile')\r\n \"\"\"sets self.img equal to the chosen image\"\"\"\r\n \r\n self.temp= interp_img(np.zeros(self.img.shape), self.zinterp)\r\n self.edge= interp_img(np.zeros(self.img.shape), self.zinterp)\r\n self.shrink= np.zeros(self.img.shape)\r\n self.count=0\r\n \r\n self.z_stack=self.img.shape[0]/2\r\n self.y_stack=self.img.shape[1]/2\r\n self.x_stack=self.img.shape[2]/2\r\n \r\n self.dispedge = to_rgb(self.img[self.z_stack])\r\n self.y_dispedge= to_rgb(self.img[:,self.y_stack,:])\r\n self.x_dispedge= to_rgb(self.img[:,:,self.x_stack])\r\n #self.pixmap=QtGui.QPixmap.fromImage(ImageQt.ImageQt(misc.toimage(self.img[self.z_stack]))).scaled(500,500)\r\n \r\n self.resetImages()", "def Read_txt_Image(Name):\r\n\ttry:\r\n\t\twith open(Name, 'r') as f: #If the file already exists\r\n\t\t\tx = f.readlines() #Readd all the rows and put them in a list\r\n\texcept: \r\n\t\twith open(Name, 'w') as f: #If the file does not exist\r\n\t\t\tx = [] #Create an empty list\r\n\treturn x #This is the list that contains all the information for the file\r", "def import_cif(file,xyz='',n=[0,0,1],rep=[1,1,1],pad=0,dopt='s',lfact=1.0,tail=''):\n crys = Crystal.from_cif(file)\n lat_vec = np.array(crys.lattice_vectors)\n lat_params = crys.lattice_parameters[:3]\n pattern = np.array([[a.atomic_number]+list(lfact*a.coords_cartesian)+[a.occupancy,1.0] for a in crys.atoms])\n\n if xyz:make_xyz(xyz,pattern,lat_vec,lat_params,n=n,pad=pad,rep=rep,fmt='%.4f',dopt=dopt)\n pattern[:,1:4] = rcc.orient_crystal(pattern[:,1:4],n_u=n) #,lat_params #pattern,crys # file\n return pattern", "def main(filename, resolution=0.3, verbose=False):\n\n np.random.seed(1986) # for reproducibility\n\n image_data = io.imread(filename)\n image_data = image_data[66:532, 105:671, :]\n image_data = rgb2gray(image_data)\n image_data = rescale(image_data, resolution, mode='constant',\n preserve_range='True')\n dim = np.int(reduce(lambda x, y: x * y, image_data.shape))\n image_data = np.reshape(image_data, (dim))\n image_data = np.array(image_data, dtype='f')\n\n return image_data", "def FillBox(file):\n with open(file,'r') as f:\n info = f.readline().split()\n box_list = []\n id = 1\n I_list =[]\n for i in range(0,len(info)):\n box_list.append(box(float(info[i]),id,0,[]))\n id+=1\n for line in f:\n words = line.split()\n I_list.append(items(words[0], float(words[1])))\n return box_list, I_list", "def readpil3d(self):\r\n\r\n # Read the data in as an array.\r\n res = np.loadtxt(self.name, delimiter=' ')\r\n\r\n # Split into useful chunks\r\n self.pos = res[:, 0:3] # Grid point locations\r\n self.Pn = res[:, 3:4] # Normal pressure [Pa]\r\n self.flux = res[:, -1] # Flux\r", "def readGR3File(inputFilename):\n print 'Reading ' + inputFilename + ' ...'\n infile = open(inputFilename, 'r')\n description = infile.readline().strip() # remove leading/trailing whitespace\n tmpStr = infile.readline()\n nTriangles, nNodes = (int(s) for s in tmpStr.split())\n print ' nTriangles={0:d} 
nNodes={1:d}'.format(nTriangles, nNodes)\n\n # nodes\n nodeArray = readNodeBlock(infile, nNodes)\n nodenum = np.array(nodeArray[:, 0].flatten(), dtype=int)\n nodexyz = np.zeros((nNodes, 3))\n nodexyz[:, :2] = nodeArray[:, 1:3]\n nodalValues = nodeArray[:, 3]\n\n print ' Nodal values min={0:g} max={1:g}'.format(min(nodalValues), max(nodalValues))\n\n # triangular elements\n triArray = readElemBlock(infile, nTriangles)\n\n trinum = triArray[:, 0].flatten()\n tritype = triArray[0, 1]\n trinodes = triArray[:, -3:] - 1 # three last columns, 0-based indexing\n #triangles = meshElements(trinodes,trinum,tritype)\n\n x = nodexyz[:, 0]\n y = nodexyz[:, 1]\n\n tmpStr = infile.readline()\n boundaries = []\n if len(tmpStr) > 0:\n # boundary information, if not end of file\n nOpenBndSegments = int(tmpStr.split()[0])\n nOpenBndNodesTot = int(infile.readline().split()[0])\n print ' nOpenBndSegments={0:d} nOpenBndNodesTot={1:d}'.format(nOpenBndSegments, nOpenBndNodesTot)\n for iBnd in range(nOpenBndSegments):\n bndHeader = infile.readline().split()\n nBndNodes = int(bndHeader[0])\n tag = bndHeader[-1]\n if tag.isdigit():\n tag = 'open' + tag\n print ' open bnd {0:d} {1:s}: {2:d} nodes'.format(iBnd + 1, tag, nBndNodes)\n tmpList = []\n for iN in range(nBndNodes):\n tmpList.append(int(infile.readline()))\n nodes = np.array(tmpList, dtype=int) - 1\n boundaries.append(meshContainer.meshBoundary('open', tag, nodes))\n nLandBndSegments = int(infile.readline().split()[0])\n nLandBndNodesTot = int(infile.readline().split()[0])\n landBndTags = range(\n nOpenBndSegments + 1,\n nOpenBndSegments + nLandBndSegments + 1)\n print ' nLandBndSegments={0:d} nLandBndNodesTot={1:d}'.format(nLandBndSegments, nLandBndNodesTot)\n for iBnd in range(nLandBndSegments):\n bndHeader = infile.readline().split()\n nBndNodes = int(bndHeader[0])\n try:\n landType = int(bndHeader[1])\n except:\n print \"\"\"Land boundary type missing in gr3 file. 
Add 0/1 (land/island) after number of nodes in each land boudary, e.g.\n 1002 = Total number of closed boundary nodes\n 501 0 = Number of nodes in closed boundary 1\"\"\"\n raise Exception(\n 'Could not parse land boundary type (0/1 - land/island)\\n')\n landType = 'island' if landType == 1 else 'land'\n tag = landType + bndHeader[-1]\n print ' land bnd {0:d} {1:s}: {2:d} nodes'.format(iBnd + 1, tag, nBndNodes)\n tmpList = []\n for iN in range(nBndNodes):\n tmpList.append(int(infile.readline()))\n #tmpList = fromfile(infile,dtype=int,count=nBndNodes,sep=' ')\n nodes = np.array(tmpList, dtype=int) - 1\n boundaries.append(meshContainer.meshBoundary(landType, tag, nodes))\n\n infile.close()\n\n # for better interpolation, round coordinates to 1e-4\n nDig = 4\n x = np.round(x, nDig)\n y = np.round(y, nDig)\n\n return x, y, nodalValues, trinodes, boundaries, description", "def read_file(file_name):\n with open(file_name) as _r:\n _pixlst = []\n for line in _r:\n if line != \"P3\\n\":\n _new = (line[0:len(line)-1]).split()\n for item in _new:\n _pixlst.append(int(item)) \n return _pixlst", "def create_from_file(cls, file_path: str) -> \"ImageClassifier\":\n base_options = _BaseOptions(file_name=file_path)\n options = ImageClassifierOptions(base_options=base_options)\n return cls.create_from_options(options)", "def from_file(path, scale):\n from imageio import imread\n imgarr = imread(path)\n s = imgarr.shape\n extx, exty = (s[1] * scale) / 2, (s[0] * scale) / 2\n ux, uy = e.arange(-extx, extx, scale), e.arange(-exty, exty, scale)\n return Convolvable(data=e.flip(imgarr, axis=0).astype(config.precision),\n x=ux, y=uy, has_analytic_ft=False)", "def flow_read(filename):\n TAG_FLOAT = 202021.25\n f = open(filename,'rb')\n check = np.fromfile(f,dtype=np.float32,count=1)[0]\n assert check == TAG_FLOAT, ' flow_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? '.format(TAG_FLOAT,check)\n width = np.fromfile(f,dtype=np.int32,count=1)[0]\n height = np.fromfile(f,dtype=np.int32,count=1)[0]\n size = width*height\n assert width > 0 and height > 0 and size > 1 and size < 100000000, ' flow_read:: Wrong input size (width = {0}, height = {1}).'.format(width,height)\n tmp = np.fromfile(f,dtype=np.float32,count=-1).reshape((height,width*2))\n u = tmp[:,np.arange(width)*2]\n v = tmp[:,np.arange(width)*2 + 1]\n return u,v", "def read_pixelmap(filename):\n\n # Open HDF5 pixelmap file\n try:\n with h5py.File(filename, 'r') as fp:\n x = fp['x'][:]\n y = fp['y'][:]\n except:\n print(\"Error reading the pixelmap file: \", filename)\n exit()\n\n # Correct for pixel size (meters --> pixels)\n # Currently hard coded for CSPAD\n # We can figure this out from the pixel map\n dx = 110e-6\n x /= dx\n y /= dx\n \n\n # Calculate radius\n r = numpy.sqrt(numpy.square(x) + numpy.square(y))\n\n return x, y, r, dx" ]
[ "0.6697791", "0.6425697", "0.6267778", "0.60752165", "0.6063262", "0.6061974", "0.5948842", "0.5867118", "0.5839898", "0.5778474", "0.57653004", "0.56806886", "0.5678753", "0.56717163", "0.56614995", "0.5646917", "0.5627825", "0.56230605", "0.561317", "0.56094795", "0.5598929", "0.5584855", "0.55793023", "0.555096", "0.5548226", "0.5520904", "0.55174875", "0.5508754", "0.5507941", "0.55027014", "0.5484065", "0.547281", "0.5469114", "0.5464049", "0.54579043", "0.5440993", "0.5435936", "0.5434437", "0.5429371", "0.5427784", "0.54247034", "0.54014575", "0.5398013", "0.53842825", "0.53822124", "0.5375317", "0.5375015", "0.53719795", "0.53710866", "0.5342403", "0.5333103", "0.533207", "0.53250206", "0.5323032", "0.5322411", "0.53110224", "0.5306975", "0.53062236", "0.52992445", "0.52990323", "0.52979016", "0.5294942", "0.5291765", "0.5281398", "0.5280882", "0.52801126", "0.52608144", "0.5259221", "0.52562314", "0.52553976", "0.5246886", "0.5229311", "0.52275926", "0.52258754", "0.52149963", "0.521143", "0.5211314", "0.52101225", "0.5207892", "0.5185755", "0.5183702", "0.5183646", "0.5173092", "0.51724", "0.5161866", "0.51614285", "0.5160522", "0.5159585", "0.5158374", "0.515755", "0.5155839", "0.51486665", "0.514842", "0.51478046", "0.51471776", "0.5145398", "0.5138135", "0.5136398", "0.5135012", "0.5132296" ]
0.7201921
0
Extract input numbers from a given string.
def get_input_from_string(str_input): from string import split res = [] list_input = split(str_input) for i in xrange(len(list_input)): res.append(float(list_input[i])) return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ExtractNumbers(s):\n\n t = s.strip('[]\\n')\n comma_space = r', '\n re_comma_space = re.compile(comma_space)\n z = re_comma_space.split(t)\n #print z\n return z", "def extract_nums(string):\r\n num_list = []\r\n mods1 = string.replace('$', '')\r\n mods2 = mods1.replace('K', '')\r\n for s in mods2.split(' '):\r\n try:\r\n num_list.append(int(s))\r\n except Exception:\r\n pass\r\n return np.mean(num_list)", "def find_numbers(text):\n result = []\n for word in text.split():\n if word.isdigit():\n result.append(int(word))\n return result", "def split_num(s):\n i = 0\n while i < len(s):\n if s[i] < '0' or s[i] > '9':\n break\n i += 1\n if s[i:]:\n return (int(s[:i]), s[i:], )\n return (int(s[:i]), )", "def get_numbers(string:str, type_=\"int\") -> list:\n \n num_list = []\n for word in string.split():\n if type_ == \"int\":\n try:\n num_list.append(int(word))\n except:\n pass\n elif type_ == \"float\":\n if isfloat(word):\n num_list.append(float(word))\n return num_list", "def _parse_numbers(self, numberstr: str):\n numbers = []\n currentnumber = \"\"\n\n for c in numberstr:\n if c.isdigit() or c == '-' or c == '.':\n currentnumber += c\n elif len(currentnumber) > 0:\n numbers.append(float(currentnumber))\n currentnumber = \"\"\n if len(currentnumber) > 0:\n numbers.append(float(currentnumber))\n\n return np.array(numbers)", "def find_number(self, string):\n #string = string.encode('ascii', 'ignore')\n #return int(filter(str.isdigit, string))\n s = (re.findall('\\d+', string))\n return int(''.join(s))", "def _parseNumbers(s):\n ss = utils.unclump(s)\n\n m3 = re.match('^\\d+$', ss)\n if m3 is not None:\n return decimal.Decimal(round(float(ss), 2))\n\n m1 = re.match(r'(\\d+)\\s+(\\d)/(\\d)', ss)\n if m1 is not None:\n num = int(m1.group(1)) + (float(m1.group(2)) / float(m1.group(3)))\n return decimal.Decimal(str(round(num, 2)))\n\n m2 = re.match(r'^(\\d)/(\\d)$', ss)\n if m2 is not None:\n num = float(m2.group(1)) / float(m2.group(2))\n return decimal.Decimal(str(round(num, 2)))\n\n return None", "def parse_input(string):\n return [int(vote) for vote in string.split()]", "def split_string_at_numbers(string):\n split_list = re.compile(r'(\\d+)').split(string)\n filtered_list = []\n skip_next_loops = 0\n for i in range(len(split_list)):\n if skip_next_loops > 0:\n skip_next_loops -= 1\n continue\n part = split_list[i]\n if part.isdigit() or (part == '.' and i < len(split_list) - 1 and split_list[i + 1].isdigit()):\n # Some kind of number\n if part == '.':\n # number of format '.###' (start of string)\n part += split_list[i + 1]\n skip_next_loops = 1\n elif i < len(split_list) - 2 and split_list[i + 1] == '.' and split_list[i + 2].isdigit():\n # number of format '###.###'\n part += split_list[i + 1] + split_list[i + 2]\n skip_next_loops = 2\n elif (i > 0 and len(filtered_list) and len(filtered_list[-1]) and\n filtered_list[-1][-1] == '.'):\n # number of format '.###' (within string)\n filtered_list[-1] = filtered_list[-1][:-1]\n part = '.' 
+ part\n # otherwise just number of format '###'\n factor = 1\n if i < len(split_list) - 1:\n # check next part for unit information\n msg = split_list[i + 1].strip()\n msg = msg.lstrip('_([{')\n msg = re.split('[^a-zA-Zµ]', msg)[0]\n if msg:\n for unit in tools.science.UNIT_SYMBOLS:\n if msg.endswith(unit):\n msg = msg[:-len(unit)]\n break\n if len(msg) == 1:\n factor = 10**tools.science.SI_PREFIXES.get(msg[0], 0)\n filtered_list.append(float(part)*factor)\n else:\n # Actual string\n filtered_list.append(part)\n return filtered_list", "def get_digits(string):\n digit_str= ''.join(filter(lambda x: x.isdigit(), string))\n return digit_str", "def split_str_digit(s):\n res = []\n for m in re.finditer('(\\d*)(\\D*)', s):\n for g in m.groups():\n if g != '':\n try:\n res.append(int(g))\n except ValueError:\n res.append(g)\n return tuple(res)", "def split_num(a_str):\n idx = None\n for i in iter(a_str):\n if i.isdigit():\n idx = a_str.index(i)\n break\n if idx == None:\n return (a_str[:idx], int('1'))\n else:\n return (a_str[:idx], int(a_str[idx:]))", "def parse_number(txt):\n return int(txt)", "def get_number(x):\n\n return re.findall(r'\\d+', x)[0]", "def extract_number(word):\n number_flag = True\n number = ''\n word = word.rstrip('.').lstrip('.')\n for char in word:\n try:\n if char == '.' and number_flag:\n number += char\n else:\n int(char)\n if number_flag:\n number += char\n except:\n if len(number) > 0:\n number_flag = False\n continue\n return number", "def extract_int(text):\n m = re.search(r\"\\d+\", text)\n if m is not None:\n return m.group(0)", "def extint(input):\n num = None\n try:\n num = int(''.join([s for s in input if s.isdigit()]))\n except ValueError:\n pass\n return num", "def get_num_from_string(string):\n\toutput = 0\n\tstring = string.lower()\n\tfor char in string:\n\t\tif char in letter_to_number:\n\t\t\toutput += letter_to_number[char]\n\t\telse:\n\t\t\tpass\n\treturn output", "def _number_finder(s, regex, numconv, py3_safe):\n\n # Split. 
If there are no splits, return now\n s = regex.split(s)\n if len(s) == 1:\n return tuple(s)\n\n # Now convert the numbers to numbers, and leave strings as strings\n s = remove_empty(s)\n for i in py23_range(len(s)):\n try:\n s[i] = numconv(s[i])\n except ValueError:\n pass\n\n # If the list begins with a number, lead with an empty string.\n # This is used to get around the \"unorderable types\" issue.\n # The _py3_safe function inserts \"\" between numbers in the list,\n # and is used to get around \"unorderable types\" in complex cases.\n # It is a separate function that needs to be requested specifically\n # because it is expensive to call.\n if not isinstance(s[0], py23_basestring):\n return _py3_safe([''] + s) if py3_safe else [''] + s\n else:\n return _py3_safe(s) if py3_safe else s", "def find_numbers(text):\n # -------------------------------------------------------------------------\n # Notice how expressive the list comprehension syntax is, in that it sounds\n # almost exactly the same as you would describe the problem in English.\n # I.e.\n # Convert each word to an integer, for every word in text split over\n # spaces, if the word is comprised only of digits.\n # \n # int(word) Convert each word to an integer,\n # for word for every word\n # in text.split() in text split over spaces\n # if text.isdigit() if the word is comprised only of digits.\n # -------------------------------------------------------------------------\n return [int(word) for word in text.split() if word.isdigit()]", "def extract_only_number(x):\n\tr = re.findall(r'\\d+', x)\n\tif len(r) > 0:\n\t\treturn r[0]\n\treturn 0", "def ints_in(x: str) -> list[int]:\n ex = r'(?:(?<!\\d)-)?\\d+'\n return ints(re.findall(ex, x))", "def input_parser(input_string: str) -> str: \n if is_int(input_string):\n return input_string\n #he is int, give back plz.\n else:\n try:\n modified_input: str = input_string.strip()\n\n evaluatable_pairs: str = regex_splitter(modified_input)\n\n while not (is_int(evaluatable_pairs)):\n evaluatable_pairs = regex_splitter(evaluatable_pairs)\n\n return (evaluatable_pairs)\n\n except:\n raise Exception(\"Invalid Input\")", "def extractDigits(key):\n text = \"\"\n digits = \"\"\n for c in key:\n if c in \"0123456789\":\n digits += c\n else:\n text += c\n return (text, 0 if not digits else int(digits))", "def parse_list(list_str):\n return list(map(int, re.findall(r'\\d+', list_str)))", "def find_numeric(text):\n text_digits = \\\n re.sub('[^\\d]', '', text)\n if not text_digits:\n return None\n try:\n return int(text_digits)\n except ValueError:\n return None", "def extract_numbers(frame):\n numeric_const_pattern = (\n r\"[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?\"\n )\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n frame = frame.applymap(lambda x: rx.findall(x)[0])\n\n return frame", "def parse_input(userstring):\n xsplit = userstring.split()\n stringtovalues = [float(x) for x in xsplit]\n\n return stringtovalues", "def _extract_num(self, text):\n try:\n if 'studio' in text.lower():\n return 0.0\n text = text.replace(',', '')\n pattern = r'[-+]?\\d*\\.\\d+|\\d+'\n result = re.findall(pattern, text)[0]\n return float(result)\n except:\n return np.nan", "def parseNumList(input):\n\tm = re.match(r'(\\d+)(?:-(\\d+))?(?:-(\\d+))?$', input)\n\t# ^ (or use .split('-'). anyway you like.)\n\tif not m:\n\t\traise ArgumentTypeError(\"'\" + input + \"' is not a range of number. 
Expected forms like '1-5' or '2' or '10-15-2'.\")\n\tstart = int(m.group(1))\n\tend = int(m.group(2))\n\tif m.group(3):\n\t\tincrement = int(m.group(3))\n\telse:\n\t\tincrement = 1\n\treturn list(range(start, end+1, increment))", "def find_int_in_str(s: str) -> int:\n \n i = int(re.search(r'\\d+', s).group())\n\n return i", "def parse_sub_num(s, parser):\n s = s.strip()\n if s == '*':\n return s\n nums = s.split(',')\n msg = 'Invalid sub-entry number.'\n res = set()\n for num in nums:\n num = num.strip()\n if num.isdigit():\n try:\n num = int(num)\n assert num > 0\n res.add(num)\n except:\n raise parser.error(msg)\n else:\n try:\n m = re.search('(\\d+)-(\\d+)', num)\n if m is None:\n raise parser.error(msg)\n else:\n a = int(m.group(1))\n b = int(m.group(2))\n assert a > 0\n assert b > 0\n assert a <= b\n r = range(a, b + 1)\n res.update(r)\n except:\n raise parser.error(msg)\n res = list(res)\n res.sort()\n return res", "def extract_digits(cls, phone_number):\n extracted_num = \"\"\n for ch in phone_number:\n if ch in cls.INTEGER_STRING:\n extracted_num += ch\n return extracted_num", "def parse_proasis(input_string):\n return (\n input_string[:3].strip(),\n int(input_string[5:].strip()),\n input_string[3:5].strip(),\n )", "def num(s, filt=float):\n if not s:\n return \"\"\n try:\n return filt(s)\n except ValueError:\n return \"\"", "def getNumFromString(self, string):\n \n m = re.search(r'\\d+$', string)\n if m is not None:\n return int(m.group())\n else:\n return 0", "def get_number(word):\n return int(re.match(NUMBER, word).group(1))", "def parse_pint_string(self, pint_string):\n val = pint_string.split(' ')[0]\n units = pint_string.split(val+' ')[-1]\n return val, units", "def extract_numbers_safe(cls, s, decimals=False):\n if decimals:\n tmp = ''.join([i for i in cls.escape(s) if ((i >= '0') and (i <= '9') or i == '.')])\n\n parts = tmp.split('.')\n\n try:\n output = '{a}.{b}'.format(a=parts[0], b=parts[1])\n except IndexError:\n output = parts[0]\n\n else:\n output = ''.join([i for i in cls.escape(s) if (i >= '0') and (i <= '9')])\n\n try:\n if s[0] == '-':\n output = '-{s}'.format(s=output)\n except:\n pass\n\n return output", "def integers_only(text) -> str:\n return ''.join(x for x in text if x.isdigit())", "def parse_number():\n nonlocal idx\n num = \"\"\n def parse_digits():\n nonlocal idx\n num = \"\"\n while idx < len(source) and is_num_char(source[idx]):\n num += source[idx]\n idx += 1\n return num\n # Parse initial numbers\n oidx = idx\n num += parse_digits()\n if idx < len(source) and source[idx] == '.': # if we find a dot\n # Parse out the second part of the number string\n idx += 1\n num += (\".\" + parse_digits())\n if idx < len(source) and not terminal(source[idx]): # the number didn't terminate... this is an identifier\n idx = oidx\n return parse_symbol()\n idx -= 1 # Backtrack, bc last character is *invalid* and loop assumes we stop on a valid token character\n return num", "def clean_numbers(text):\n return regex.sub(\"\\d+\", ' NUM', text)", "def _get_numbers(first=False):\n numbers = [int(v) for v in stdin.readline().split()]\n return numbers[0] if first else numbers", "def split_number(string):\n\ttry:\n\t\tparts = string.split('-')\n\texcept AttributeError:\n\t\ttry:\n\t\t\tstring * string\n\t\t\treturn ('', string)\n\t\texcept TypeError:\n\t\t\treturn None\n\t\n\t\t\n\tend = parts[-1]\n\tif '.' 
in end:\n\t\ttry:\n\t\t\tnum = float(end)\n\t\texcept:\n\t\t\tnum = None\n\telse:\n\t\ttry:\n\t\t\tnum = int(end)\n\t\texcept:\n\t\t\tnum = None\n\tif num is not None:\n\t\tparts.pop(-1)\n\treturn ('-'.join(parts), num)", "def string_to_digit(string, output):\n string = strip_space(string)\n if not string[0].isdigit() and not string[1].isdigit():\n return None\n\n string_items = []\n for index, item in enumerate(string):\n if item.isdigit():\n string_items.append(item)\n else:\n if item == ',':\n string_items.append('.')\n\n elif item == ' ' and string[index + 1].isdigit():\n pass\n\n elif not item.isdigit() and not string[index + 1].isdigit():\n break\n\n if '.' in string_items and output == int:\n return int(float(''.join(string_items)))\n\n return output(''.join(string_items))", "def get_nummeric_only(text):\n\n nummeric_string =\"\"\n \n for character in text:\n if character.isnumeric():\n \n nummeric_string+=character\n \n return nummeric_string", "def parse_input_1() -> List[int]:\n with open(\"./data/day_13.txt\", \"r\") as f:\n lines = [int(n) for line in f for n in line.split(\",\") if re.match(\"^\\d+\", n)]\n return lines", "def _parseNumber(self, str):\r\n\t\tif (str.count(\".\") == 0):\r\n\t\t\treturn int(str)\r\n\t\tif (str.count(\".\") == 1):\r\n\t\t\treturn float(str)\r\n\t\treturn str", "def __get_num_from_str(elements: list, string: str) -> str:\n\n num = list()\n\n element_list = list(elements)\n\n for atom in string.split('-'):\n\n if atom == '*':\n num.append('0')\n else:\n num.append(f'{element_list.index(atom) + 1}')\n\n return ' '.join(num)", "def parse_numeric(numeric: str):\r\n if numeric[0] == '-':\r\n polarity = 1\r\n numeric = numeric.lstrip('-')\r\n numeric = numeric.lstrip('0')\r\n else:\r\n polarity = 0\r\n\r\n digits = []\r\n for character in numeric:\r\n try:\r\n digits.append(int(character, 10))\r\n except ValueError:\r\n # (Bases 11 - 16) Hexadecimal replacement characters.\r\n if character in inv_extended_numerals.keys():\r\n digits.append(inv_extended_numerals[character.lower()])\r\n else:\r\n raise ValueError('Invalid numeric string. Can only use digits \\\r\n0 through 9 and hexadecimal a through f.')\r\n\r\n if len(digits) < 1:\r\n raise ValueError('Invalid numeric string. Must supply at least one digit.')\r\n else:\r\n return digits, polarity", "def string_to_numbers(str):\n return [ord(ch) - ord(\"a\") for ch in str]", "def extract_integers_from_argument(arg, expected=0):\n\n ss = arg.split(\",\")\n\n ns = [ int(s.strip()) for s in ss ]\n\n if ( expected > 0 and len(ns) != expected ):\n raise Exception(\"Expecting {} integers. {} extracted from {}. 
\".format(expected, len(ns), arg))\n\n return ns", "def words_to_numbers(text):\n number_found = 3\n for word, number in number_map.items():\n if word in text or str(number) in text:\n text = text.replace(word, str(number))\n number_found = number\n break\n return [text, number_found]", "def parseNumbers(equation):\r\n queue = createQueue()\r\n stack = None\r\n parts = equation.split(' ')\r\n for part in parts:\r\n enqueue(queue, part)\r\n stack = push(stack, part)\r\n return stack, queue", "def get_number(word, i_type='S'):\n\n resultdict = {}\n if word is None:\n return resultdict\n\n word = str(word)\n regexStr = None\n if i_type == 'S':\n regexStr = re.search(r'^[0-9\\-]+', word)\n else:\n regexStr = re.search(r'[0-9\\-]+', word)\n\n if regexStr is not None:\n # pdb.set_trace()\n numList = []\n if '-' in word:\n numList = word.split('-')\n else:\n numList.append(word)\n\n for idx, numWord in enumerate(numList):\n if idx > 1:\n resultdict = {}\n break\n \"\"\"\n Let's get number and suffix for number1\n and number2\n \"\"\"\n # to get the number\n regexNum = re.search(r'[0-9]+', numWord)\n key = 'number_' + str(idx + 1)\n if regexNum is not None:\n try:\n resultdict[key] = int(regexNum.group().split(' ')[0])\n except:\n pass\n # resultdict[key] = regexNum.group().split(' ')[0]\n\n # to get suffix\n regexSuff = re.search(r'[a-zA-Z]+', numWord)\n key = key + '_suff'\n if regexSuff:\n # resultdict[key] = regexSuff.group().split(' ')[0]\n \"\"\"\n dont think we should have suffix more than 1\n character\n there are few cases but we are ignoring them...\n \"\"\"\n suff = regexSuff.group().split(' ')[0]\n if i_type == 'S':\n if len(suff) == 1:\n resultdict[key] = suff\n else:\n resultdict = {}\n else:\n if len(suff) < 3:\n resultdict[key] = suff\n\n return resultdict", "def filter_and_sort_number_strings_as_numbers():\n# fill it out\n result = []\n for s in STRING_LIST:\n if (s.isnumeric()):\n result.append(s)\n return sorted(result, key = lambda s: int(s))", "def intparse(text):\n return int(text, 0)", "def find_only_numbers(detected_message_with_numbers):\n detected_message_only_numbers = re.sub(r\"[^\\d \\._]\", \"\", detected_message_with_numbers)\n return \" \".join(split_words(detected_message_only_numbers, only_unique=True))", "def parse_input(part=1):\n with open(\"input_23.txt\") as f:\n return [int(x) for x in f.readline().strip()]", "def _matching_ints(strings, regex):\n ints = []\n p = re.compile(regex)\n for string in strings:\n m = p.match(string)\n if m:\n integer = int(m.group(1))\n ints.append(integer)\n ints.sort()\n return ints", "def letter_to_num(string, dict_):\n patt = re.compile('[' + ''.join(dict_.keys()) + ']')\n num_string = patt.sub(lambda m: dict_[m.group(0)] + ' ', string)\n num = [int(i) for i in num_string.split()]\n return num", "def convert_numerals(input_str):\n # credit to: http://code.activestate.com/recipes/81611-roman-numerals/\n copy = input_str[:]\n copy = copy.split(\" \")\n\n nums = ['m', 'd', 'c', 'l', 'x', 'v', 'i']\n ints = [1000, 500, 100, 50, 10, 5, 1]\n places = []\n\n for i in range(len(copy)):\n is_valid = True\n\n if \".\" in copy[i]:\n copy[i] = copy[i].replace(\".\", \"\")\n else:\n # . 
must be appended to end of string to signify it is a roman\n # numeral\n is_valid = False\n\n if \"xix\" in copy[i] or \"xviii\" in copy[i]:\n is_valid = True\n\n for c in copy[i].lower():\n if c not in nums:\n # return original\n is_valid = False\n\n if is_valid is False:\n continue\n\n for char_index in range(len(copy[i])):\n c = copy[i][char_index].lower()\n value = ints[nums.index(c)]\n # If the next place holds a larger number, this value is negative.\n try:\n nextvalue = ints[nums.index(copy[i][char_index + 1].lower())]\n if nextvalue > value:\n value *= -1\n except IndexError:\n # there is no next place.\n pass\n places.append(value)\n\n out = 0\n\n for n in places:\n out += n\n\n copy[i] = str(out)\n\n return \" \".join(copy)", "def parse_reading(data):\n pat =re.compile('([1-9][0-9]*)')\n datum = data.split('\\n')\n #print datum\n for d in datum:\n m = pat.search(d)\n if m is not None:\n return float(m.group(1))\n return float(-1)", "def natural_key(string_):\n return [int(s) if s.isdigit() else s for s in re.split(r'(\\d+)', string_)]", "def natural_key(string_):\n return [int(s) if s.isdigit() else s for s in re.split(r'(\\d+)', string_)]", "def parse_input_2() -> List[int]:\n with open(\"./data/day_13.txt\", \"r\") as f:\n lines = [\n int(n) if re.match(\"^\\d+\", n) else 1 for line in f for n in line.split(\",\")\n ]\n return lines[1:]", "def _parse_number(stream):\n rv = \"\"\n while stream.peek() and stream.peek() in string.digits:\n rv += stream.next()\n\n return int(rv)", "def parse_selection(selection_str: str) -> List[int]:\n indices = []\n for group in selection_str.split(','):\n if not re.match(r'^(?:-?\\d+)|(?:\\d+(?:-\\d+))$', group):\n print(\"Invalid selection\", group)\n sys.exit()\n spl = group.split('-')\n if len(spl) == 1:\n indices.append(int(spl[0]))\n elif len(spl) == 2:\n begin = int(spl[0]) if spl[0] else 0\n end = int(spl[1])\n indices.extend(range(begin, end + 1))\n return indices", "def phoneNumberExtractor(self,data):\n\t\tdata = data.replace(\"\\r\", \" \")\n\t\tdata = data.replace(\"\\r\\n\", \" \")\n\n\t\t#first is identifying 10 digits code\n\t\tdata = data.split()\n\t\tresult = []\n\t\tfor word in data:\n\t\t\tres = None\n\t\t\tres = word if word.isdecimal() and len(word) == 10 and not res else res\n\t\t\tres = word[2:] if word.isdecimal() and len(word) == 12 and not res else res\n\t\t\tres = word[3:] if word[3:].isdecimal() and len(word) == 10 and not res else res\n\t\t\tif (\"(\" and \")\") in word or \"-\" in word:\n\t\t\t\tword = word.replace(\"(\",\"\")\n\t\t\t\tword = word.replace(\")\",\"\")\n\t\t\t\tword = word.replace (\"-\",\"\")\n\t\t\t\tres = word if(len(word) == 10) else None\n\t\t\tif res:\n\t\t\t\tresult.append(res)\n\t\t\t\tdel(res)\n\t\treturn set(result)", "def is_num_int_automata(final_word: str) -> list:\n\n if re.match(\"^(0|[1-9]\\d*)$\", final_word):\n token = 'num_int'\n return [True, token]\n else :\n return [False, \"\"]", "def extract_numbers_nl(text, short_scale=True, ordinals=False):\n results = _extract_numbers_with_text_nl(tokenize(text),\n short_scale, ordinals)\n return [float(result.value) for result in results]", "def _consume_number(infix_string:str,index:int,output:list) -> int:\r\n if not (infix_string[index].isdigit() or infix_string[index]==Roll._minus): # handle integers and dice rolls ('XdY')\r\n raise ValueError(f\"Unexpected value in number token '{infix_string[index]}'\")\r\n digit = \"\"\r\n has_mandatory_segment=False\r\n if infix_string[index]==Roll._minus:\r\n sign=1\r\n while 
index<len(infix_string) and infix_string[index]==Roll._minus:\r\n sign*=-1\r\n index+=1\r\n if sign<0:\r\n digit+=Roll._minus\r\n while index<len(infix_string) and infix_string[index].isdigit():\r\n has_mandatory_segment=True\r\n digit+=infix_string[index]\r\n index+=1\r\n if index<len(infix_string) and infix_string[index].lower()==Roll._dice_sep:\r\n digit+=infix_string[index].lower()\r\n index+=1\r\n has_mandatory_segment = False\r\n while index<len(infix_string) and infix_string[index].isdigit():\r\n has_mandatory_segment=True\r\n digit+=infix_string[index]\r\n index+=1\r\n if not has_mandatory_segment:\r\n raise ValueError(\"Dice rolls must be supplied with a fixed number of sides (format: 'XdY')\")\r\n output.append(digit)\r\n return index", "def split_str(str):\n \n logger = logging.getLogger(__name__)\n \n logger.debug('{0}'.format(str))\n \n match = re.match(r\"([0-9]+.?\\d{0,32}?)(d|m|s)\", str)\n \n if match:\n items = match.groups()\n \n return items[0], items[1]", "def parse_int_list(input_str):\n return [int(part) for part in input_str.split(\",\")]", "def convert_string_to_version_component_numbers(string: str) \\\n -> Optional[Sequence[int]]:\n\n def _get_valid_version(number: str) -> int:\n \"\"\"Get positive version from the number string.\"\"\"\n version = int(number)\n if version >= 0:\n return version\n raise ValueError(\"Each number version must be positive!\")\n\n try:\n component = list(map(_get_valid_version, string.strip().split('.')))\n return tuple(component[:3] + [0 for _ in range(3 - len(component))])\n except ValueError as e:\n exception(f'Invalid input: {string}\\n' + str(e))\n return None", "def get_number(text):\n# if (isinstance(text, str) or isinstance(text, unicode)):\n if True:\n text.replace(\",\",\".\")\n text = re.sub(\"\\xa0\",\"\", text)\n rst = re.findall(\"[0-9]+\\.{0,1}[0-9]*\", text)\n if rst:\n rst = rst[0]\n else:\n rst = \"nan\"\n else:\n rst = text\n try:\n rst = float(rst)\n except:\n rst = float(\"nan\")\n return(rst)", "def extract_journal(name):\n match = re.search(\"\\d+\", name)\n if match != None: \n return name[:match.start()], int(name[match.start(): match.end()])\n else: \n return \"\", 0", "def _parse_scan_number(self, string):\n \n # match scan number pattern\n match = SCAN_NUMBER_PATTERN.search(string)\n if not match:\n return None\n \n # return as int\n return int(match.group(1))", "def process_elem_string(string):\r\n\r\n elem = \"\"\r\n for i, c in enumerate(string):\r\n try:\r\n int(c)\r\n break\r\n except:\r\n elem += c\r\n\r\n return elem", "def _replace_numbers(self, data_item):\n\t\tnumber_pattern = re.compile(\"\\d+,\\d+|\\d+\\.\\d+|\\d+|\\d+\\.\\d+%?|\\d+%?\")\n\t\tnums = []\n\t\tinput_seq = []\n\t\t\n\t\tword_tokens = data_item.problem.strip().split()\n\n\t\t# Looking for numbers.\n\t\tfor word_token in word_tokens:\n\t\t\tnumbers_match = re.search(number_pattern, word_token)\n\t\t\tif numbers_match is not None:\n\t\t\t\t# If there are digits in the token, we need to replace them.\n\t\t\t\tif numbers_match.start() > 0:\n\t\t\t\t\tinput_seq.append(word_token[:numbers_match.start()])\n\n\t\t\t\tnum = word_token[numbers_match.start(): numbers_match.end()]\n\n\t\t\t\tnums.append(num) #.replace(\",\", \"\"))\n\t\t\t\tinput_seq.append(DataProcessor.NUM_REPLACER)\n\t\t\t\tif numbers_match.end() < len(word_token):\n\t\t\t\t\tinput_seq.append(word_token[numbers_match.end():])\n\t\t\telse:\n\t\t\t\t# There are no digits in the token, we can safely append it to the input 
sequence.\n\t\t\t\tinput_seq.append(word_token)\n\n\t\treturn nums, input_seq", "def challengeInput(self):\r\n firstLine = input().split(' ')\r\n secondLine = input().split(' ')\r\n numbers = []\r\n for s in firstLine:\r\n numbers.append(float(s) )\r\n for s in secondLine:\r\n numbers.append(float(s) )\r\n return numbers", "def str2num(s):\n if s.is_constant() and (s.is_float() or s.is_integer()):\n return True, s.compute_value()\n elif s.functor == \".\":\n values = term2list(s)\n numvalues = []\n for value in values:\n if isinstance(value, int) or isinstance(value, float):\n numvalues.append(value)\n else:\n return None, None\n return True, tuple(numvalues)\n else:\n return None, None", "def filter_and_sort_number_strings():\n# fill it out\n result = []\n for s in STRING_LIST:\n if (s.isnumeric()):\n result.append(s)\n return sorted(result)", "def parse(self, string):\n parse = re.match(\"^((?:[0-9]{1,3}\\.){3}[0-9]{1,3})\\s\\(((?:\\d)*\\.(?:\\d)*|(?:\\d)*)\\sms\\)$\", string)\n parse_result = parse.groups()\n return parse_result[0], parse_result[1]", "def question_11(int_string: str) -> int:\n counter = 0\n for i in int_string:\n if i.isdigit():\n counter += 1\n else:\n continue\n return counter", "def _listify_input(self, input_string):\n stripped_string = re.sub(r'\\s+', '', input_string.strip())\n split_list = stripped_string.split(\",\")\n return [(x[0], int(x[1::])) for x in split_list]", "def letter_to_num(self, string, dict_):\n #dict_= {'A': '0', 'C': '1', 'D': '2', 'E': '3', 'F': '4', 'G': '5', 'H': '6', 'I': '7', 'K': '8', 'L': '9', 'M': '10', 'N': '11', 'P': '12', 'Q': '13', 'R': '14', 'S': '15', 'T': '16', 'V': '17', 'W': '18', 'Y': '19'}\n patt = re.compile('[' + ''.join(dict_.keys()) + ']')\n num_string = patt.sub(lambda m: dict_[m.group(0)] + ' ', string)\n #print(num_string)\n #print(type(num_string))\n num = [int(i) for i in num_string.split()]\n return num", "def clean_eval(exp):\n\n # Split expression using '+' as our split token\n number_string = exp.split(\"+\")\n total = int()\n\n # Cost each number string to int, cleaning up leading zeros, then total\n for num in number_string:\n total += int(num)\n\n return total", "def numbers_check(string, logger_=_LOGGER):\n valid_regex_0 = r\"\\d\"\n valid_regex_1 = r\"_\\d+_\\d+_\"\n valid_regex_2 = r\"_\\d+_\"\n if not re.search(valid_regex_0, string):\n logger.log(\n level=\"warning\",\n message='There are no numbers in the string \"' + string + '\"',\n logger=logger_,\n )\n return string\n if re.search(valid_regex_1, string):\n return string\n elif re.search(valid_regex_2, string):\n return string\n else:\n logger.log(\n level=\"warning\",\n message='Numbers not in valid expression. Valid values are \"_(['\n '0-9]+)_([0-9]+)_\" or \"_([0-9]+)_\"',\n logger=logger_,\n )\n return string", "def stringToInts(string):\r\n ints=[];\r\n for char in string:\r\n ints.append(charToInt(char));\r\n return ints;", "def netflix_read(string):\n val = -1\n ind = -1\n string = string.strip()\n if string.isdigit():\n val = int(string)\n ind = 0\n elif string:\n val = int(string.strip(':'))\n ind = 1\n return (val, ind)", "def parse_number(self, word_list):\n\n self.skip(word_list, 'stop')\n\n next_word = self.peek(word_list)\n\n if next_word == 'number':\n return self.match(word_list, 'number')\n\n else:\n raise ParserError('Expected a number. 
Got a %s' % next_word)", "def numDecodings(self, s):\n if not s or s[0] == '0':return 0\n s1,s2 = 1,1\n for m in xrange(1,len(s)):\n if s[m] == '0':s2 = 0\n if s[m-1] == '1' or (s[m-1] == '2' and s[m] <= '6'):\n s2 += s1\n s1 = s2 - s1\n else:\n s1 = s2\n if s2 == 0:return 0\n return s2", "def _get_number_from_string(x):\n try:\n return float(x)\n except ValueError:\n raise ValueError('Unknown element')", "def numbers_in_line(line):\n ret = []\n pos = 0\n while True:\n loc = find_next_number(line, pos)\n if not loc:\n break\n ret.append(loc)\n pos = loc[1]\n return ret", "def sort_numbers(self, value): \n\n\t\tnumbers = re.compile(r'(\\d+)')\n\t\tparts = numbers.split(value)\n\t\tparts[1::2] = map(int, parts[1::2])\n\t\treturn parts", "def coords1(s: str) -> list[float]:\n return numarray(re.sub(SPACE, \"\", s).split(\",\"))", "def key_to_numeric(x):\n reg = re.compile(r'\\[(\\d+\\_?(\\d+)?)[a-z]?\\]')\n inspect = reg.search(x).groups(0)[0]\n if '_' in inspect:\n left, right = inspect.split('_')\n return int(left), int(right)\n else:\n return int(inspect), 0", "def split_numeric(self, text, parse=True):\n\n block = ''\n block_numeric = self.isnum(text[0])\n output = []\n for t in text:\n if self.isnum(t) == block_numeric:\n block += t\n else:\n if block_numeric:\n block = float(block)\n output.append(block)\n block = t\n block_numeric = self.isnum(t)\n if block_numeric:\n block = float(block)\n output.append(block)\n return output" ]
[ "0.7238783", "0.7173874", "0.7057713", "0.68924135", "0.68546575", "0.67531013", "0.67403173", "0.6647867", "0.657946", "0.6577491", "0.65233153", "0.65192056", "0.6391088", "0.638495", "0.6383692", "0.6383132", "0.6349406", "0.6332319", "0.6319145", "0.6285752", "0.6280346", "0.6221941", "0.62218493", "0.621139", "0.6188021", "0.61785156", "0.617077", "0.61674863", "0.61641425", "0.6102309", "0.61012894", "0.5982365", "0.5971672", "0.59562415", "0.5954015", "0.5953537", "0.5947074", "0.592817", "0.5905547", "0.5884285", "0.58802795", "0.5879899", "0.58785546", "0.5875031", "0.58504975", "0.5846208", "0.584069", "0.58372575", "0.5814535", "0.5814364", "0.5813299", "0.57896495", "0.5780152", "0.5776927", "0.57666546", "0.5760409", "0.57532895", "0.5742961", "0.5741771", "0.573804", "0.5728568", "0.57282877", "0.5719645", "0.57176596", "0.57123274", "0.57123274", "0.56728715", "0.5665439", "0.5662776", "0.5656335", "0.5653612", "0.56500393", "0.5642864", "0.56192243", "0.56083894", "0.5594436", "0.55940175", "0.55784106", "0.55755156", "0.5568493", "0.5561886", "0.5559031", "0.55586475", "0.5553483", "0.5553385", "0.55485785", "0.5547458", "0.55418897", "0.5530719", "0.55271083", "0.55267364", "0.5525461", "0.5523915", "0.5510533", "0.55050486", "0.5497604", "0.5494639", "0.54942364", "0.5492855", "0.54928243" ]
0.67156774
7
Calculate and print the descriptive statistics of an image.
def info(image, mask=None, Comment=""): if(Comment): print " *** ", Comment e = get_image(image) [mean, sigma, imin, imax] = Util.infomask(e, mask, True) nx = e.get_xsize() ny = e.get_ysize() nz = e.get_zsize() if (e.is_complex()): s = "" if e.is_shuffled(): s = " (shuffled)" if (e.is_fftodd()): print "Complex odd image%s: nx = %i, ny = %i, nz = %i" % (s, nx, ny, nz) else: print "Complex even image%s: nx = %i, ny = %i, nz = %i" % (s, nx, ny, nz) else: print "Real image: nx = %i, ny = %i, nz = %i" % (nx, ny, nz) print "avg = %g, std dev = %g, min = %g, max = %g" % (mean, sigma, imin, imax) return mean, sigma, imin, imax, nx, ny, nz
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_images_in_statistics(self):\n self._print_images_statistics(self._images_in_folder, self._pose_class_names)", "def print_image_info(image, resize=rsz_default, kernel=kernel_size):\n\tprint \"Image Size: {0}\".format(image.shape)\n\tprint \"Image Max: {0}\".format(image.max())\n\tprint \"Image Min: {0}\".format(image.min())\n\tprint \"Image Mean: {0}\".format(image.mean())\n\tprint \"Image dtype: {0}\\n\".format(image.dtype)\n\timage = to_uint8(image)\n\timage_prep = preprocess(image, resize=resize, kernel=kernel)\n\tcontour = get_contour(image_prep)\n\tM = get_image_moments(contour=contour)\n\tsecond_m = ['m20', 'm11', 'm02', 'm30', 'm21', 'm12', 'm03']\n\tprint \"Zero Order Moment: {0}\".format(M['m00'])\n\tprint \"First Order Moments: {0}, {1}\".format(M['m10'], M['m01'])\n\tprint \"Second Order Moments:\"\n\tsecond_m_str = ''\n\tfor m2 in second_m:\n\t\tsecond_m_str += \"{0},\".format(M[m2])\n\tprint second_m_str[:-1]", "def print_image_info(input_image):\n print()\n print(\"Basic Information on image: {}\".format(input_image.filename))\n print(\"Format: {}\".format(input_image.format))\n print(\"Mode: {}\".format(input_image.mode))\n print(\"Size: {}\".format(input_image.size))\n print(\"Width: {}\".format(input_image.width))\n print(\"Height: {}\".format(input_image.height))\n print(\"Palette: {}\".format(input_image.palette))\n print()", "def print_stats(cars, notcars):\n print(\"Number of car samples: {0}\".format(len(cars)))\n print(\"Number of non car samples: {0}\".format(len(notcars)))\n img = cv2.imread(cars[0])\n print(\"Image shape: {0}x{1}\".format(img.shape[0], img.shape[1]))\n print(\"Image datatype: {}\".format(img.dtype))", "def print_img_data(img,title='Image'):\n print(f'________ {title} _______')\n if len(image.shape) == 2:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0])}\\n')\n elif len(image.shape) == 3:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0,0])}\\n')", "def print_info(self, i):\n\n im_size = self.image_size(i)\n print 'The path of the image is: {}'.format(self.image_path_at(i))\n print 'width: {}, height: {}'.format(im_size[0], im_size[1])\n \n attr_i = self.gtdb['attr'][i, :]\n print 'The attributes are: {}'.format(','.join([self._classes[i] for i in np.where(attr_i==1)[0]]))", "def info_np(img):\n import numpy as np\n\n print ('Dimensions: ' + str(np.shape(img)))\n print ('Min value: ' + str(np.min(img)))\n print ('Avg value: ' + str(np.average(img)))\n print ('Med value: ' + str(np.median(img)))\n print ('Max value: ' + str(np.max(img)))\n print ('Std dev: ' + str(np.std(img)))\n print ('Sum: ' + str(np.sum(img)))", "def print_images_out_statistics(self):\n self._print_images_statistics(self._images_out_folder, self._pose_class_names)", "def image_info(img):\n\tprint(img.format)\n\tprint(img.size)\n\tprint(img.mode)", "def display_metrics2(self):\n messagebox.showinfo(\"Original Image Metrics\", self.raw_metrics)", "def testSampleImageStats(self):\n \n imgfiles = []\n imgfiles.append(\"v1_i1_g_m400_s20_f.fits\")\n imgfiles.append(\"v1_i1_g_m400_s20_u16.fits\")\n imgfiles.append(\"v1_i2_g_m400_s20_f.fits\")\n imgfiles.append(\"v1_i2_g_m400_s20_u16.fits\")\n imgfiles.append(\"v2_i1_p_m9_f.fits\")\n imgfiles.append(\"v2_i1_p_m9_u16.fits\")\n imgfiles.append(\"v2_i2_p_m9_f.fits\")\n imgfiles.append(\"v2_i2_p_m9_u16.fits\")\n\n afwdataDir = os.getenv(\"AFWDATA_DIR\")\n if not afwdataDir:\n print >> 
sys.stderr, \"Skipping tests as afwdata is not setup\"\n return\n \n for imgfile in imgfiles:\n \n imgPath = os.path.join(afwdataDir, \"Statistics\", imgfile)\n\n # get the image and header\n dimg = afwImage.DecoratedImageF(imgPath)\n fitsHdr = dimg.getMetadata()\n\n # get the true values of the mean and stdev\n trueMean = fitsHdr.getAsDouble(\"MEANCOMP\")\n trueStdev = fitsHdr.getAsDouble(\"SIGCOMP\")\n\n # measure the mean and stdev with the Statistics class\n img = dimg.getImage()\n statobj = afwMath.makeStatistics(img, afwMath.MEAN | afwMath.STDEV)\n mean = statobj.getValue(afwMath.MEAN)\n stdev = statobj.getValue(afwMath.STDEV)\n\n # print trueMean, mean, trueStdev, stdev\n self.assertAlmostEqual(mean, trueMean, 8)\n self.assertAlmostEqual(stdev, trueStdev, 8)", "def summary(self):\r\n self.base.summary()\r\n self.extra_layers.summary()\r\n self.detector.summary()", "def print_image_info(self):\r\n\r\n maxt = np.max(self.times)\r\n\r\n print (\" Duration of Image Stack: %9.3f s (%8.3f min) period = %8.3f s\" % (maxt, maxt/60.0, self.period))\r\n\r\n print (' Image shape: ', self.imageData.shape)\r\n\r\n print (' nFrames: %d framerate: %9.3f\\n' % (self.nFrames, self.framerate))", "def show_info(filename, abs_=None, center=None):\n fimage = FITSImage(filename)\n print(\"Image data shape: {0}\".format(fimage.shape))\n print(\"Image size: %dx%d\" % (fimage.Nx, fimage.Ny))\n print(\"Data unit: [%s]\" % fimage.bunit)\n pixelsize = fimage.pixelsize\n if pixelsize:\n print(\"Pixel size: %.1f [arcsec]\" % pixelsize)\n print(\"Field of view: (%.2f, %.2f) [deg]\" % fimage.fov)\n data = fimage.image\n if abs_:\n data = np.abs(data)\n if center:\n print(\"Central box size: %d\" % center)\n rows, cols = data.shape\n rc, cc = rows//2, cols//2\n cs1, cs2 = center//2, (center+1)//2\n data = data[(rc-cs1):(rc+cs2), (cc-cs1):(cc+cs2)]\n min_ = np.nanmin(data)\n max_ = np.nanmax(data)\n mean = np.nanmean(data)\n median = np.nanmedian(data)\n std = np.nanstd(data)\n iqr = np.diff(np.nanpercentile(data, q=(25, 75)))\n mad = np.nanmedian(np.abs(data - median))\n rms = np.sqrt(np.nanmean(data**2))\n print(\"min: %13.6e\" % min_)\n print(\"max: %13.6e\" % max_)\n print(\"range: %13.6e (max - min)\" % (max_ - min_))\n print(\"mean: %13.6e\" % mean)\n print(\"median: %13.6e\" % median)\n print(\"std: %13.6e (standard deviation)\" % std)\n print(\"iqr: %13.6e (interquartile range)\" % iqr)\n print(\"mad: %13.6e (median absolute deviation)\" % mad)\n print(\"rms: %13.6e (root-mean-squared)\" % rms)", "def image_summary(tag, image):\n tag = _clean_tag(tag)\n image = _prepare_image(image)\n image = _make_image(image)\n return Summary(value=[Summary.Value(tag=tag, image=image)])", "def scalarInfo(img, cnt):\n\tm = cntInfo(img, cnt)\n\td = {\"perimeter\":m[\"perimeter\"], \"oreientation\":m[\"orientation\"], \"solidity\":m[\"solidity\"],\"height\":m[\"height\"], \"extent\":m[\"extent\"], \"aspect ratio\":m[\"aspect ratio\"], \"area\":m[\"area\"], \"sum intensity\":m[\"sum intensity\"], \"width\":m[\"width\"], \"equivalent diameter\": m[\"equivalent diameter\"], \"mean intensity\": m[\"mean intensity\"]}\n\treturn d", "def image_summary(self, tag, images, step):\n\n for i, img in enumerate(images):\n # Write the image to a string\n try:\n s = StringIO()\n except:\n s = BytesIO()\n scipy.misc.toimage(img).save(s, format=\"png\")\n\n # Create an Image object as a Summary value\n with self.writer.as_default():\n tf.summary.image(name='%s/%d' % (tag, i), data=s.getvalue(), step=step)\n\n # Create and write Summary\n 
self.writer.flush()", "def get_statistics(path):\n images, masks = get_dataset(path)\n buildings = 0\n background = 0\n water = 0\n mean = np.zeros(3)\n std = np.zeros(3)\n\n with tqdm(\n total=len(images), desc=\"Getting statistics..\", leave=False, position=0\n ) as pbar:\n for i, m in zip(images, masks):\n image = Image.open(i)\n stat = ImageStat.Stat(image)\n mean = np.add(np.asarray(stat.mean), mean)\n std = np.add(np.asarray(stat.stddev), std)\n\n mask = Image.open(m)\n\n for c in mask.getcolors():\n if c[1] == 0:\n background += c[0]\n\n if c[1] == 127:\n water += c[0]\n\n if c[1] == 255:\n buildings += c[0]\n pbar.update()\n\n mean = np.divide(mean, len(images))\n std = np.divide(std, len(images))\n\n all_pixels = buildings + background + water\n buildings_perc = (buildings / all_pixels) * 100\n water_perc = (water / all_pixels) * 100\n background_perc = (background / all_pixels) * 100\n\n filename = os.path.join(path, \"myfile.txt\")\n\n with open(filename, \"w\") as file:\n file.write(\"Mean: {}\\n\".format(mean))\n file.write(\"Standard deviation: {}\\n\".format(std))\n\n file.write(\"Building pixels: {}\\n\".format(buildings))\n file.write(\"Water pixels: {}\\n\".format(water))\n file.write(\"Background pixels: {}\\n\".format(background))\n file.write(\"Building percentage: {}\\n\".format(buildings_perc))\n file.write(\"Water percentage: {}\\n\".format(water_perc))\n file.write(\"Background percentage: {}\\n\".format(background_perc))\n\n with open(filename, \"r\") as file_r:\n print(file_r.read())", "def dataset_statistics(dataset):\n print (dataset.describe())", "def get_image_stats(image, out_dir, cur_file):\n # Output directory\n output_base = osp.join(out_dir, cur_file.split('.')[0])\n os.mkdir(output_base)\n # Print dimensions of the image\n width, height, color = image.shape\n print('The resolution of the image if of {}x{}x{}'.format(width,\n height,\n color))\n print('Total of {} pixels'.format(width * height * color))\n\n # Get histogram\n print('Calculating histogram')\n flat_img = image.mean(axis=2).flatten()\n counts, bins = np.histogram(flat_img, range(257))\n plt.bar(bins[:-1], counts, width=1, edgecolor='none')\n output_file = osp.join(out_dir, output_base, 'histogram.png')\n plt.xlabel('Intensidad')\n plt.ylabel('Número de pixeles')\n print('Saving histogram')\n plt.savefig(output_file, bbox_inches='tight')\n plt.close()\n\n # LAB space\n lab_image = cv2.cvtColor(image[8000:8500, 8000:8500, :], cv2.COLOR_BGR2LAB)\n output_file = osp.join(out_dir, output_base, 'lab.png')\n cv2.imwrite(output_file, lab_image)\n output_file = osp.join(out_dir, output_base, 'original.png')\n cv2.imwrite(output_file, image[8000:8500, 8000:8500, :])", "def display_metrics3(self):\n messagebox.showinfo(\"Processed Image Metrics\", self.pro_metrics)", "def info(self):\n\n\t\tprint(\"Pixels on a side: {0}\".format(self.data.shape[0]))\n\t\tprint(\"Pixel size: {0}\".format(self.resolution))\n\t\tprint(\"Total angular size: {0}\".format(self.side_angle))\n\t\tprint(\"lmin={0:.1e} ; lmax={1:.1e}\".format(self.lmin,self.lmax))", "def dataset_statistics(dataset):\n print(dataset.describe())", "def dataset_statistics(dataset):\n print(dataset.describe())", "def print_image(img):\r\n # On affiche l'image\r\n plt.figure(figsize=(20, 5))\r\n plt.subplot(1, 2, 1)\r\n plt.imshow(img)\r\n # On affiche l'histogramme\r\n plt.subplot(1, 2, 2)\r\n plt.hist(img.flatten(), bins=range(256))\r\n plt.show()", "def _print_img_size(self, img):\n width, height = img.size\n print('{}, {}'.format(width, 
height))", "def info(self):\n\n print(\"pixellisation:\", self.pixel)\n print(\"number of components:\", self.ncomp)\n print(\"number of pixels:\", self.data.shape[:] if self.ncomp == 1 else self.data.shape[1:])\n print(\"nside:\", self.nside)\n print(\"geometry:\", self.geometry)\n print(\"coordinates:\", self.coordinate)", "def summary_stats(tile_summary):\n return \"Original Dimensions: %dx%d\\n\" % (tile_summary.orig_w, tile_summary.orig_h) + \\\n \"Original Tile Size: %dx%d\\n\" % (tile_summary.orig_tile_w, tile_summary.orig_tile_h) + \\\n \"Scale Factor: 1/%dx\\n\" % tile_summary.scale_factor + \\\n \"Scaled Dimensions: %dx%d\\n\" % (tile_summary.scaled_w, tile_summary.scaled_h) + \\\n \"Scaled Tile Size: %dx%d\\n\" % (tile_summary.scaled_tile_w, tile_summary.scaled_tile_w) + \\\n \"Total Mask: %3.2f%%, Total Tissue: %3.2f%%\\n\" % (\n tile_summary.mask_percentage(), tile_summary.tissue_percentage) + \\\n \"Tiles: %dx%d = %d\\n\" % (tile_summary.num_col_tiles, tile_summary.num_row_tiles, tile_summary.count) + \\\n \" %5d (%5.2f%%) tiles >=%d%% tissue\\n\" % (\n tile_summary.high, tile_summary.high / tile_summary.count * 100, TISSUE_HIGH_THRESH) + \\\n \" %5d (%5.2f%%) tiles >=%d%% and <%d%% tissue\\n\" % (\n tile_summary.medium, tile_summary.medium / tile_summary.count * 100, TISSUE_LOW_THRESH,\n TISSUE_HIGH_THRESH) + \\\n \" %5d (%5.2f%%) tiles >0%% and <%d%% tissue\\n\" % (\n tile_summary.low, tile_summary.low / tile_summary.count * 100, TISSUE_LOW_THRESH) + \\\n \" %5d (%5.2f%%) tiles =0%% tissue\" % (tile_summary.none, tile_summary.none / tile_summary.count * 100)", "def descriptive_stats(array, verbose=True, label='', mean=False, plot=False):\n if mean:\n mean_ = np.mean(array)\n median = np.median(array)\n mini = np.min(array)\n maxi = np.max(array)\n first_qu = np.percentile(array, 25)\n third_qu = np.percentile(array, 75)\n\n if verbose:\n if mean:\n label += 'min={:.1f} / 1st QU={:.1f} / ave={:.1f} / med={:.1f} / '\n label += '3rd QU={:.1f} / max={:.1f}'\n print(label.format(mini, first_qu, mean_, median, third_qu, maxi))\n else:\n label += 'min={:.1f} / 1st QU={:.1f} / med={:.1f} / 3rd QU={:.1f} '\n label += '/ max={:.1f}'\n print(label.format(mini, first_qu, median, third_qu, maxi))\n\n if plot:\n boxplot(array, vert=False, meanline=mean, showfliers=True, sym='.')\n\n if mean:\n return mini, first_qu, mean_, median, third_qu, maxi\n else:\n return mini, first_qu, median, third_qu, maxi", "def describe(self, image, mask=None):\n histogram = cv2.calcHist([image], [0, 1, 2], mask, self.bins, [0, 256, 0, 256, 0, 256])\n cv2.normalize(histogram, histogram)\n\n return histogram.flatten()", "def estimateInfo(Analysis, ImageData, diffract, print_opt=False): \n total_images = ImageData['totalImages'] \n\n for key in Analysis:\n Analysis[key]['cones'] = [1,2,3,4,5,6]\n Analysis[key]['info'] = np.zeros(len(\n Analysis[key]['cones']))\n\n for amp in ImageData['rawAmp']:\n\n for key in Analysis:\n ind = len(diffract['cpd'])\n fooInfo = info.SingleConeEntropyFunc((amp[ind] ** 2 *\n Analysis[key]['retina']), \n Analysis[key]['cones']) \n Analysis[key]['info'] += fooInfo / total_images\n\n if print_opt == True:\n print ' '\n print 'Information'\n print '------------'\n for key in Analysis:\n print key, ': ', Analysis[key]['info']\n\n return Analysis", "def print_results(self, final_table=None):\n\n assert self.info\n\n if not final_table:\n final_table = [\"\\n\\n{:-^80}\\n\".format(\"ANALYSIS OF RESULTS\")]\n\n if not self.info.categories[\"integrated\"]:\n final_table.append(\"NO IMAGES 
INTEGRATED!\")\n else:\n label_lens = [len(v[\"label\"]) for k, v in self.info.stats.items()]\n max_label = int(5 * round(float(np.max(label_lens)) / 5)) + 5\n for k, v in self.info.stats.items():\n if k in (\"lres\", \"res\", \"beamX\", \"beamY\"):\n continue\n line = (\n \"{: <{l}}: max = {:<6.2f} min = {:<6.2f} \"\n \"avg = {:<6.2f} ({:<6.2f})\"\n \"\".format(\n v[\"label\"], v[\"max\"], v[\"min\"], v[\"mean\"], v[\"std\"], l=max_label\n )\n )\n final_table.append(line)\n\n # TODO: Figure out what to do with summary charts\n # # If more than one integrated image, plot various summary graphs\n # if len(self.info.categories['integrated']) > 1:\n # plot = Plotter(self.params, self.info)\n # if self.params.analysis.summary_graphs:\n # if ( self.params.advanced.processing_backend == 'ha14' and\n # self.params.cctbx_ha14.grid_search.type is not None\n # ):\n # plot.plot_spotfinding_heatmap(write_files=True)\n # plot.plot_res_histogram(write_files=True)\n # med_beamX, med_beamY, pixel_size = plot.plot_beam_xy(write_files=True,\n # return_values=True)\n # else:\n # with warnings.catch_warnings():\n # # To catch any 'mean of empty slice' runtime warnings\n # warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n # beamXY_info = plot.calculate_beam_xy()\n # beamX, beamY = beamXY_info[:2]\n # med_beamX = np.median(beamX)\n # med_beamY = np.median(beamY)\n # pixel_size = beamXY_info[-1]\n\n final_table.append(\n \"{: <{l}}: X = {:<4.2f}, Y = {:<4.2f}\"\n \"\".format(\n \"Median Beam Center\",\n self.info.stats[\"beamX\"][\"mean\"],\n self.info.stats[\"beamY\"][\"mean\"],\n l=max_label,\n )\n )\n\n # Special entry for resolution last\n v = self.info.stats[\"res\"]\n final_table.append(\n \"{: <{l}}: low = {:<6.2f} high = {:<6.2f} \"\n \"avg = {:<6.2f} ({:<6.2f})\"\n \"\".format(\n v[\"label\"], v[\"max\"], v[\"min\"], v[\"mean\"], v[\"std\"], l=max_label\n )\n )\n\n for item in final_table:\n util.main_log(self.info.logfile, item, False)\n self.info.update(final_table=final_table)", "def getstats_fromimage(path_data, label, filename):\n path_image = get_path_image(path_data, label, filename)\n image = np.fromfile(path_image, np.float64)\n\n max_ = np.amax(image)\n min_ = np.amin(image)\n mean = np.mean(image)\n std = np.std(image)\n\n return max_, min_, mean, std", "def advancedStats():", "def compute_statistics(self):", "def image_summary(self, tag, images, step):\n\n img_summaries = []\n for i, img in enumerate(images):\n # Write the image to a string\n try:\n s = StringIO()\n except:\n s = BytesIO()\n scipy.misc.toimage(img).save(s, format=\"png\")\n\n # Create an Image object\n img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),\n height=img.shape[0],\n width=img.shape[1])\n # Create a Summary value\n img_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, i), image=img_sum))\n\n # Create and write Summary\n summary = tf.Summary(value=img_summaries)\n self.writer.add_summary(summary, step)", "def image_summary(self, tag, images, step):\n\n img_summaries = []\n for i, img in enumerate(images):\n # Write the image to a string\n try:\n s = StringIO()\n except:\n s = BytesIO()\n scipy.misc.toimage(img).save(s, format=\"png\")\n\n # Create an Image object\n img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),\n height=img.shape[0],\n width=img.shape[1])\n # Create a Summary value\n img_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, i), image=img_sum))\n\n # Create and write Summary\n summary = tf.Summary(value=img_summaries)\n self.writer.add_summary(summary, step)", 
"def print_summary(self, write_files=True):\n\n assert self.info\n\n if not self.info.categories[\"integrated\"]:\n util.main_log(\n self.info.logfile,\n \"NO IMAGES SUCCESSFULLY PROCESSSED!\",\n (not self.gui_mode),\n )\n return\n\n summary = []\n summary.append(\"\\n\\n{:-^80}\\n\".format(\"SUMMARY\"))\n categories = [\n \"total\",\n \"failed_triage\",\n \"have_diffraction\",\n \"failed_spotfinding\",\n \"failed_indexing\",\n \"failed_grid_search\",\n \"failed_integration\",\n \"failed_filter\",\n \"integrated\",\n ]\n for cat in categories:\n lst, fail, fn, _ = self.info.categories[cat]\n path = os.path.join(self.info.int_base, fn)\n if len(lst) > 0 or cat in (\"integrated\", \"diffraction\"):\n summary.append(\"{: <20}: {}\".format(\"{} \".format(fail), len(lst)))\n with open(path, \"w\") as cf:\n for item in lst:\n if isinstance(item, tuple) or isinstance(item, list):\n item = \", \".join([str(i) for i in item])\n cf.write(\"{}\\n\".format(item))\n if cat == \"integrated\" and write_files:\n if not hasattr(self, \"prime_data_path\"):\n self.prime_data_path = path\n\n summary.append(\"\\n\\nIOTA version {0}\".format(iota_version))\n summary.append(\"{}\\n\".format(now))\n\n for item in summary:\n util.main_log(self.info.logfile, \"{}\".format(item), False)\n self.info.update(summary=summary)", "def do_info (self, line) :\n\t\tprint\n\t\tprint get_info_string( self.__image )\n\t\tprint", "def summarize(self):\n # NOTE: should be moved to abstract superclass\n failcount = len(self.mosaictrees) - len(self)\n msg = \"Parsed %i mosaics from the FluoView project.\\n\\n\" % len(self)\n if failcount > 0:\n msg += (\n \"\\n==== WARNING ====== WARNING ====\\n\\n\"\n \"Parsing failed on %i mosaic(s). Missing files?\\n \"\n \"\\n==== WARNING ====== WARNING ====\\n\\n\\n\" % failcount\n )\n for mos in self:\n msg += \"Mosaic %i: \" % mos.supplement[\"index\"]\n msg += \"%i x %i tiles, \" % (mos.dim[\"X\"], mos.dim[\"Y\"])\n msg += \"%.1f%% overlap.\\n\" % mos.get_overlap()\n return msg", "def summarize(self):\n # NOTE: should be moved to abstract superclass\n failcount = len(self.mosaictrees) - len(self)\n msg = \"Parsed %i mosaics from the FluoView project.\\n\\n\" % len(self)\n if failcount > 0:\n msg += (\n \"\\n==== WARNING ====== WARNING ====\\n\\n\"\n \"Parsing failed on %i mosaic(s). 
Missing files?\\n \"\n \"\\n==== WARNING ====== WARNING ====\\n\\n\\n\" % failcount\n )\n for mos in self:\n msg += \"Mosaic %i: \" % mos.supplement[\"index\"]\n msg += \"%i x %i tiles, \" % (mos.dim[\"X\"], mos.dim[\"Y\"])\n msg += \"%.1f%% overlap.\\n\" % mos.get_overlap()\n return msg", "def print_summary(self):\n\t\t\n\t\tif not self.objects:\n\t\t\tsys.stderr.write(\"No objects.\\n\")\n\t\t\treturn\n\t\t\n\t\t# Summary header data\n\t\theader = (\"ok\", \"error\", \"zdata\", \"xdata\", \"odata\", \"ratio\")\n\t\t\n\t\t# Summary header format\n\t\tfield = \" %11s\"\n\t\tfmt = field * len(header)\n\t\twidth = len(field % \"\") * len(header)\n\t\ts_line = \"-\" * width\n\t\td_line = \"=\" * width\n\t\t\n\t\t# Verbose header data\n\t\tvheader = (\"ok?\", \"type\", \"id\", \"zdata\", \"xdata\", \"odata\", \"ratio\")\n\t\t\n\t\t# Verbose header format\n\t\tvfmt = \" %3s %7s\" + field * 5\n\t\t\n\t\t# Summary data\n\t\tc_ratio = None\n\t\to_ok = o_error = 0\n\t\tz_data_size = x_data_size = o_data_size = 0\n\t\t\n\t\tif self.verbose:\n\t\t\tprint vfmt % vheader\n\t\t\tprint s_line\n\t\t\n\t\t# Gather data from objects\n\t\tfor obj in self.objects:\n\t\t\tif obj.v_all:\n\t\t\t\to_ok += 1\n\t\t\t\tif obj.z_data_size: z_data_size += obj.z_data_size\n\t\t\t\tif obj.x_data_size: x_data_size += obj.x_data_size\n\t\t\t\tif obj.o_data_size: o_data_size += obj.o_data_size\n\t\t\telse:\n\t\t\t\to_error += 1\n\t\t\t\n\t\t\tif self.verbose:\n\t\t\t\tv_c_ratio = None\n\t\t\t\t\n\t\t\t\t# Calculate compression if possible\n\t\t\t\tif obj.z_data_size and obj.x_data_size:\n\t\t\t\t\tv_c_ratio = str(100 * obj.z_data_size / obj.x_data_size) + \"%\"\n\t\t\t\t\n\t\t\t\t# Build verbose data\n\t\t\t\tv_data = (\n\t\t\t\t\t\"[Y]\" if obj.v_all else \"[N]\",\n\t\t\t\t\tobj.o_data_type or \"N/A\",\n\t\t\t\t\tobj.id[:10],\n\t\t\t\t\tobj.z_data_size or \"N/A\",\n\t\t\t\t\tobj.x_data_size or \"N/A\",\n\t\t\t\t\tobj.o_data_size or \"N/A\",\n\t\t\t\t\tv_c_ratio or \"N/A\"\n\t\t\t\t)\n\t\t\t\t\n\t\t\t\t# Print verbose data\n\t\t\t\tprint vfmt % v_data\n\t\t\n\t\tif self.verbose:\n\t\t\tprint d_line\n\t\t\n\t\t# Calculate compression ratio\n\t\tif z_data_size and x_data_size:\n\t\t\tc_ratio = str(100 * z_data_size / x_data_size) + \"%\"\n\t\t\n\t\t# Print summary\n\t\tprint fmt % header\n\t\tprint s_line\n\t\tprint fmt % (o_ok, o_error, z_data_size, x_data_size, o_data_size, c_ratio)", "def main():\n base_dir = '/home/sjimenez/imagenes_prueba'\n out_dir = '/home/sjimenez/easy_analysis'\n for _, _, files in os.walk(base_dir, topdown=False):\n for f in files:\n print('--------- {} ---------'.format(f))\n act_dir = osp.join(base_dir, f)\n act_im = cv2.imread(act_dir)\n if act_im is not None:\n get_image_stats(act_im, out_dir, f)\n else:\n print('Not able to open the image')", "def summary(self):\n name = 'name : ' + self.get_name()\n damage = 'damage : ' + str(self.get_damage())\n ammos = 'ammo : ' + str(self.get_ammos())\n owner = 'owner : ' + str(self.get_owner())\n return '\\n'.join([name, damage, ammos, owner])", "def _stats(self):\n return (\"size = \" + str(self.size())\n + \"; height = \" + str(self.height()))", "def print_stats():\n if spritegroup_stats[0] > 0:\n generic.print_info(\"Concurrent spritegroups: {}/{} ({})\".format(spritegroup_stats[0], total_action2_ids, str(spritegroup_stats[1])))\n if a2register_stats[0] > 0:\n generic.print_info(\"Concurrent Action2 registers: {}/{} ({})\".format(a2register_stats[0], total_tmp_locations, str(a2register_stats[1])))", "def summary(self):\n raise NotImplementedError", 
"def describe():", "def describe(self):\n\n print(\"Correlation length: {0}\".format(self.cl))\n print(\"icoordchange: {0}\".format(self.icoordchange))\n print(\"ispec: {0}\".format(self.ispec))\n print(\"ireg: {0}\".format(self.ireg))\n print(\"Domain: x-axis: from {0} to {1} with {2} steps of {3}\".format(self.xori, self.xend,\n self.nx, self.dx))\n print(\"Domain: y-axis: from {0} to {1} with {2} steps of {3}\".format(self.yori, self.yend,\n self.ny, self.dy))\n print(\"Exclusion value: {0}\".format(self.valex))\n print(\"Signal-to-noise ratio: {0}\".format(self.snr))\n print(\"Variance of the background field: {0}\".format(self.varbak))", "def statistics(img1_blobs, img2_blobs, matches):\n\tstatistics = {}\n\n\tstatistics['#Interest Points in img1'] = len(img1_blobs)\n\tstatistics['#Interest Points in img2'] = len(img2_blobs)\n\tstatistics['Accepted Matches'] = len(matches)\n\tdissimilarity = [match[2] for match in matches]\n\tstatistics['Mean of accepted matches'] = sum(dissimilarity)/len(dissimilarity)\n\tstatistics['SD of accepted matches'] = np.std(dissimilarity)\n\treturn statistics", "def summary(self):\n print('est0: %s (%s) shape: %s' % (str(self.est0.name),\\\n str(self.est0.type_name),str(self.shape0)))\n print('est1: %s (%s) shape: %s' % (str(self.est1.name),\\\n str(self.est1.type_name),str(self.shape1)))", "def detail(self):\n return self.uniform(\"detail\",\n self.img_scale * .05,\n self.img_scale * .2)", "def analyze(self):\n try:\n self.options[self.multi_image][1]()\n except:\n raise Exception(\"Multi Image Option not defined.\")\n\n self.image = self.data / self.exposure\n\n background = self.min_val = np.min(self.image[:511,:511])\n self.max_val = np.max(self.image[:511,:511])\n # stats.mode returns modal value = value that occours most often\n #background = stats.mode(im[:50,:50].ravel())[0][0]\n\n intensity = self.image.sum() - background*np.size(self.image)\n\n #results.append((self.index, intensity, background))\n self.index =+ 1", "def info(self):\n\n print(\"pupil file =\", self.pupil_file)\n print(\"phase file =\", self.phase_file)\n print(\"wavelengths and weights =\")\n for i in range(len(self.filter[0])):\n print(\" %10.5f %6.4f\" % (self.filter[0][i], self.filter[1][i]))\n print(\"pupil diameter (meters) =\", self.D)\n if self.oversample == 2:\n print(\"oversampling factor = 2 (Nyquist sampling)\")\n else:\n r = float(self.oversample) / 2.\n print(\"oversampling factor = %d (%g * Nyquist sampling)\" % \\\n (self.oversample, r))\n if self.type == SINGLE_PREC:\n print(\"computations will use single precision\")\n else:\n print(\"computations will use double precision\")\n print(\"size of output image =\", self.output_size)\n if self.cdelt is not None:\n print(\"output pixel size (arcsec) =\", self.cdelt / ARCSECtoDEGREES)\n if self.output_written:\n print(\"The computed PSF has been written to the output file.\")\n else:\n print(\"The output file has not been written yet.\")", "def _printout_images_info(design_path):\r\n _max_pic_number = 8\r\n images = dict()\r\n for foo in os.listdir(design_path):\r\n abs_foo = os.path.join(design_path, foo)\r\n if os.path.isfile(abs_foo):\r\n continue\r\n if foo.endswith(\"Images\"):\r\n images.setdefault(foo, list())\r\n for bar in os.listdir(abs_foo):\r\n if bar.endswith(\".png\"):\r\n images[foo].append(bar)\r\n if images:\r\n for k, v in list(images.items()):\r\n v.sort(key=sort_by_num, reverse=True)\r\n nine_images = dict()\r\n images_number = 0\r\n for i in range(0, 10):\r\n if images_number > _max_pic_number:\r\n 
break\r\n for k, v in list(images.items()):\r\n nine_images.setdefault(k, list())\r\n try:\r\n nine_images[k].append(v[i])\r\n images_number += 1\r\n if images_number > _max_pic_number:\r\n break\r\n except IndexError:\r\n continue\r\n say_it(\"\")\r\n say_it(\"Images Number: {}\".format(images_number))\r\n ii = 1\r\n for kk, vv in list(nine_images.items()):\r\n for foo in vv:\r\n say_it(\"-PNG{}: {}/{}\".format(ii, kk, foo))\r\n ii += 1", "def tf_summary_image(image, boxes, name='image'):\n image = tf.expand_dims(image, 0)\n boxes = tf.expand_dims(boxes, 0)\n image_with_box = tf.image.draw_bounding_boxes(image, boxes)\n tf.summary.image(name, image_with_box)", "def printImage(imageObject):\n # TODO\n pass", "def print_statistics(session, batch_image, batch_label, cost, accuracy, type=\"VALIDATION\"):\n loss = session.run(\n cost,\n feed_dict={\n x: batch_image,\n y: batch_label,\n keep_prob: 1.0\n }\n )\n accuracy = session.run(\n accuracy,\n feed_dict={\n x: batch_image,\n y: batch_label,\n keep_prob: 1.0\n }\n )\n print(\"{} :: Loss = {} ; Accuracy = {} \".format(type, loss, accuracy))\n return loss, accuracy", "def print_file_distribution(num_imgs_total, num_imgs_train, num_imgs_val, num_imgs_test):\n print('Total images: ', num_imgs_total)\n print('Training: ', num_imgs_train)\n print('Validation: ', num_imgs_val)\n print('Testing: ', num_imgs_test)", "def info(self):\n ss = \"\\nSummary EffectiveArea2D info\\n\"\n ss += \"----------------\\n\"\n # Summarise data members\n ss += array_stats_str(self.energy, 'energy')\n ss += array_stats_str(self.offset, 'offset')\n ss += array_stats_str(self.eff_area, 'dispersion')\n\n return ss", "def main():\n \n # for inserting other images, add tem to /input folder and list them here\n images = (\n 'image-0',\n 'image-1',\n 'image-2'\n )\n\n for image_name in images:\n print(image_name, \"image:\")\n\n image = open_image(image_name)\n display_image(image, \"Original input \" + image_name)\n\n grayscale_v = transform_colors(image)\n display_image(grayscale_v[:,:,0], \"Grayscale \" + image_name)\n save_image(image_name + \"-grayscale\", grayscale_v[:,:,0])\n\n contours_v, contours = get_contours(grayscale_v)\n display_image(contours_v, \"Contours \" + image_name)\n save_image(image_name + \"-contours\", contours_v)\n\n labeled_img, areas = get_measures(image, contours[1:])\n display_image(labeled_img, \"Labeled \" + image_name)\n save_image(image_name + \"-labeled\", labeled_img)\n\n areas_histogram(areas, image_name)", "def summary(self):\n\n print(\"input label:\", self.__input_label)\n print(\"target label:\", self.__target_label)\n print(\"denoising label:\", self.denoising_label)\n print(\"contains a successful DE:\", self.is_successful())", "def img_series_stats(image_ccd_lst,plots_path,obsdate):\n median_count = []\n mean_count = []\n \n source_hdu = CCDData(image_ccd_lst[0],unit='adu')\n source_image_data = source_hdu.data.astype(float) \n source_image_hdr = source_hdu.header\n target_name = source_image_hdr['FIELD'].strip(' ')\n exptime = source_image_hdr['EXPTIME']\n chip_num = source_image_hdr['CHIP']\n \n for a_file in image_ccd_lst:\n hdu = CCDData(a_file,unit='adu')\n image_data = hdu.data.astype(float) \n image_hdr = hdu.header\n \n median_count.append(np.median(a_file))\n mean_count.append(np.mean(a_file))\n \n min_count_for_median = np.min(median_count)\n min_count_for_mean = np.min(mean_count)\n max_count_for_median = np.max(median_count)\n max_count_for_mean = np.max(mean_count)\n \n plt.figure()\n plt.plot(mean_count, 
label='mean',color=\"palevioletred\")\n plt.axhline(y=min_count_for_mean,linestyle='-',linewidth=0.5,color='blue',label='min mean {:.2f}'.format(min_count_for_mean),alpha=1)\n plt.axhline(y=max_count_for_mean,linestyle='-',linewidth=0.5,color='blue',label='max mean {:.2f}'.format(max_count_for_mean),alpha=1)\n plt.xlabel('Image number')\n plt.ylabel('Count (ADU)')\n plt.title('Mean pixel value for aligned images')\n plt.legend()\n plt.grid()\n plt.savefig(plots_path/\"{}-{}-{}-aligned_stats_mean.jpg\".format(obsdate,\n target_name,\n exptime,chip_num),\n dpi=900)\n plt.show()\n\n plt.figure()\n plt.plot(median_count, label='median',color=\"darkviolet\")\n plt.axhline(y=min_count_for_median,linestyle='-',linewidth=0.5,color='red',label='min median {:.2f}'.format(min_count_for_median),alpha=1)\n plt.axhline(y=max_count_for_median,linestyle='-',linewidth=0.5,color='red',label='max median {:.2f}'.format(max_count_for_median),alpha=1) \n plt.xlabel('Image number')\n plt.ylabel('Count (ADU)')\n plt.title('Median pixel value for aligned images')\n plt.legend()\n plt.grid()\n plt.savefig(plots_path/\"{}-{}-{}-aligned_stats_median.jpg\".format(obsdate,\n target_name,\n exptime,chip_num),\n dpi=900)\n plt.show()", "def identify_image(im):\n score_cures = np.mean(im[1025:1065, 1130:1180, 0])\n score_ingredients = np.mean(im[1025:1065, 675:720, 0])\n if score_cures < 177.5:\n return 'cures'\n if score_ingredients < 177.5:\n return 'ingredients'\n else:\n return 'other'", "def statistics(self, **kwargs) -> None:\n print(\n tabulate.tabulate(\n list(self._iter_statistics(**kwargs)),\n headers=[\"path\", \"type\", \"occurences\", \"%\"],\n floatfmt=\".3f\",\n )\n )", "def summary(self):\n self.tiles.refreshnames()\n self.glues.refreshnames()\n # self.check_consistent()\n info = {\n \"ntiles\": len(self.tiles),\n \"nrt\": len([x for x in self.tiles if not x.is_fake]),\n \"nft\": len([x for x in self.tiles if x.is_fake]),\n \"nends\": len(self.glues),\n \"ntends\": len(self.tiles.glues_from_tiles()),\n \"tns\": \" \".join(x.name for x in self.tiles if x.name),\n \"ens\": \" \".join(x.name for x in self.glues if x.name)\n # if (\"info\" in self.keys() and \"name\" in self[\"info\"].keys())\n # else \"\",\n }\n tun = sum(1 for x in self.tiles if x.name is None)\n if tun > 0:\n info[\"tns\"] += \" ({} unnamed)\".format(tun)\n eun = sum(1 for x in self.glues if x.name is None)\n if eun > 0:\n info[\"ens\"] += \" ({} unnamed)\".format(eun)\n if info[\"nft\"] > 0:\n info[\"nft\"] = \" (+ {} fake)\".format(info[\"nft\"])\n else:\n info[\"nft\"] = \"\"\n return \"TileSet: {nrt} tiles{nft}, {nends} ends, {ntends} ends in tiles.\\nTiles: {tns}\\nEnds: {ens}\".format(\n **info\n )", "def printSummary(self):\n pass", "def print_summary(metrics_list, labels_list):\n for metric, name in zip(metrics_list, labels_list):\n print('*' * 108)\n print(name)\n mean_inc_acc = []\n for i in range(metric.shape[0]):\n print('\\t', end='')\n for j in range(metric.shape[1]):\n print('{:5.2f}% '.format(100 * metric[i, j]), end='')\n if np.trace(metric) == 0.0:\n if i > 0:\n avg = 100 * metric[i, :i].mean()\n mean_inc_acc += [avg]\n print('\\tAvg.:{:5.2f}% '.format(avg), end='')\n else:\n avg = 100 * metric[i, :i + 1].mean()\n mean_inc_acc += [avg]\n print('\\tAvg.:{:5.2f}% '.format(avg), end='')\n print()\n print()\n\n # Computing AIA across all incremental states (thus excluding the first non-incremental state)\n print('\\tMean Incremental Acc.: {:5.2f}%'.format(np.mean(mean_inc_acc[1:])))\n print('*' * 108)", "def summary(self):\n 
name = 'name : ' + self.get_name()\n description = 'description : ' + self.get_description()\n agility = 'agility : ' + str(self.get_agility())\n strength = 'strength : ' + str(self.get_strength())\n health_points = 'health_points : ' + str(self.get_health_points())\n summary = '\\n'.join([name, description, agility, strength, health_points])\n if self.take_weapon():\n summary += self.take_weapon().summary()\n return summary", "def print_stats(ds):\n print(\"Dataset Name: \" + ds.name)\n print(\"Dataset Mode: \" + ds.mode)\n print(\"Band Count: \" + str(ds.count))\n print(\"Dataset Width: \" + str(ds.width))\n print(\"Dataset Height: \" + str(ds.height))\n print(\"Dataset Bounds: \", ds.bounds)\n print(\"Dataset Transform: \", ds.transform)\n ul = ds.transform * (0, 0)\n print(\"Upper Left Corner: \", ul)\n lr = ds.transform * (ds.width, ds.height)\n print(\"Lower Right Corner: \", lr)\n {i: dtype for i, dtype in zip(ds.indexes, ds.dtypes)}", "def print_summary(self):\n self.model.summary()", "def summary(self):\n return self.pfm", "def generate_image_info(path):\n file_types = ['*.png', '*.jpg', '*.gif']\n for file_type in file_types:\n for img_path in glob.glob(path + file_type):\n img = Image.open(img_path)\n img_name = img_path.split('/')[-1].split('.')[0]\n with open(path + 'resolution.txt', 'a') as file:\n file.write(img_name + ' ' + str(img.size[0]) +\n ' ' + str(img.size[1]) + '\\n')", "def show_stats(self):\n print(\"\\nName: \" + self.name)\n print(\"Element Type: \" + self.element)\n print(\"Health: \" + str(self.current_health) + \" / \" + str(self.max_health))\n print(\"Speed: \" + str(self.speed))", "def calculate_stats(images, global_normalization=True):\n if global_normalization:\n # flatten first since the images might not be the same size\n flat = np.concatenate(\n [img.ravel() for img in images]\n )\n pmin, pmax, mean, std = np.percentile(flat, 1), np.percentile(flat, 99.6), np.mean(flat), np.std(flat)\n else:\n pmin, pmax, mean, std = None, None, None, None\n\n return {\n 'pmin': pmin,\n 'pmax': pmax,\n 'mean': mean,\n 'std': std\n }", "def print_summary_stats(self) -> None:\n print(\"Number of Users: {}\".format(len(self.all_users)))\n print(\"Number of Utterances: {}\".format(len(self.utterances)))\n print(\"Number of Conversations: {}\".format(len(self.conversations)))", "def print_summary(stim_table):\n print(\n '{:<20}{:>15}{:>15}\\n'.format('Colname', 'No. conditions', 'Mean N/cond')\n )\n for colname in stim_table.columns:\n conditions, occurrences = np.unique(\n np.nan_to_num(stim_table[colname]), return_counts = True\n )\n print(\n '{:<20}{:>15}{:>15.1f}'.format(\n colname, len(conditions), np.mean(occurrences)\n )\n )", "def show_summary(self) -> None:\n all_averages = []\n\n for i in self.album_statistics.values():\n try:\n all_averages.append(i['avg'])\n except (TypeError, ValueError):\n pass\n # print(all_averages)\n try:\n final_average = math.ceil(np.mean(all_averages))\n except ValueError:\n click.echo(\n 'Oops! https://lyrics.ovh couldn\\'t find any lyrics across any'\n ' album. This is caused by inconsistent Artist names from'\n ' Musicbrainz and lyrics.ovh. 
Try another artist.'\n )\n raise (SystemExit)\n output = BeautifulTable(max_width=200)\n output.set_style(BeautifulTable.STYLE_BOX_ROUNDED)\n output.column_headers = [\n 'Average number of words in tracks across all albums\\n'\n f'for {self.artist}'\n ]\n output.append_row([final_average])\n click.echo(output)\n\n return self", "def summary(self, yname=None, xname=None, title=None, alpha=.05):\n # TODO: Make this raise upstream instead of just \"pass\"\n raise NotImplementedError # pragma: no cover\n # TODO: move the GenericLikelihoodModelResults implementation here?", "def stats(self):", "def visual_image(img, annos, save_path, ratio=None, height=None, width=None, name=None, score_threshold=0.01):\n # annos: list type, in which all the element is dict\n h, w = img.shape[0], img.shape[1]\n if height is not None and width is not None and (height != h or width != w):\n img, annos = resize_image(img, annos, width, height)\n elif ratio not in (None, 1):\n img, annos = resize_image(img, annos, w * ratio, h * ratio)\n\n h, w = img.shape[0], img.shape[1]\n num_objects = len(annos)\n num = 0\n\n def define_color(pair):\n \"\"\"define line color\"\"\"\n left_part = [0, 1, 3, 5, 7, 9, 11, 13, 15]\n right_part = [0, 2, 4, 6, 8, 10, 12, 14, 16]\n if pair[0] in left_part and pair[1] in left_part:\n color = (255, 0, 0)\n elif pair[0] in right_part and pair[1] in right_part:\n color = (0, 0, 255)\n else:\n color = (139, 0, 255)\n return color\n\n def visible(a, w, h):\n return a[0] >= 0 and a[0] < w and a[1] >= 0 and a[1] < h\n\n for i in range(num_objects):\n ann = annos[i]\n bbox = coco_box_to_bbox(ann['bbox'])\n if \"score\" in ann and (ann[\"score\"] >= score_threshold or num == 0):\n num += 1\n txt = (\"p\" + \"{:.2f}\".format(ann[\"score\"]))\n cv2.putText(img, txt, (bbox[0], bbox[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)\n\n ct = (int((bbox[0] + bbox[2]) / 2), int((bbox[1] + bbox[3]) / 2))\n cv2.circle(img, ct, 2, (0, 255, 0), thickness=-1, lineType=cv2.FILLED)\n bbox = np.array(bbox, dtype=np.int32).tolist()\n cv2.rectangle(img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 2)\n\n keypoints = ann[\"keypoints\"]\n keypoints = np.array(keypoints, dtype=np.int32).reshape(_NUM_JOINTS, 3).tolist()\n\n for pair in data_cfg.edges:\n partA = pair[0]\n partB = pair[1]\n color = define_color(pair)\n p_a = tuple(keypoints[partA][:2])\n p_b = tuple(keypoints[partB][:2])\n mask_a = keypoints[partA][2]\n mask_b = keypoints[partB][2]\n if (visible(p_a, w, h) and visible(p_b, w, h) and mask_a * mask_b > 0):\n cv2.line(img, p_a, p_b, color, 2)\n cv2.circle(img, p_a, 3, color, thickness=-1, lineType=cv2.FILLED)\n cv2.circle(img, p_b, 3, color, thickness=-1, lineType=cv2.FILLED)\n\n img_id = annos[0][\"image_id\"] if annos and \"image_id\" in annos[0] else random.randint(0, 9999999)\n image_name = \"cv_image_\" + str(img_id) + \".png\" if name is None else \"cv_image_\" + str(img_id) + name + \".png\"\n cv2.imwrite(\"{}/{}\".format(save_path, image_name), img)", "def summary(self, printed=True):\n raise NotImplementedError", "def create_summary(image_summaries, scalar_summaries):\n for key, value in image_summaries.items():\n tf.summary.image(key, unscale(value))\n for key, value in scalar_summaries.items():\n tf.summary.scalar(key, value)\n return tf.summary.merge_all()", "def summary(self) -> str:\n pass", "def imdisplay(filename, representation):\n\n image = read_image(filename, representation)\n plt.imshow(image, cmap=\"gray\")\n plt.show()", "def cntInfo(img, cnt):\n\tpts = 
extremePoints(cnt)\n\troi = crop(img, pts[\"L\"][0], pts[\"T\"][1], pts[\"R\"][0], pts[\"B\"][1])\n\tm = minMaxLoc(roi)\n\tm[\"minLoc\"] = (m[\"minLoc\"][0] + pts[\"L\"][0], m[\"minLoc\"][1] + pts[\"T\"][1])\n\tm[\"maxLoc\"] = (m[\"maxLoc\"][0] + pts[\"L\"][0], m[\"maxLoc\"][1] + pts[\"T\"][1])\n\tcross = abs(pts[\"L\"][0] - pts[\"R\"][0])\n\theight = abs(pts[\"T\"][1] - pts[\"B\"][1])\n\tcent = centroid(cnt)\n\tangle = orientation(cnt)\n\tareaVal = area(cnt)\n\tper = perimeter(cnt)\n\tar = aspectRatio(cnt)\n\text = extent(cnt)\n\tsold = solidity(cnt)\n\teqD = equivalentDiameter(cnt)\n\tme = meanVal(grayscale(roi))\n\tsu = sumPixel(grayscale(roi))\n\td = {\"sum intensity\":su, \"mean intensity\":me, \"area\":areaVal, \"perimeter\":per, \"aspect ratio\":ar, \"extent\":ext,\"solidity\":sold, \"equivalent diameter\":eqD, \"width\": cross, \"height\" : height, \"centroid\" : cent, \"extrema\" : pts, \"min\":m[\"minLoc\"], \"max\":m[\"maxLoc\"], \"orientation\" : angle}\n\treturn d", "def summary_images(\n cover,\n encoded,\n transmitted_encoded,\n transmitted_cover,\n step,\n transform_fn=None):\n\n imgs = prep_imgs_to_plot(\n cover, encoded, transmitted_encoded, transmitted_cover, transform_fn)\n\n images_to_plot = imgs['images']\n\n names_to_plot = imgs['names']\n\n descriptions_to_plot = imgs['descriptions']\n\n for i, name in enumerate(names_to_plot):\n tf.summary.image(\n name=name,\n data=images_to_plot[i],\n step=step,\n max_outputs=6,\n description=descriptions_to_plot[i]\n )", "def populate_image_stats(self, image):\n ti = image\n image_data = ti.data\n if not ti.data:\n return ti\n ti.size = len(image_data)\n try:\n with connect(Blobby) as c:\n ti.shahash = c.get_data_bhash(image_data)\n except o.Exception, ex:\n raise o.Exception('oException getting shahash: %s' % ex.msg)\n except Exception, ex:\n raise o.Exception('Exception getting shahash: %s' % ex)\n\n try:\n b = StringIO(image_data)\n img = Image.open(b)\n except Exception, ex:\n raise o.Exception('Exception getting PIL img: %s' % ex)\n try:\n ti.xdim, ti.ydim = img.size\n except Exception, ex:\n raise o.Exception('Exception getting dimensions: %s' % ex)\n try:\n ti.vhash = str(average_hash(img))\n except Exception, ex:\n raise o.Exception('Exception getting vhash: %s' % ex)\n\n return ti", "def _describe(self) -> Dict[str, Any]:", "def display(self, image):\n raise NotImplementedError()", "def print_summary(self):\n #outcomes = self.get_outcomes()\n #passes = 'Passes: %i' % sum(1 for outcome in outcomes if outcome == Result.PASS)\n #untested = 'Untested: %i' % sum(1 for outcome in outcomes if outcome == Result.UNTESTED)\n #errors = 'Errors: %i' % sum(1 for outcome in outcomes if outcome == Result.ERROR)\n #fails = 'Fails: %i' % sum(1 for outcome in outcomes if outcome == Result.FAIL)\n print('')\n print ('Passes: %i' % self.get_pass_count())\n print ('Fails: %i' % self.get_fail_count())\n print ('Errors: %i' % self.get_error_count())\n print ('Untested: %i' % self.get_untested_count())\n print ('Skipped: %i' % self.get_skipped_count())", "def print_summary_metrics(lst):\n print('*' * 50)\n print(' ' * 16 + 'Summary statistics')\n print('*' * 50)\n print('mean: {} | median: {} | mode: {}'.format(get_mean(lst),\n get_median(lst),\n get_mode(lst)))\n print('range: {} | IQR: {}'.format(get_range(list_nums),\n get_IQR(list_nums)))\n print('\\n')\n print('original list: \\n {}'.format(lst))\n print('sorted list: \\n {}'.format(sorted(lst)))\n print('List without outliers: \\n {}'.format(\n remove_outliers(list_nums)))", "def 
printInfo(matrix):\n\n print(\"Groups:\")\n for group in matrix.matrix.group_labels:\n print(\"\\t{0}\".format(group))\n\n print(\"Samples:\")\n for sample in matrix.matrix.sample_labels:\n print(\"\\t{0}\".format(sample))", "def calculates_results_stats(results_dic): \n # Creates empty dictionary for results_stats_dic\n results_stats_dic = dict()\n \n # Sets all counters to initial values of zero so that they can \n # be incremented while processing through the images in results_dic \n results_stats_dic['n_dogs_img'] = 0\n results_stats_dic['n_match'] = 0\n results_stats_dic['n_correct_dogs'] = 0\n results_stats_dic['n_correct_notdogs'] = 0\n results_stats_dic['n_correct_breed'] = 0\n \n # process through the results dictionary\n for key in results_dic:\n \n # Labels Match Exactly\n if results_dic[key][2] == 1:\n results_stats_dic['n_match'] += 1\n\n # TODO: 5a. REPLACE pass with CODE that counts how many pet images of\n # dogs had their breed correctly classified. This happens \n # when the pet image label indicates the image is-a-dog AND \n # the pet image label and the classifier label match. You \n # will need to write a conditional statement that determines\n # when the dog breed is correctly classified and then \n # increments 'n_correct_breed' by 1. Recall 'n_correct_breed' \n # is a key in the results_stats_dic dictionary with it's value \n # representing the number of correctly classified dog breeds.\n # \n # Pet Image Label is a Dog AND Labels match- counts Correct Breed\n if results_dic[key][3] == 1 and results_dic[key][2] == 1:\n results_stats_dic['n_correct_breed'] += 1\n \n # Pet Image Label is a Dog - counts number of dog images\n if results_dic[key][3] == 1:\n results_stats_dic['n_dogs_img'] += 1\n \n # Classifier classifies image as Dog (& pet image is a dog)\n # counts number of correct dog classifications\n if results_dic[key][4] == 1:\n results_stats_dic['n_correct_dogs'] += 1\n\n # TODO: 5b. REPLACE pass with CODE that counts how many pet images \n # that are NOT dogs were correctly classified. This happens \n # when the pet image label indicates the image is-NOT-a-dog \n # AND the classifier label indicates the images is-NOT-a-dog.\n # You will need to write a conditional statement that \n # determines when the classifier label indicates the image \n # is-NOT-a-dog and then increments 'n_correct_notdogs' by 1. \n # Recall the 'else:' above 'pass' already indicates that the \n # pet image label indicates the image is-NOT-a-dog and \n # 'n_correct_notdogs' is a key in the results_stats_dic dictionary \n # with it's value representing the number of correctly \n # classified NOT-a-dog images.\n # \n # Pet Image Label is NOT a Dog\n else:\n # Classifier classifies image as NOT a Dog(& pet image isn't a dog)\n # counts number of correct NOT dog clasifications.\n if results_dic[key][3] == 0 and results_dic[key][4] == 0:\n results_stats_dic['n_correct_notdogs'] += 1\n\n\n # Calculates run statistics (counts & percentages) below that are calculated\n # using the counters from above.\n\n # calculates number of total images\n results_stats_dic['n_images'] = len(results_dic)\n\n # calculates number of not-a-dog images using - images & dog images counts\n results_stats_dic['n_notdogs_img'] = (results_stats_dic['n_images'] - \n results_stats_dic['n_dogs_img']) \n\n # TODO: 5c. REPLACE zero(0.0) with CODE that calculates the % of correctly\n # matched images. 
Recall that this can be calculated by the\n # number of correctly matched images ('n_match') divided by the \n # number of images('n_images'). This result will need to be \n # multiplied by 100.0 to provide the percentage.\n # \n # Calculates % correct for matches\n results_stats_dic['pct_match'] = (results_stats_dic['n_match'] / results_stats_dic['n_images']) * 100\n\n # TODO: 5d. REPLACE zero(0.0) with CODE that calculates the % of correctly\n # classified dog images. Recall that this can be calculated by \n # the number of correctly classified dog images('n_correct_dogs')\n # divided by the number of dog images('n_dogs_img'). This result \n # will need to be multiplied by 100.0 to provide the percentage.\n # \n # Calculates % correct dogs\n results_stats_dic['pct_correct_dogs'] = (results_stats_dic['n_correct_dogs'] / results_stats_dic['n_dogs_img']) * 100\n\n # TODO: 5e. REPLACE zero(0.0) with CODE that calculates the % of correctly\n # classified breeds of dogs. Recall that this can be calculated \n # by the number of correctly classified breeds of dog('n_correct_breed') \n # divided by the number of dog images('n_dogs_img'). This result \n # will need to be multiplied by 100.0 to provide the percentage.\n # \n # Calculates % correct breed of dog\n results_stats_dic['pct_correct_breed'] = (results_stats_dic['n_correct_breed'] / results_stats_dic['n_dogs_img']) * 100\n\n # Calculates % correct not-a-dog images\n # Uses conditional statement for when no 'not a dog' images were submitted \n if results_stats_dic['n_notdogs_img'] > 0:\n results_stats_dic['pct_correct_notdogs'] = (results_stats_dic['n_correct_notdogs'] /\n results_stats_dic['n_notdogs_img']) * 100.0\n else:\n results_stats_dic['pct_correct_notdogs'] = 0.0\n\n \n # TODO 5f. REPLACE None with the results_stats_dic dictionary that you \n # created with this function \n return results_stats_dic", "def print_info(df):\n\n # Data statistics\n # Number of total samples\n print('There are {n_samples} samples in total.'.format(n_samples=len(list(df.index.get_level_values(0).unique()))))\n\n # Count the different types of labels\n unique = df['label'].unique()\n count = []\n\n for label in unique:\n count.append(len(df.index.get_level_values(0)[df['label'] == label].unique()))\n\n count_dict = {unique[i]: count[i] for i in range(len(unique))}\n count_dict_percentage = {\n unique[i]: np.round(count[i] / len(list(df.index.get_level_values(0).unique())), decimals=2)\n for i in range(len(unique))}\n\n print('The types and counts of different labels : \\n {count_dict}'.format(count_dict=count_dict))\n print('The types and counts of different labels as percentage of the total data'\n ' : \\n {count_dict}'.format(count_dict=count_dict_percentage))", "def testStatsZebra(self):\n image2 = self.image.Factory(self.image, True)\n #\n # Add 1 to every other row, so the variance is 1/4\n #\n self.assertEqual(image2.getHeight()%2, 0)\n width = image2.getWidth()\n for y in range(1, image2.getHeight(), 2):\n sim = image2.Factory(image2, afwGeom.Box2I(afwGeom.Point2I(0, y), afwGeom.Extent2I(width, 1)),\n afwImage.LOCAL)\n sim += 1\n\n if display:\n ds9.mtv(self.image, frame = 0)\n ds9.mtv(image2, frame = 1)\n\n stats = afwMath.makeStatistics(image2,\n afwMath.NPOINT | afwMath.STDEV | afwMath.MEAN | afwMath.ERRORS)\n mean = stats.getResult(afwMath.MEAN)\n n = stats.getValue(afwMath.NPOINT)\n sd = stats.getValue(afwMath.STDEV)\n\n self.assertEqual(mean[0], image2.get(0, 0) + 0.5)\n self.assertEqual(sd, 1/math.sqrt(4.0)*math.sqrt(n/(n - 1)))\n 
self.assertAlmostEqual(mean[1], sd/math.sqrt(image2.getWidth()*image2.getHeight()), 10)\n\n meanSquare = afwMath.makeStatistics(image2, afwMath.MEANSQUARE).getValue()\n self.assertEqual(meanSquare, 0.5*(image2.get(0, 0)**2 + image2.get(0, 1)**2))", "def describe(image):\n needle = cv2.imread(image, 0)\n orb = cv2.ORB()\n keypoints, description = orb.detectAndCompute(needle, None)\n print(keypoints)\n print(description)\n return keypoints, description", "def aligned_comparison_stats(unaligned_image_ccd_lst,aligned_image_ccd_lst,\n plots_path,obsdate):\n aligned_mean_count = []\n aligned_median_count = []\n unaligned_mean_count = []\n unaligned_median_count = []\n \n # getting information from source image for saving purposes\n source_hdu = CCDData(aligned_image_ccd_lst[0],unit='adu')\n source_image_data = source_hdu.data.astype(float) \n source_image_hdr = source_hdu.header\n target_name = source_image_hdr['FIELD'].strip(' ')\n exptime = source_image_hdr['EXPTIME']\n chip_num = source_image_hdr['CHIP']\n \n for a_file in unaligned_image_ccd_lst[1:]:\n hdu = CCDData(a_file,unit='adu')\n image_data = hdu.data.astype(float) \n image_hdr = hdu.header\n \n unaligned_mean_count.append(np.mean(a_file))\n unaligned_median_count.append(np.median(a_file))\n \n min_count_for_unaligned_mean = np.min(unaligned_mean_count)\n max_count_for_unaligned_mean = np.max(unaligned_mean_count)\n min_count_for_unaligned_median = np.min(unaligned_median_count)\n max_count_for_unaligned_median = np.max(unaligned_median_count)\n \n for a_file in aligned_image_ccd_lst:\n hdu = CCDData(a_file,unit='adu')\n image_data = hdu.data.astype(float) \n image_hdr = hdu.header\n \n aligned_mean_count.append(np.mean(a_file))\n aligned_median_count.append(np.median(a_file))\n \n min_count_for_aligned_mean = np.min(aligned_mean_count)\n max_count_for_aligned_mean = np.max(aligned_mean_count)\n min_count_for_aligned_median = np.min(aligned_median_count)\n max_count_for_aligned_median = np.max(aligned_median_count)\n \n # plotting\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15,8),sharex=True, sharey=False)\n fig.suptitle('Comparison Statistics for Non-Aligned Images vs Aligned Images for {}'.format(obsdate))\n \n # unaligned mean\n ax1.plot(unaligned_mean_count, label='mean',color=\"deeppink\")\n ax1.axhline(y=min_count_for_unaligned_mean,linestyle='-',linewidth=1,\n color='black',label='min mean {:.2f}'.format(min_count_for_unaligned_mean),alpha=1)\n ax1.axhline(y=max_count_for_unaligned_mean,linestyle='-',linewidth=1,\n color='black',label='max mean {:.2f}'.format(max_count_for_unaligned_mean),alpha=1)\n ax1.set_xlabel('Image number')\n ax1.set_ylabel('Count (ADU)')\n ax1.set_title('Mean pixel value for unaligned images')\n ax1.legend(loc=\"best\")\n ax1.grid(b=True, which='both', axis='both')\n # ax1.sharex(ax3)\n \n # unaligned median\n ax2.plot(unaligned_median_count, label='median',color=\"deeppink\")\n ax2.axhline(y=min_count_for_unaligned_median,linestyle='-',linewidth=1,\n color='black',label='min median {:.2f}'.format(min_count_for_unaligned_median),alpha=1)\n ax2.axhline(y=max_count_for_unaligned_median,linestyle='-',linewidth=1,\n color='black',label='max median {:.2f}'.format(max_count_for_unaligned_median),alpha=1)\n ax2.set_xlabel('Image number')\n ax2.set_ylabel('Count (ADU)')\n ax2.set_title('Median pixel value for unaligned images')\n ax2.legend(loc=\"best\")\n ax2.grid(b=True, which='both', axis='both')\n # ax2.sharex(ax4)\n \n # aligned mean\n ax3.plot(aligned_mean_count, 
label='mean',color=\"darkviolet\")\n ax3.axhline(y=min_count_for_aligned_mean,linestyle='-',linewidth=1,\n color='black',label='min mean {:.2f}'.format(min_count_for_aligned_mean),alpha=1)\n ax3.axhline(y=max_count_for_aligned_mean,linestyle='-',linewidth=1,\n color='black',label='max mean {:.2f}'.format(max_count_for_aligned_mean),alpha=1)\n ax3.set_xlabel('Image number')\n ax3.set_ylabel('Count (ADU)')\n ax3.set_title('Mean pixel value for aligned images')\n ax3.legend(loc=\"best\")\n ax3.grid(b=True, which='both', axis='both')\n \n \n # aligned median\n ax4.plot(aligned_median_count, label='median',color=\"darkviolet\")\n ax4.axhline(y=min_count_for_aligned_median,linestyle='-',linewidth=1,\n color='black',label='min median {:.2f}'.format(min_count_for_aligned_median),alpha=1)\n ax4.axhline(y=max_count_for_aligned_median,linestyle='-',linewidth=1,\n color='black',label='max median {:.2f}'.format(max_count_for_aligned_median),alpha=1)\n ax4.set_xlabel('Image number')\n ax4.set_ylabel('Count (ADU)')\n ax4.set_title('Median pixel value for unaligned images')\n ax4.legend(loc=\"best\")\n ax4.grid(b=True, which='both', axis='both')\n \n \n for ax in fig.get_axes():\n ax.label_outer()\n \n fig.savefig(plots_path/\"{}-{}-{}-comparison_stats.jpg\".format(obsdate,\n target_name,\n exptime,chip_num),\n dpi=1000)\n fig.show()", "def visualize(**images):\n n_images = len(images)\n plt.figure(figsize=(20,8))\n for idx, (name, image) in enumerate(images.items()):\n plt.subplot(1, n_images, idx + 1)\n plt.xticks([]); \n plt.yticks([])\n # get title from the parameter names\n plt.title(name.replace('_',' ').title(), fontsize=20)\n plt.imshow(image)\n plt.savefig('sample_gt_pred_2_max.jpeg')\n plt.show()", "def _print_summary(data, metric):\n\n print(u'Cortical thickness {}: {:.2f} \\u00B1 {:.2f} [{:.2f}--{:.2f}]'\n .format(metric, data[:, 0].mean(), data[:, 0].std(ddof=1),\n data[:, 0].min(), data[:, 0].max()))\n print('Other modalities {}: {:.2f} \\u00B1 {:.2f} [{:.2f}--{:.2f}]'\n .format(metric, data[:, 1:].mean(), data[:, 1:].std(ddof=1),\n data[:, 1:].min(), data[:, 1:].max()))\n print('Overall {}: {:.2f} \\u00B1 {:.2f} [{:.2f}--{:.2f}]'\n .format(metric, data.mean(), data.std(ddof=1),\n data.min(), data.max()))" ]
[ "0.73849064", "0.7212866", "0.7087518", "0.70753783", "0.6967849", "0.68458074", "0.6760577", "0.674482", "0.6678487", "0.66494477", "0.6574271", "0.6533716", "0.64291257", "0.63930213", "0.6344323", "0.63426876", "0.63391554", "0.63218725", "0.63212085", "0.6293195", "0.62893045", "0.62756395", "0.62569714", "0.62569714", "0.62208235", "0.62120205", "0.6186777", "0.6116503", "0.6091363", "0.60903686", "0.60451055", "0.6032356", "0.60212535", "0.59832823", "0.59823805", "0.5963851", "0.5963851", "0.5963006", "0.5961557", "0.5955537", "0.5955537", "0.59546375", "0.59517586", "0.5916972", "0.5908537", "0.59028816", "0.5901189", "0.58972985", "0.5890638", "0.58873147", "0.58820784", "0.5877452", "0.5873755", "0.58703023", "0.58504575", "0.5841564", "0.5841547", "0.5840304", "0.5840249", "0.58332604", "0.58022314", "0.5797618", "0.5787392", "0.5785679", "0.5784243", "0.57681316", "0.5766618", "0.57642233", "0.57544005", "0.575274", "0.5749759", "0.5741401", "0.5736139", "0.57339853", "0.5727522", "0.5722611", "0.5722583", "0.5720094", "0.5712912", "0.5709794", "0.5709104", "0.5708907", "0.570818", "0.57000756", "0.569805", "0.56927973", "0.5690715", "0.5689314", "0.56860584", "0.56700975", "0.5659011", "0.56512415", "0.5645127", "0.56238735", "0.56212085", "0.56211215", "0.5616154", "0.5615", "0.5605198", "0.56050324" ]
0.6997572
4
Returns the inverse of the 2d rot and trans matrix
def inverse_transform2(alpha, tx = 0.0, ty = 0.0, mirror = 0):
    t = Transform({"type":"2D","alpha":alpha,"tx":tx,"ty":ty,"mirror":mirror,"scale":1.0})
    t = t.inverse()
    t = t.get_params("2D")
    return t[ "alpha" ], t[ "tx" ], t[ "ty" ], t[ "mirror" ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inverse(self):\n return Rotation(self.matrix.transposed())", "def inverse(self):\n rotation_matrix = self.pose_mat[:3, :3]\n translation_vector = self.pose_mat[:3, 3]\n\n rot = np.transpose(rotation_matrix)\n trans = - np.matmul(np.transpose(rotation_matrix), translation_vector)\n return Transformation(rot, trans)", "def inv(transform_matrix):\n\n r = transform_matrix[0:3, 0:3]\n t = transform_matrix[0:3, 3]\n t_inv = -1 * r.T.dot(t)\n transform_inv = np.eye(4)\n transform_inv[0:3, 0:3] = r.T\n transform_inv[0:3, 3] = t_inv\n\n return transform_inv", "def inverse(self):\r\n \r\n Mi=mat4()\r\n d=self.determinant()\r\n for i in range(4):\r\n for j in range(4):\r\n sign=1-((i+j)%2)*2\r\n m3=self._submat(i,j)\r\n Mi[j,i]=sign*m3.determinant()/d\r\n return Mi", "def rot_inv(self):\n if not hasattr(self, '_rot_inv'):\n self._rot_inv=np.linalg.inv(self.rot)\n return self._rot_inv", "def inverse_rigid_trans(Tr): \n inv_Tr = np.zeros_like(Tr) # 3x4\n inv_Tr[0:3,0:3] = np.transpose(Tr[0:3,0:3])\n inv_Tr[0:3,3] = np.dot(-np.transpose(Tr[0:3,0:3]), Tr[0:3,3])\n return inv_Tr", "def trans_matrix_inv(m:numpy.ndarray):\n was2d = False\n if m.shape[1] == 3:\n was2d = True\n m = numpy.asarray([\n [1.0, 0.0, 0.0, 0.0],\n [0.0, m[0,0], m[0,1], m[0,2]],\n [0.0, m[1,0], m[1,1], m[1,2]],\n [0.0, 0.0, 0.0, 1.0]], numpy.float64)\n trans = m[0:3,3]\n rotate = numpy.zeros(3, numpy.float64)\n r = m[0:3,0:3]\n rc = numpy.linalg.cholesky(numpy.matmul(r.T, r)).T\n scale = numpy.diagonal(rc)\n if numpy.linalg.det(r) < 0.0:\n scale[0] *= -1.0\n rcd = rc * numpy.eye(3, dtype=numpy.float64)\n rc = numpy.linalg.solve(rcd, rc)\n shear = numpy.asarray([rc[0,1], rc[0,2], rc[1,2]], numpy.float64)\n r0 = trans_matrix({'rotate': rotate, 'scale': scale, 'shear': shear})[0:3,0:3]\n r0 = numpy.linalg.solve(numpy.linalg.inv(r), numpy.linalg.inv(r0))\n rotate[1] = numpy.arcsin(_frone(r0[0,2]))\n if numpy.abs((numpy.abs(rotate[1]) - (numpy.pi / 2.0))) < 1.0e-6:\n rotate[0] = 0.0\n rotate[2] = numpy.arctan2(-_frone(r0[1,0]), _frone(-r0[2,0] / r0[0,2]))\n else:\n rc = numpy.cos(rotate[1])\n rotate[0] = numpy.arctan2(_frone(r0[1,2] / rc), _frone(r0[2,2] / rc))\n rotate[2] = numpy.arctan2(_frone(r0[0,1] / rc), _frone(r0[0,0] / rc))\n if was2d:\n trans = trans[1:]\n rotate = rotate[0:1]\n scale = scale[1:]\n shear = shear[2:3]\n return (trans, rotate, scale, shear)", "def rotation_inv(R: np.array) -> np.array:\n return R.T", "def getInverseMatrix(self) -> CMatrix4:\n ...", "def inverse(self):\n data = np.linalg.inv(self._data)\n return self.create(self.rows, self.cols, data)", "def inverse_affine_transformation_matrix(A):\n n, n = A.shape\n\n # extract components R, an n-1xn-1 linear transformation matrix, and T, an nx1 translation matrix\n R = A[:n-1, :n-1]\n T = A[:n-1, n-1]\n\n # find R^-1\n R_inv = np.linalg.inv(R)\n\n # Find A^-1/A_inv\n A_inv = np.copy(A).astype(float) # copy A for base of A^-1 matrix and ensure it is of data type float\n A_inv[:n-1, :n-1] = R_inv # set top left nxn sub matrix equal to R^-1\n A_inv[:n-1, n-1] = np.negative(R_inv.dot(T)) # place -R^-1*T in top right corner\n\n return A_inv", "def affine_transform_inverse(np_transform):\n rotation = np_transform[:3, :3]\n translation = np_transform[:3, 3]\n rotation_inv = numpy.linalg.inv(rotation)\n translation_inv = -1 * numpy.dot(rotation_inv, translation)\n result = numpy.identity(4)\n result[:3, :3] = rotation_inv\n result[:3, 3] = translation_inv.flatten()\n return result", "def inverse_matrice(T):\n a,b,c,d = T[0][0],T[0][1],T[1][0],T[1][1]\n det = 
a*d-b*c\n aa,bb,cc,dd = d/det,-b/det,-c/det,a/det\n Tinv = [[aa,bb],[cc,dd]]\n return Tinv", "def InverseRotation(rotation):\n return RotationMatrix([\n [rotation.rot[0][0], rotation.rot[1][0], rotation.rot[2][0]],\n [rotation.rot[0][1], rotation.rot[1][1], rotation.rot[2][1]],\n [rotation.rot[0][2], rotation.rot[1][2], rotation.rot[2][2]]\n ])", "def inverse(self,mat):\n result = np.linalg.inv(mat)\n self.out = result\n return self.out", "def ComputeInverseInnerOrientation(self):\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n mat = np.array([[a1[0], a2[0]], [b1[0], b2[0]]])\n mat = la.inv(mat)\n\n return np.array([a0[0], b0[0], mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1]]).T", "def intrinsic_matrix_inv(self) -> np.ndarray:\n\n # determinant of top left of intrinsic matrix\n tldet = self.kx * self.ky\n\n return np.array([[1 / self.kx, -self.kxy / tldet, (self.py * self.kxy - self.ky * self.px) / tldet],\n [0, 1 / self.ky, -self.py / self.ky]])", "def inverse(self):\n self.check_square()\n\n\n N = self.rows\n\n inverse = make_matrix(N, N)\n\n # Solve on a per-column basis using Ax = b formalism\n for j in range(N):\n b = make_matrix(N, 1)\n b[j, 0] = 1\n\n x = self.solve_linear_system(b)\n\n for i in range(N):\n inverse[i, j] = x[i, 0]\n\n return inverse", "def inv(self):\n inv = np.linalg.inv(self._mat)\n return MoebTr(inv[0][0], inv[0][1], inv[1][0], inv[1][1])", "def inverse(self):\n # TODO\n # detA\n if not self.is_square():\n raise(\n ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(\n NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n\n mD = self.determinant()\n if self.h == 1:\n if self.g[0][0] = 0:\n raise(NotImplementedError,\n \"The 1x1 Matrix contains 0 can't inverse\")\n else:\n return [[1 / self.g[0][0]]] \n for i in range(self.h): # Calculates the inverse of a 2x2 Matrix.\n my_Matrix = zeroes(2, 2)\n my_Matrix.g[1][1] = self.g[0][0] / mD\n my_Matrix.g[0][0] = self.g[1][1] / mD\n my_Matrix.g[0][1] = - self.g[0][1] / mD\n my_Matrix.g[1][0] = - self.g[1][0] / mD\n return my_Matrix\n\n # trace A\n # 与矩阵TraceA * I identity 单位矩阵", "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n # TODO - your code here\n inverse = []\n if self.h == 1:\n temp = []\n temp.append(1/self.g[0][0])\n inverse.append(temp)\n else:\n identity_matrix = identity(self.h)\n det_term = 1/self.determinant()\n trace_term = self.trace()\n # implement intermediate scaling step locally\n # trace_x_I = trace_term * identity_matrix\n trace_x_I = []\n for i in range(len(self.g)):\n temp_row = []\n for j in range(len(self.g[i])):\n temp_row.append(trace_term * identity_matrix[i][j])\n trace_x_I.append(temp_row)\n # implement sub-traction locally\n # sub_term = trace_x_I - self.g\n sub_term = []\n for i in range(len(trace_x_I)):\n temp_row = []\n for j in range(len(trace_x_I[i])):\n temp_row.append(trace_x_I[i][j] - self.g[i][j])\n sub_term.append(temp_row)\n # implement final scaling step locally\n # inverse = det_term * sub_term\n inverse = []\n for i in range(len(sub_term)):\n temp_row = []\n for j in range(len(sub_term[i])):\n temp_row.append(det_term * sub_term[i][j])\n 
inverse.append(temp_row)\n return Matrix(inverse)\n # TODO - your code here", "def GetInverse(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD2_GetInverse(self, *args)", "def inv(T):\n K, L = T.shape[1:3]\n squ_matrix = np.einsum('ijkl->ikjl', T).reshape((K*L, K*L),order='F')\n t = np.linalg.inv(squ_matrix)\n return np.einsum('ijkl->ikjl', t.reshape((K,L,K,L), order='F'))", "def inverse(self):\n invr = np.linalg.inv(self.affine_matrix)\n return SymmOp(invr)", "def Invert(*args, **kwargs):\n return _gdi_.GraphicsMatrix_Invert(*args, **kwargs)", "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n\n # TODO - your code here\n if self.h == 1:\n inverse = [[1/self.g[0][0]]];\n else:\n a = self.g[0][0];\n b = self.g[0][1];\n c = self.g[1][0];\n d = self.g[1][1];\n if(a*d==b*c):\n raise ValueError('matrix does not have a inverse!');\n else:\n weigh = 1/(a*d-b*c);\n inverse = [[weigh*d,weigh*-1*b],[weigh*-1*c,weigh*a]];\n return Matrix(inverse);", "def _get_inv(self):\n m,d = self.B.shape\n Im = np.eye(m)\n Id = np.eye(d)\n BBt = [email protected]\n I_BBt_inv = np.linalg.pinv(Im + BBt)\n \n return (1/self.alpha)*(Id - self.B.T@( [email protected]/self.alpha))", "def inverse(self):\n # find the determinant of the matrix\n determinant = self.determinant()\n # find the matrix of minors of the matrix\n matrix_of_minors = self.matrix_of_minors()\n # find the cofactor of the matrix of minors\n cofactor_matrix = self.cofactor_matrix(matrix_of_minors)\n # find the transpose of the cofactor matrix\n transpose_cofactor_matrix = self.transpose(cofactor_matrix)\n # find the adjugate (inverse) matrix\n inverse_matrix = self.adjugate_matrix(determinant, transpose_cofactor_matrix)\n\n return inverse_matrix", "def _inverse_affine_matrix(self) -> np.ndarray:\n raise NotImplementedError", "def invert(self):\n\n if self.rows != self.columns:\n raise ValueError(\"Matrix must be square to invert\")\n\n A, operations = self.to_reduced_row_echelon()\n if not A.is_identity():\n return 0\n\n # If A was reduced to the identity matrix, then the same set of operations will take I to the inverse of A.\n # [A I] -> [I A^(-1)]\n\n I = IdentityMatrix(size = self.rows)\n for operation in operations:\n func = I.__getattribute__(operation[0])\n args = operation[1:]\n func(*args)\n\n return I", "def inv(self):\n\n self.x, self.y = self.y, self.x\n self._x_, self._y_ = self._y_, self._x_\n self.xfac, self.yfac = 1 / self.yfac, 1 / self.xfac\n self._xfac_, self._yfac_ = 1 / self._yfac_, 1 / self._xfac_\n self._u = 1 / self._u.conj()", "def inverse(self):\n if self.determinant() != 0:\n ops = reduce_to_red_echelon(self.data.copy(), True)[1]\n matrix = identity_matrix(self.n_rows).data\n \n if ops:\n if isinstance(ops[0], str):\n ops = [ops]\n \n for op in ops:\n if op[0] == 'swap':\n matrix = row_swap(matrix, op[1], op[2])\n elif op[0] == 'multiplication':\n matrix = row_multiply(matrix, op[1], op[2])\n elif op[0] == 'subtract':\n matrix = row_subtract(matrix, op[1], op[2], op[3])\n else:\n raise ValueError('Row operation not recognized')\n else:\n raise ValueError('Matrix has a determinant of 0 and is not invertible')\n return Matrix(matrix)", "def invert(M):\r\n M = isMatrix(M)\r\n return M.I", "def inv(X):\n R, t = Rt(X)\n Ri = R.T\n return np.concatenate((\n np.concatenate((Ri, -Ri.dot(t)[:,np.newaxis]), axis=1),\n np.array([[0, 0, 
1]])))", "def inverse(self):\n return self.solve(Matrix.I(self.nrows))", "def posdef_inv_matrix_inverse(tensor, identity, damping):\n return tf.matrix_inverse(tensor + damping * identity)", "def inverse_transform(self, y: Array2D) -> Array2D:", "def inverse(self):\n rotation = self.rotation.inverse()\n translation = rotation * (-self.translation)\n\n return Transform(rotation, translation)", "def invert(x):\n return linalg.inv(x)", "def T_inv(T):\n R, xyz = rigmech.T2Rxyz(T)\n R_inv = R.T\n xyz_inv = -R_inv * xyz\n T_inv = R_inv.row_join(xyz_inv).col_join(sp.Matrix([[0, 0, 0, 1]]))\n return T_inv", "def inv_rotation_matrix(self):\n return np.linalg.inv(self._rotation_matrix).tolist()", "def inverse_transform(self, matrix):\n\n x = matrix.shape[0]\n y = matrix.shape[1]\n N = x\n\n # Inverse Fourier Transform matrix:\n ift = np.zeros([x, y], complex)\n\n for i in range(0, x):\n for j in range(0, y):\n sum_ift = 0\n for u in range(0, x):\n for v in range(0, y):\n sum_ift = sum_ift + matrix[u, v] * (np.cos(((2 * np.pi) / N) * (u * i + v * j)) + 1j * np.sin(((2 * np.pi) / N) * (u * i + v * j)))\n\n ift[i, j] = sum_ift\n\n\n return ift/(x*x)", "def inverse(self):\n def inv(v):\n v[0], v[1] = v[1] , v[0]\n for v in [self.point1 , self.pointN , self.unitv, self.normalv]:\n inv(v)\n\n self.points = numpy.roll(self.points,1,axis=1)\n self.a, self.b = self.b, self.a\n self.angle = numpy.arccos( self.unitv[0] )*numpy.sign(self.unitv[1] )\n return", "def reverse_matrix(self):\n return SWAP.matrix @ self.matrix @ SWAP.matrix", "def inverseTransformationMatrix(self,index=None):\n if self.method == 'pca':\n if index is not None:\n coordinateIndex = distribution1D.vectori_cxx(len(index))\n for i in range(len(index)):\n coordinateIndex[i] = index[i]\n matrixDim = self._distribution.getInverseTransformationMatrixDimensions(coordinateIndex)\n inverseTransformation = self._distribution.getInverseTransformationMatrix(coordinateIndex)\n else:\n matrixDim = self._distribution.getInverseTransformationMatrixDimensions()\n inverseTransformation = self._distribution.getInverseTransformationMatrix()\n row = matrixDim[0]\n column = matrixDim[1]\n # convert 1D vector to 2D array\n L = np.atleast_1d(inverseTransformation).reshape(row,column)\n else:\n self.raiseAnError(NotImplementedError,' inverse transformationMatrix is not yet implemented for ' + self.method + ' method')\n return L", "def matrix_inv(mat):\n\ta = mat[0,0]\n\tb = mat[0,1]\n\tc = mat[0,2]\n\td = mat[1,0]\n\te = mat[1,1]\n\tf = mat[1,2]\n\tg = mat[2,0]\n\th = mat[2,1]\n\ti = mat[2,2]\n\n\tdet = b*f*g + c*d*h + a*e*i - a*f*h - b*d*i - c*e*g\n\n\tinvmat = np.zeros((3,3))\n\tinvmat[0,0] = (e*i - f*h) / det\n\tinvmat[0,1] = (c*h - b*i) / det\n\tinvmat[0,2] = (b*f - c*e) / det\n\tinvmat[1,0] = (f*g - d*i) / det\n\tinvmat[1,1] = (a*i - c*g) / det\n\tinvmat[1,2] = (c*d - a*f) / det\n\tinvmat[2,0] = (d*h - e*g) / det\n\tinvmat[2,1] = (b*g - a*h) / det\n\tinvmat[2,2] = (a*e - b*d) / det\n\treturn invmat", "def GetInverse(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD3_GetInverse(self, *args)", "def inverse( m, context = FloatContext, copy_m=True ):\n n,n_ = shape_mat(m)\n assert (n==n_) #matris should be square\n\n return solve( m, eye(n), context=context, copy_b=False, copy_a=copy_m )", "def inverse(self, x, y):", "def invert_affine_transform(matrix: Tensor) -> Tensor:\n if not isinstance(matrix, Tensor):\n raise TypeError(f\"Input matrix type is not a Tensor. 
Got {type(matrix)}\")\n\n if not (len(matrix.shape) == 3 and matrix.shape[-2:] == (2, 3)):\n raise ValueError(f\"Input matrix must be a Bx2x3 tensor. Got {matrix.shape}\")\n\n matrix_tmp: Tensor = convert_affinematrix_to_homography(matrix)\n matrix_inv: Tensor = _torch_inverse_cast(matrix_tmp)\n\n return matrix_inv[..., :2, :3]", "def inverse (x_ij):\n\n Hij = xyzrph2matrix (x_ij)\n Rji = Hij[0:3, 0:3]\n tij = Hij[0:3,3]\n Rij = Rji.transpose ()\n tji = -Rij.dot (tij)\n Hji = numpy.zeros ((4,4))\n Hji[0:3,0:3] = Rij\n Hji[0:3,3] = tji\n Hji[3,3] = 1\n return matrix2xyzrph (Hji)", "def __invert__(self):\n return self.inverse()", "def complex_inverse(c1,cr):", "def _inv(self) -> None:\n\n self.inv(inplace=True)", "def inverse(self, ys):\n with torch.no_grad():\n xs = torch.matmul(ys, torch.diag(torch.reciprocal(torch.exp(self.scaling_diag))))\n xs = self.layer4.inverse(xs)\n xs = self.layer3.inverse(xs)\n xs = self.layer2.inverse(xs)\n xs = self.layer1.inverse(xs)\n return xs", "def transformation_matrix(self):\n t = np.array([[0.0], [0.0], [0.0]])\n Rt = np.hstack([self.rotation_matrix, t])\n return np.vstack([Rt, np.array([0.0, 0.0, 0.0, 1.0])])", "def _inv(M):\n ll, mm = M.shape\n M2 = M + 1e-10 * np.eye(ll)\n L = np.linalg.cholesky(M2)\n inv_L = np.linalg.inv(L)\n inv_M = inv_L.T @ inv_L\n return inv_M", "def ssc.inverse (x_ij):\n\n Hij = xyzrph2matrix (x_ij)\n Rji = Hij[0:3, 0:3]\n tij = Hij[0:3,3]\n Rij = Rji.transpose ()\n tji = -Rij.dot (tij)\n Hji = zeros ((4,4))\n Hji[0:3,0:3] = Rij\n Hji[0:3,3] = tji\n Hji[3,3] = 1\n return matrix2xyzrph (Hji)", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse(self, y):\n device = y.device\n return t.einsum('ij,k,kj->ik', y, 1. 
/ t.sqrt(self.eig).to(device), self.rot.to(device))", "def inverseN(self):\r\n result = Matrix(self.rows, self.columns)\r\n for r in range(self.rows):\r\n for c in range(self.columns):\r\n result.mat[r][c] = self.cofactor(r, c)\r\n result.out()\r\n result = result.transpose()\r\n det = self.determinant()\r\n print(\"1/(\" + str(det) + \")\")\r\n result.out()\r\n return result", "def inverse(self):\n return self.invert()", "def _inv22_vectorized(M):\n assert (M.ndim == 3)\n assert (M.shape[-2:] == (2, 2))\n M_inv = np.empty_like(M)\n delta_inv = np.reciprocal(M[:, 0, 0]*M[:, 1, 1] - M[:, 0, 1]*M[:, 1, 0])\n M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv\n M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv\n M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv\n M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv\n return M_inv", "def _vect_matrix_inverse(A):\n identity = np.identity(A.shape[2], dtype=A.dtype)\n return np.array([np.linalg.solve(x, identity) for x in A])", "def inv_inplace(a):", "def invert(self):\n if self.m != self.n:\n raise exc.LinearAlgebraError(\"cannot invert a non-square matrix\")\n if self.determinant == 0:\n raise exc.LinearAlgebraError(\"cannot invert a singular matrix\")\n # TODO: implement block matrices in their own method\n block_rows = [r1 + r2 for r1, r2 in\n zip(self.data, self.makeIdentity(self.m).data)]\n inverse_block = Matrix.fromRows(block_rows).row_reduce()\n return inverse_block.subset([i for i in range(self.m)],\n [j + self.n for j in range(self.n)])", "def _r_inv(self):\n # [output_dim, output_dim]\n return tf.linalg.cholesky_solve(\n self._chol_obs_covariance,\n tf.eye(self.emission.output_dim, dtype=self._chol_obs_covariance.dtype),\n )", "def inverse_transform(data, flip, rotate):\n if flip == 'FLIP_LEFT_RIGHT':\n data = np.fliplr(data)\n\n if rotate == 'ROTATE_90':\n data = np.rot90(data, 3)\n\n if rotate == 'ROTATE_180':\n data = np.rot90(data, 2)\n\n if rotate == 'ROTATE_270':\n data = np.rot90(data, 1)\n\n return data", "def inverse(self):\n return self._inverse", "def inverse(self):\n ss = self._sum_of_squares()\n if ss > 0:\n return self.__class__(array=(self._vector_conjugate() / ss))\n else:\n raise ZeroDivisionError(\"a zero quaternion (0 + 0i + 0j + 0k) cannot be inverted\")", "def __invert__(self):\n try:\n B = ~(self.matrix())\n except ZeroDivisionError:\n raise ZeroDivisionError(\"matrix morphism not invertible\")\n try:\n return self.parent().reversed()(B)\n except TypeError:\n raise ZeroDivisionError(\"matrix morphism not invertible\")", "def inv(self):\n return self.conjugate()", "def inverse(self):\n cdef StdVectorFst result = self.copy()\n result.invert()\n return result", "def _r_inv(self):\n raise NotImplementedError", "def inverse(im): \t \n x,y = np.shape(im)\n img = np.zeros([x,y])\n\t\n for i in range(x):\n for j in range(y):\n img[i,j] = 255 - im[i,j]\n return img", "def tensorinv(a, ind=2):\n return TensorInv(ind)(a)", "def __invert__(self):\n a = self.angle\n x, y = Vector.cartesian([1, a])\n return Vector(x, y)", "def get_inverse_2x2(u, v):\n if not is_linearly_independent_2x2(u, v):\n return\n uv = get_uv(u, v)\n iden = get_uv([1, 0],[0, 1])\n a = np.zeros((2, 4))\n for i in range(2):\n for j in range(2):\n a[i][j] = uv[i][j]\n a[i][j+2] = iden[i][j]\n\n q = a[0][1] / a[1][1]\n a[0] = a[0] - q * a[1]\n\n q = a[1][0] / a[0][0]\n a[1] = a[1] - q * a[0]\n\n a[0] /= a[0][0]\n\n a[1] /= a[1][1]\n\n for i in range(2):\n for j in range(2):\n uv[i][j] = a[i][j+2]\n return uv", "def Inverse(self, freedofs: pyngcore.BitArray = None, inverse: str = '') -> BaseMatrix:", "def 
Inverse(self, freedofs: pyngcore.BitArray = None, inverse: str = '') -> BaseMatrix:", "def inverse_cayley_transform(z: torch.Tensor) -> torch.Tensor:\n identity = identity_like(z)\n i_identity = multiply_by_i(identity)\n\n z_minus_id = z - i_identity\n inv_z_plus_id = inverse(z + i_identity)\n return z_minus_id @ inv_z_plus_id", "def get_transformation_matrix(theta=45):\n\n theta = theta/360 * 2 * np.pi # in radians\n hx = np.cos(theta)\n sy = np.sin(theta)\n\n S = np.array([[1, hx, 0],\n [0, sy, 0],\n [0, 0, 1]])\n #S_inv = np.linalg.inv(S)\n #old_coords = np.array([[2, 2, 1], [6, 6, 1]]).T\n #new_coords = np.matmul(S, old_coords)\n #recovered_coords = np.matmul(S_inv, new_coords)\n #print('new coords: ', new_coords)\n #print('recovered coords: ', recovered_coords)\n return S", "def inv(in_A):\n Q,R = qr(in_A)\n QT = Q.T\n N = shape(in_A)[0]\n \n for n in range(N-1,-1,-1):\n Rnn = R[n,n]\n R[n,:] /= Rnn\n QT[n,:] /= Rnn\n for m in range(n+1,N):\n Rnm = R[n,m]\n R[n,m] = 0\n QT[n,:] -= QT[m,:]*Rnm\n\n return QT", "def get_transform_matrix(theta, phi = None, invert_rot = False, invert_focal = False):\n\n if phi is None:\n phi = const.PHI_IDX * 10.0\n\n #extrinsic x intrinsic\n camera_matrix = np.zeros((4, 4), dtype=np.float32)\n\n intrinsic_matrix = np.eye(4, dtype=np.float32)\n extrinsic_matrix = np.eye(4, dtype=np.float32)\n\n sin_phi = np.sin(float(phi) / 180.0 * np.pi)\n cos_phi = np.cos(float(phi) / 180.0 * np.pi)\n sin_theta = np.sin(float(-theta) / 180.0 * np.pi)\n cos_theta = np.cos(float(-theta) / 180.0 * np.pi)\n\n #theta rotation\n rotation_azimuth = np.zeros((3, 3), dtype=np.float32)\n rotation_azimuth[0, 0] = cos_theta\n rotation_azimuth[2, 2] = cos_theta\n rotation_azimuth[0, 2] = -sin_theta\n rotation_azimuth[2, 0] = sin_theta\n rotation_azimuth[1, 1] = 1.0\n\n #phi rotation\n rotation_elevation = np.zeros((3, 3), dtype=np.float32)\n rotation_elevation[0, 0] = cos_phi\n rotation_elevation[0, 1] = sin_phi\n rotation_elevation[1, 0] = -sin_phi\n rotation_elevation[1, 1] = cos_phi\n rotation_elevation[2, 2] = 1.0\n\n #rotate phi, then theta\n rotation_matrix = np.matmul(rotation_azimuth, rotation_elevation)\n if invert_rot:\n rotation_matrix = np.linalg.inv(rotation_matrix)\n\n displacement = np.zeros((3, 1), dtype=np.float32)\n displacement[0, 0] = const.DIST_TO_CAM\n displacement = np.matmul(rotation_matrix, displacement)\n\n #assembling 4x4 from R + T\n extrinsic_matrix[0:3, 0:3] = rotation_matrix\n extrinsic_matrix[0:3, 3:4] = -displacement\n\n if invert_focal:\n intrinsic_matrix[2, 2] = float(const.focal_length)\n intrinsic_matrix[1, 1] = float(const.focal_length)\n else:\n intrinsic_matrix[2, 2] = 1.0 / float(const.focal_length)\n intrinsic_matrix[1, 1] = 1.0 / float(const.focal_length)\n\n camera_matrix = np.matmul(extrinsic_matrix, intrinsic_matrix)\n return camera_matrix", "def inv(self):\n return MoebGen(self._d / self._det, - self._b / self._det, - self._c / self._det, self._a / self._det)", "def inverse(self):\n myMatrix = np.array(self.Matrix)\n if np.linalg.det(myMatrix) == 0:\n print(\"This matrix has a determinant of 0, meaning it has no inverse\")\n else:\n self.Inverse = np.linalg.inv(myMatrix)\n print(\"This is the inverse to your matrix: \", self.Inverse)", "def transAffine2D( iScale=(1, 1), iTrans=(0, 0), iRot=0, iShear=(0, 0) ): \n iRot = iRot * np.pi / 180\n oMatScale = np.matrix( ((iScale[0],0,0),(0,iScale[1],0),(0,0,1)) )\n oMatTrans = np.matrix( ((1,0,iTrans[0]),(0,1,iTrans[1]),(0,0,1)) )\n oMatRot = np.matrix( ((np.cos(iRot),-np.sin(iRot),0),\\\n 
(np.sin(iRot),np.cos(iRot),0),(0,0,1)) )\n oMatShear = np.matrix( ((1,iShear[0],0),(iShear[1],1,0),(0,0,1)) )\n # ustvari izhodno matriko\n oMat2D = oMatTrans * oMatShear * oMatRot * oMatScale\n return oMat2D", "def inverse_basis(T, dimensions, t):\n B = basis(T, dimensions, t)\n return inv(B.T.dot(B)).dot(B.T)", "def inv_m(self):\n self.m = -self.m", "def __invert__(self):\n \n return Vector(-self.y, self.x)", "def inverse(self):\n group = self.group\n r = tuple([(i, -j) for i, j in self.array_form[::-1]])\n return group.dtype(r)", "def inverse_transform(self, Xt):\n # Inverse transform\n columns = []\n start = 0\n\n for j in range(self.n_dims):\n dim = self.dimensions[j]\n offset = dim.transformed_size\n\n if offset == 1:\n columns.append(dim.inverse_transform(Xt[:, start]))\n else:\n columns.append(dim.inverse_transform(Xt[:, start : start + offset]))\n\n start += offset\n\n # Transpose\n rows = []\n\n for i in range(len(Xt)):\n r = []\n for j in range(self.n_dims):\n r.append(columns[j][i])\n\n rows.append(r)\n\n return rows", "def inverse(self):\n n = self.norm()\n c = self.conj()\n d = 1.0 / (n * n)\n c.scale(d)\n return c", "def invert(self, img):\n return self.inverse()(img)", "def inv(M):\n\t#clone the matrix and append the identity matrix\n\t# [int(i==j) for j in range_M] is nothing but the i(th row of the identity matrix\n\tm2 = [row[:]+[int(i==j) for j in range(len(M) )] for i,row in enumerate(M) ]\n\t# extract the appended matrix (kind of m2[m:,...]\n\treturn [row[len(M[0]):] for row in m2] if gauss_jordan(m2) else None", "def get_world_inv_matrix(m_obj, i):\n if not m_obj.hasFn(oMa.MFn.kTransform):\n return\n\n fn_obj = oMa.MFnDependencyNode(m_obj)\n plug = fn_obj.findPlug('worldInverseMatrix', False).elementByLogicalIndex(i)\n matrix_obj = plug.asMObject()\n matrix_data = oMa.MFnMatrixData(matrix_obj)\n matrix = matrix_data.matrix()\n\n return matrix" ]
[ "0.76569027", "0.75310546", "0.7445371", "0.7406346", "0.7397322", "0.73486006", "0.72921807", "0.7276845", "0.7246478", "0.71603876", "0.71544224", "0.7151582", "0.708686", "0.7067885", "0.7064713", "0.7061624", "0.7057902", "0.70148677", "0.70069176", "0.69840884", "0.69755006", "0.69609094", "0.6950411", "0.6932875", "0.6923857", "0.69233245", "0.69113487", "0.6882439", "0.6856382", "0.6845871", "0.6814588", "0.6810987", "0.67921245", "0.67769223", "0.67494005", "0.6708963", "0.669069", "0.6635994", "0.66139567", "0.66060495", "0.66048783", "0.6604787", "0.6590005", "0.6586237", "0.65720165", "0.6560596", "0.6530914", "0.6530028", "0.64909476", "0.64656794", "0.64309263", "0.64240235", "0.6368767", "0.6367298", "0.6363927", "0.6349667", "0.6346638", "0.63394356", "0.63391775", "0.63391775", "0.63391775", "0.63391775", "0.63391775", "0.63323694", "0.6328439", "0.63247335", "0.63154", "0.6304914", "0.62913346", "0.62878436", "0.6282993", "0.6275106", "0.62614816", "0.62519175", "0.624967", "0.62470603", "0.62448496", "0.62370104", "0.62343186", "0.62332565", "0.62326676", "0.62304145", "0.62271625", "0.62271625", "0.622372", "0.6207163", "0.62006664", "0.6193098", "0.61809254", "0.61493605", "0.61459637", "0.6138623", "0.6126078", "0.6125377", "0.61244714", "0.61181116", "0.6116154", "0.61076874", "0.6100571", "0.6099422" ]
0.73712176
5
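A note on the record above: its document inverts a 2-D rotation-plus-translation through an external Transform class that is not defined in the record itself. As a minimal standalone sketch of the underlying identity (the inverse of y = R x + t is x = R^T y - R^T t), here is a plain-numpy version; the helper names rigid2d_matrix and invert_rigid2d are made up for illustration, and the sketch deliberately ignores the mirror flag and any toolkit-specific parameter conventions.

# Hypothetical helpers for illustration only; not part of the record's Transform API.
import numpy as np

def rigid2d_matrix(alpha_deg, tx, ty):
    # Homogeneous 3x3 matrix: rotate by alpha (degrees), then translate by (tx, ty).
    a = np.deg2rad(alpha_deg)
    c, s = np.cos(a), np.sin(a)
    return np.array([[c, -s, tx],
                     [s,  c, ty],
                     [0.0, 0.0, 1.0]])

def invert_rigid2d(m):
    # Closed-form inverse of a rigid 2-D transform: transpose the rotation block,
    # and replace the translation column with -R^T t.
    r, t = m[:2, :2], m[:2, 2]
    inv = np.eye(3)
    inv[:2, :2] = r.T
    inv[:2, 2] = -r.T @ t
    return inv

# Composing a transform with its inverse should give the identity matrix.
m = rigid2d_matrix(30.0, 2.0, -1.0)
np.testing.assert_allclose(invert_rigid2d(m) @ m, np.eye(3), atol=1e-12)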
Returns the inverse of the 3d rot and trans matrix
def inverse_transform3(phi, theta=0.0, psi=0.0, tx=0.0, ty=0.0, tz=0.0, mirror = 0, scale=1.0):
    d = Transform({'type': 'spider', 'phi': phi, 'theta': theta, 'psi': psi, 'tx': tx, 'ty': ty, 'tz': tz, "mirror":mirror,"scale":scale})
    d = d.inverse()
    d = d.get_params("spider")
    return d["phi"],d["theta"],d["psi"],d["tx"],d["ty"],d["tz"],d["mirror"],d["scale"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inverse_rigid_trans(Tr): \n inv_Tr = np.zeros_like(Tr) # 3x4\n inv_Tr[0:3,0:3] = np.transpose(Tr[0:3,0:3])\n inv_Tr[0:3,3] = np.dot(-np.transpose(Tr[0:3,0:3]), Tr[0:3,3])\n return inv_Tr", "def inverse(self):\n rotation_matrix = self.pose_mat[:3, :3]\n translation_vector = self.pose_mat[:3, 3]\n\n rot = np.transpose(rotation_matrix)\n trans = - np.matmul(np.transpose(rotation_matrix), translation_vector)\n return Transformation(rot, trans)", "def inverse(self):\n return Rotation(self.matrix.transposed())", "def inverse(self):\r\n \r\n Mi=mat4()\r\n d=self.determinant()\r\n for i in range(4):\r\n for j in range(4):\r\n sign=1-((i+j)%2)*2\r\n m3=self._submat(i,j)\r\n Mi[j,i]=sign*m3.determinant()/d\r\n return Mi", "def inv(transform_matrix):\n\n r = transform_matrix[0:3, 0:3]\n t = transform_matrix[0:3, 3]\n t_inv = -1 * r.T.dot(t)\n transform_inv = np.eye(4)\n transform_inv[0:3, 0:3] = r.T\n transform_inv[0:3, 3] = t_inv\n\n return transform_inv", "def GetInverse(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD3_GetInverse(self, *args)", "def rot_inv(self):\n if not hasattr(self, '_rot_inv'):\n self._rot_inv=np.linalg.inv(self.rot)\n return self._rot_inv", "def getInverseMatrix(self) -> CMatrix4:\n ...", "def inverse_matrice(T):\n a,b,c,d = T[0][0],T[0][1],T[1][0],T[1][1]\n det = a*d-b*c\n aa,bb,cc,dd = d/det,-b/det,-c/det,a/det\n Tinv = [[aa,bb],[cc,dd]]\n return Tinv", "def inv(T):\n K, L = T.shape[1:3]\n squ_matrix = np.einsum('ijkl->ikjl', T).reshape((K*L, K*L),order='F')\n t = np.linalg.inv(squ_matrix)\n return np.einsum('ijkl->ikjl', t.reshape((K,L,K,L), order='F'))", "def affine_transform_inverse(np_transform):\n rotation = np_transform[:3, :3]\n translation = np_transform[:3, 3]\n rotation_inv = numpy.linalg.inv(rotation)\n translation_inv = -1 * numpy.dot(rotation_inv, translation)\n result = numpy.identity(4)\n result[:3, :3] = rotation_inv\n result[:3, 3] = translation_inv.flatten()\n return result", "def rotation_inv(R: np.array) -> np.array:\n return R.T", "def trans_matrix_inv(m:numpy.ndarray):\n was2d = False\n if m.shape[1] == 3:\n was2d = True\n m = numpy.asarray([\n [1.0, 0.0, 0.0, 0.0],\n [0.0, m[0,0], m[0,1], m[0,2]],\n [0.0, m[1,0], m[1,1], m[1,2]],\n [0.0, 0.0, 0.0, 1.0]], numpy.float64)\n trans = m[0:3,3]\n rotate = numpy.zeros(3, numpy.float64)\n r = m[0:3,0:3]\n rc = numpy.linalg.cholesky(numpy.matmul(r.T, r)).T\n scale = numpy.diagonal(rc)\n if numpy.linalg.det(r) < 0.0:\n scale[0] *= -1.0\n rcd = rc * numpy.eye(3, dtype=numpy.float64)\n rc = numpy.linalg.solve(rcd, rc)\n shear = numpy.asarray([rc[0,1], rc[0,2], rc[1,2]], numpy.float64)\n r0 = trans_matrix({'rotate': rotate, 'scale': scale, 'shear': shear})[0:3,0:3]\n r0 = numpy.linalg.solve(numpy.linalg.inv(r), numpy.linalg.inv(r0))\n rotate[1] = numpy.arcsin(_frone(r0[0,2]))\n if numpy.abs((numpy.abs(rotate[1]) - (numpy.pi / 2.0))) < 1.0e-6:\n rotate[0] = 0.0\n rotate[2] = numpy.arctan2(-_frone(r0[1,0]), _frone(-r0[2,0] / r0[0,2]))\n else:\n rc = numpy.cos(rotate[1])\n rotate[0] = numpy.arctan2(_frone(r0[1,2] / rc), _frone(r0[2,2] / rc))\n rotate[2] = numpy.arctan2(_frone(r0[0,1] / rc), _frone(r0[0,0] / rc))\n if was2d:\n trans = trans[1:]\n rotate = rotate[0:1]\n scale = scale[1:]\n shear = shear[2:3]\n return (trans, rotate, scale, shear)", "def inverse(self):\n data = np.linalg.inv(self._data)\n return self.create(self.rows, self.cols, data)", "def inv(self):\n inv = np.linalg.inv(self._mat)\n return MoebTr(inv[0][0], inv[0][1], inv[1][0], inv[1][1])", "def 
inverse_affine_transformation_matrix(A):\n n, n = A.shape\n\n # extract components R, an n-1xn-1 linear transformation matrix, and T, an nx1 translation matrix\n R = A[:n-1, :n-1]\n T = A[:n-1, n-1]\n\n # find R^-1\n R_inv = np.linalg.inv(R)\n\n # Find A^-1/A_inv\n A_inv = np.copy(A).astype(float) # copy A for base of A^-1 matrix and ensure it is of data type float\n A_inv[:n-1, :n-1] = R_inv # set top left nxn sub matrix equal to R^-1\n A_inv[:n-1, n-1] = np.negative(R_inv.dot(T)) # place -R^-1*T in top right corner\n\n return A_inv", "def inverse(self,mat):\n result = np.linalg.inv(mat)\n self.out = result\n return self.out", "def intrinsic_matrix_inv(self) -> np.ndarray:\n\n # determinant of top left of intrinsic matrix\n tldet = self.kx * self.ky\n\n return np.array([[1 / self.kx, -self.kxy / tldet, (self.py * self.kxy - self.ky * self.px) / tldet],\n [0, 1 / self.ky, -self.py / self.ky]])", "def T_inv(T):\n R, xyz = rigmech.T2Rxyz(T)\n R_inv = R.T\n xyz_inv = -R_inv * xyz\n T_inv = R_inv.row_join(xyz_inv).col_join(sp.Matrix([[0, 0, 0, 1]]))\n return T_inv", "def inverse_transform2(alpha, tx = 0.0, ty = 0.0, mirror = 0):\n\n\tt = Transform({\"type\":\"2D\",\"alpha\":alpha,\"tx\":tx,\"ty\":ty,\"mirror\":mirror,\"scale\":1.0})\n\tt = t.inverse()\n\tt = t.get_params(\"2D\")\n\treturn t[ \"alpha\" ], t[ \"tx\" ], t[ \"ty\" ], t[ \"mirror\" ]", "def _get_inv(self):\n m,d = self.B.shape\n Im = np.eye(m)\n Id = np.eye(d)\n BBt = [email protected]\n I_BBt_inv = np.linalg.pinv(Im + BBt)\n \n return (1/self.alpha)*(Id - self.B.T@( [email protected]/self.alpha))", "def inverse (x_ij):\n\n Hij = xyzrph2matrix (x_ij)\n Rji = Hij[0:3, 0:3]\n tij = Hij[0:3,3]\n Rij = Rji.transpose ()\n tji = -Rij.dot (tij)\n Hji = numpy.zeros ((4,4))\n Hji[0:3,0:3] = Rij\n Hji[0:3,3] = tji\n Hji[3,3] = 1\n return matrix2xyzrph (Hji)", "def inverse(self):\n self.check_square()\n\n\n N = self.rows\n\n inverse = make_matrix(N, N)\n\n # Solve on a per-column basis using Ax = b formalism\n for j in range(N):\n b = make_matrix(N, 1)\n b[j, 0] = 1\n\n x = self.solve_linear_system(b)\n\n for i in range(N):\n inverse[i, j] = x[i, 0]\n\n return inverse", "def inverse(self):\n # find the determinant of the matrix\n determinant = self.determinant()\n # find the matrix of minors of the matrix\n matrix_of_minors = self.matrix_of_minors()\n # find the cofactor of the matrix of minors\n cofactor_matrix = self.cofactor_matrix(matrix_of_minors)\n # find the transpose of the cofactor matrix\n transpose_cofactor_matrix = self.transpose(cofactor_matrix)\n # find the adjugate (inverse) matrix\n inverse_matrix = self.adjugate_matrix(determinant, transpose_cofactor_matrix)\n\n return inverse_matrix", "def inverse(self):\n return self.solve(Matrix.I(self.nrows))", "def ComputeInverseInnerOrientation(self):\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n mat = np.array([[a1[0], a2[0]], [b1[0], b2[0]]])\n mat = la.inv(mat)\n\n return np.array([a0[0], b0[0], mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1]]).T", "def inverse(self):\n if self.determinant() != 0:\n ops = reduce_to_red_echelon(self.data.copy(), True)[1]\n matrix = identity_matrix(self.n_rows).data\n \n if ops:\n if isinstance(ops[0], str):\n ops = [ops]\n \n for op in ops:\n if op[0] == 'swap':\n matrix = row_swap(matrix, op[1], op[2])\n elif op[0] == 'multiplication':\n matrix = 
row_multiply(matrix, op[1], op[2])\n elif op[0] == 'subtract':\n matrix = row_subtract(matrix, op[1], op[2], op[3])\n else:\n raise ValueError('Row operation not recognized')\n else:\n raise ValueError('Matrix has a determinant of 0 and is not invertible')\n return Matrix(matrix)", "def inverse(self):\n rotation = self.rotation.inverse()\n translation = rotation * (-self.translation)\n\n return Transform(rotation, translation)", "def matrix_inv(mat):\n\ta = mat[0,0]\n\tb = mat[0,1]\n\tc = mat[0,2]\n\td = mat[1,0]\n\te = mat[1,1]\n\tf = mat[1,2]\n\tg = mat[2,0]\n\th = mat[2,1]\n\ti = mat[2,2]\n\n\tdet = b*f*g + c*d*h + a*e*i - a*f*h - b*d*i - c*e*g\n\n\tinvmat = np.zeros((3,3))\n\tinvmat[0,0] = (e*i - f*h) / det\n\tinvmat[0,1] = (c*h - b*i) / det\n\tinvmat[0,2] = (b*f - c*e) / det\n\tinvmat[1,0] = (f*g - d*i) / det\n\tinvmat[1,1] = (a*i - c*g) / det\n\tinvmat[1,2] = (c*d - a*f) / det\n\tinvmat[2,0] = (d*h - e*g) / det\n\tinvmat[2,1] = (b*g - a*h) / det\n\tinvmat[2,2] = (a*e - b*d) / det\n\treturn invmat", "def Invert(*args, **kwargs):\n return _gdi_.GraphicsMatrix_Invert(*args, **kwargs)", "def ssc.inverse (x_ij):\n\n Hij = xyzrph2matrix (x_ij)\n Rji = Hij[0:3, 0:3]\n tij = Hij[0:3,3]\n Rij = Rji.transpose ()\n tji = -Rij.dot (tij)\n Hji = zeros ((4,4))\n Hji[0:3,0:3] = Rij\n Hji[0:3,3] = tji\n Hji[3,3] = 1\n return matrix2xyzrph (Hji)", "def invert(M):\r\n M = isMatrix(M)\r\n return M.I", "def GetInverse(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD2_GetInverse(self, *args)", "def inv(X):\n R, t = Rt(X)\n Ri = R.T\n return np.concatenate((\n np.concatenate((Ri, -Ri.dot(t)[:,np.newaxis]), axis=1),\n np.array([[0, 0, 1]])))", "def invert(x):\n return linalg.inv(x)", "def InverseRotation(rotation):\n return RotationMatrix([\n [rotation.rot[0][0], rotation.rot[1][0], rotation.rot[2][0]],\n [rotation.rot[0][1], rotation.rot[1][1], rotation.rot[2][1]],\n [rotation.rot[0][2], rotation.rot[1][2], rotation.rot[2][2]]\n ])", "def inv(self):\n\n self.x, self.y = self.y, self.x\n self._x_, self._y_ = self._y_, self._x_\n self.xfac, self.yfac = 1 / self.yfac, 1 / self.xfac\n self._xfac_, self._yfac_ = 1 / self._yfac_, 1 / self._xfac_\n self._u = 1 / self._u.conj()", "def invert_affine_transform(matrix: Tensor) -> Tensor:\n if not isinstance(matrix, Tensor):\n raise TypeError(f\"Input matrix type is not a Tensor. Got {type(matrix)}\")\n\n if not (len(matrix.shape) == 3 and matrix.shape[-2:] == (2, 3)):\n raise ValueError(f\"Input matrix must be a Bx2x3 tensor. 
Got {matrix.shape}\")\n\n matrix_tmp: Tensor = convert_affinematrix_to_homography(matrix)\n matrix_inv: Tensor = _torch_inverse_cast(matrix_tmp)\n\n return matrix_inv[..., :2, :3]", "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n\n # TODO - your code here\n if self.h == 1:\n inverse = [[1/self.g[0][0]]];\n else:\n a = self.g[0][0];\n b = self.g[0][1];\n c = self.g[1][0];\n d = self.g[1][1];\n if(a*d==b*c):\n raise ValueError('matrix does not have a inverse!');\n else:\n weigh = 1/(a*d-b*c);\n inverse = [[weigh*d,weigh*-1*b],[weigh*-1*c,weigh*a]];\n return Matrix(inverse);", "def inverse(self):\n invr = np.linalg.inv(self.affine_matrix)\n return SymmOp(invr)", "def inverseTransformationMatrix(self,index=None):\n if self.method == 'pca':\n if index is not None:\n coordinateIndex = distribution1D.vectori_cxx(len(index))\n for i in range(len(index)):\n coordinateIndex[i] = index[i]\n matrixDim = self._distribution.getInverseTransformationMatrixDimensions(coordinateIndex)\n inverseTransformation = self._distribution.getInverseTransformationMatrix(coordinateIndex)\n else:\n matrixDim = self._distribution.getInverseTransformationMatrixDimensions()\n inverseTransformation = self._distribution.getInverseTransformationMatrix()\n row = matrixDim[0]\n column = matrixDim[1]\n # convert 1D vector to 2D array\n L = np.atleast_1d(inverseTransformation).reshape(row,column)\n else:\n self.raiseAnError(NotImplementedError,' inverse transformationMatrix is not yet implemented for ' + self.method + ' method')\n return L", "def matrix_translate_3d(tx: float, ty: float, tz: float) -> np.matrix:\n return np.matrix([[1, 0, 0, tx], [0, 1, 0, ty], [0, 0, 1, tz], [0, 0, 0, 1]])", "def getDejaVuMatrix(self):\n mtx = self.getRotMatrix((4, 4), transpose=None) # from Quaternion\n mtx[3] = self.getTranslation()\n mtx[:3, 3] = mtx[3, :3]\n mtx[3, :3] = [0, 0, 0]\n return mtx", "def invert(self):\n\n if self.rows != self.columns:\n raise ValueError(\"Matrix must be square to invert\")\n\n A, operations = self.to_reduced_row_echelon()\n if not A.is_identity():\n return 0\n\n # If A was reduced to the identity matrix, then the same set of operations will take I to the inverse of A.\n # [A I] -> [I A^(-1)]\n\n I = IdentityMatrix(size = self.rows)\n for operation in operations:\n func = I.__getattribute__(operation[0])\n args = operation[1:]\n func(*args)\n\n return I", "def inverseN(self):\r\n result = Matrix(self.rows, self.columns)\r\n for r in range(self.rows):\r\n for c in range(self.columns):\r\n result.mat[r][c] = self.cofactor(r, c)\r\n result.out()\r\n result = result.transpose()\r\n det = self.determinant()\r\n print(\"1/(\" + str(det) + \")\")\r\n result.out()\r\n return result", "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n # TODO - your code here\n inverse = []\n if self.h == 1:\n temp = []\n temp.append(1/self.g[0][0])\n inverse.append(temp)\n else:\n identity_matrix = identity(self.h)\n det_term = 1/self.determinant()\n trace_term = self.trace()\n # implement intermediate scaling step locally\n # trace_x_I = trace_term * identity_matrix\n trace_x_I = []\n for i in range(len(self.g)):\n temp_row = []\n for j in range(len(self.g[i])):\n temp_row.append(trace_term * 
identity_matrix[i][j])\n trace_x_I.append(temp_row)\n # implement sub-traction locally\n # sub_term = trace_x_I - self.g\n sub_term = []\n for i in range(len(trace_x_I)):\n temp_row = []\n for j in range(len(trace_x_I[i])):\n temp_row.append(trace_x_I[i][j] - self.g[i][j])\n sub_term.append(temp_row)\n # implement final scaling step locally\n # inverse = det_term * sub_term\n inverse = []\n for i in range(len(sub_term)):\n temp_row = []\n for j in range(len(sub_term[i])):\n temp_row.append(det_term * sub_term[i][j])\n inverse.append(temp_row)\n return Matrix(inverse)\n # TODO - your code here", "def inverse(self):\n # TODO\n # detA\n if not self.is_square():\n raise(\n ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(\n NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n\n mD = self.determinant()\n if self.h == 1:\n if self.g[0][0] = 0:\n raise(NotImplementedError,\n \"The 1x1 Matrix contains 0 can't inverse\")\n else:\n return [[1 / self.g[0][0]]] \n for i in range(self.h): # Calculates the inverse of a 2x2 Matrix.\n my_Matrix = zeroes(2, 2)\n my_Matrix.g[1][1] = self.g[0][0] / mD\n my_Matrix.g[0][0] = self.g[1][1] / mD\n my_Matrix.g[0][1] = - self.g[0][1] / mD\n my_Matrix.g[1][0] = - self.g[1][0] / mD\n return my_Matrix\n\n # trace A\n # 与矩阵TraceA * I identity 单位矩阵", "def posdef_inv_matrix_inverse(tensor, identity, damping):\n return tf.matrix_inverse(tensor + damping * identity)", "def inverse(self):\n ss = self._sum_of_squares()\n if ss > 0:\n return self.__class__(array=(self._vector_conjugate() / ss))\n else:\n raise ZeroDivisionError(\"a zero quaternion (0 + 0i + 0j + 0k) cannot be inverted\")", "def inv_rotation_matrix(self):\n return np.linalg.inv(self._rotation_matrix).tolist()", "def inverse_cayley_transform(z: torch.Tensor) -> torch.Tensor:\n identity = identity_like(z)\n i_identity = multiply_by_i(identity)\n\n z_minus_id = z - i_identity\n inv_z_plus_id = inverse(z + i_identity)\n return z_minus_id @ inv_z_plus_id", "def complex_inverse(c1,cr):", "def _inverse_affine_matrix(self) -> np.ndarray:\n raise NotImplementedError", "def inverse( m, context = FloatContext, copy_m=True ):\n n,n_ = shape_mat(m)\n assert (n==n_) #matris should be square\n\n return solve( m, eye(n), context=context, copy_b=False, copy_a=copy_m )", "def inverse(self):\n def inv(v):\n v[0], v[1] = v[1] , v[0]\n for v in [self.point1 , self.pointN , self.unitv, self.normalv]:\n inv(v)\n\n self.points = numpy.roll(self.points,1,axis=1)\n self.a, self.b = self.b, self.a\n self.angle = numpy.arccos( self.unitv[0] )*numpy.sign(self.unitv[1] )\n return", "def tensorinv(a, ind=2):\n return TensorInv(ind)(a)", "def _r_inv(self):\n # [output_dim, output_dim]\n return tf.linalg.cholesky_solve(\n self._chol_obs_covariance,\n tf.eye(self.emission.output_dim, dtype=self._chol_obs_covariance.dtype),\n )", "def inverse_transform(data, flip, rotate):\n if flip == 'FLIP_LEFT_RIGHT':\n data = np.fliplr(data)\n\n if rotate == 'ROTATE_90':\n data = np.rot90(data, 3)\n\n if rotate == 'ROTATE_180':\n data = np.rot90(data, 2)\n\n if rotate == 'ROTATE_270':\n data = np.rot90(data, 1)\n\n return data", "def get_transform_matrix(theta, phi = None, invert_rot = False, invert_focal = False):\n\n if phi is None:\n phi = const.PHI_IDX * 10.0\n\n #extrinsic x intrinsic\n camera_matrix = np.zeros((4, 4), dtype=np.float32)\n\n intrinsic_matrix = np.eye(4, dtype=np.float32)\n extrinsic_matrix = np.eye(4, dtype=np.float32)\n\n sin_phi = np.sin(float(phi) / 180.0 * np.pi)\n 
cos_phi = np.cos(float(phi) / 180.0 * np.pi)\n sin_theta = np.sin(float(-theta) / 180.0 * np.pi)\n cos_theta = np.cos(float(-theta) / 180.0 * np.pi)\n\n #theta rotation\n rotation_azimuth = np.zeros((3, 3), dtype=np.float32)\n rotation_azimuth[0, 0] = cos_theta\n rotation_azimuth[2, 2] = cos_theta\n rotation_azimuth[0, 2] = -sin_theta\n rotation_azimuth[2, 0] = sin_theta\n rotation_azimuth[1, 1] = 1.0\n\n #phi rotation\n rotation_elevation = np.zeros((3, 3), dtype=np.float32)\n rotation_elevation[0, 0] = cos_phi\n rotation_elevation[0, 1] = sin_phi\n rotation_elevation[1, 0] = -sin_phi\n rotation_elevation[1, 1] = cos_phi\n rotation_elevation[2, 2] = 1.0\n\n #rotate phi, then theta\n rotation_matrix = np.matmul(rotation_azimuth, rotation_elevation)\n if invert_rot:\n rotation_matrix = np.linalg.inv(rotation_matrix)\n\n displacement = np.zeros((3, 1), dtype=np.float32)\n displacement[0, 0] = const.DIST_TO_CAM\n displacement = np.matmul(rotation_matrix, displacement)\n\n #assembling 4x4 from R + T\n extrinsic_matrix[0:3, 0:3] = rotation_matrix\n extrinsic_matrix[0:3, 3:4] = -displacement\n\n if invert_focal:\n intrinsic_matrix[2, 2] = float(const.focal_length)\n intrinsic_matrix[1, 1] = float(const.focal_length)\n else:\n intrinsic_matrix[2, 2] = 1.0 / float(const.focal_length)\n intrinsic_matrix[1, 1] = 1.0 / float(const.focal_length)\n\n camera_matrix = np.matmul(extrinsic_matrix, intrinsic_matrix)\n return camera_matrix", "def transMatrix( source=None ):\n if source is None:\n return None,None\n else:\n (x,y,z) = source[:3]\n if x == y == z == 0.0:\n return None, None \n return tmatrixaccel.transMatrix( x,y,z ),tmatrixaccel.transMatrix( -x, -y, -z )", "def get_world_inv_matrix(m_obj, i):\n if not m_obj.hasFn(oMa.MFn.kTransform):\n return\n\n fn_obj = oMa.MFnDependencyNode(m_obj)\n plug = fn_obj.findPlug('worldInverseMatrix', False).elementByLogicalIndex(i)\n matrix_obj = plug.asMObject()\n matrix_data = oMa.MFnMatrixData(matrix_obj)\n matrix = matrix_data.matrix()\n\n return matrix", "def inverse(self):\n cdef StdVectorFst result = self.copy()\n result.invert()\n return result", "def __invert__(self):\n return self.inverse()", "def inverse_transform(self, matrix):\n\n x = matrix.shape[0]\n y = matrix.shape[1]\n N = x\n\n # Inverse Fourier Transform matrix:\n ift = np.zeros([x, y], complex)\n\n for i in range(0, x):\n for j in range(0, y):\n sum_ift = 0\n for u in range(0, x):\n for v in range(0, y):\n sum_ift = sum_ift + matrix[u, v] * (np.cos(((2 * np.pi) / N) * (u * i + v * j)) + 1j * np.sin(((2 * np.pi) / N) * (u * i + v * j)))\n\n ift[i, j] = sum_ift\n\n\n return ift/(x*x)", "def inverse(self):\n q_vector = np.zeros(4)\n q_vector[:3] = self.imaginary*-1\n q_vector[3] = self.w\n return Quat(q_vector,\"xyzw\")", "def inv(in_A):\n Q,R = qr(in_A)\n QT = Q.T\n N = shape(in_A)[0]\n \n for n in range(N-1,-1,-1):\n Rnn = R[n,n]\n R[n,:] /= Rnn\n QT[n,:] /= Rnn\n for m in range(n+1,N):\n Rnm = R[n,m]\n R[n,m] = 0\n QT[n,:] -= QT[m,:]*Rnm\n\n return QT", "def _inv(self) -> None:\n\n self.inv(inplace=True)", "def _inv22_vectorized(M):\n assert (M.ndim == 3)\n assert (M.shape[-2:] == (2, 2))\n M_inv = np.empty_like(M)\n delta_inv = np.reciprocal(M[:, 0, 0]*M[:, 1, 1] - M[:, 0, 1]*M[:, 1, 0])\n M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv\n M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv\n M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv\n M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv\n return M_inv", "def inverse(self):\n return self.invert()", "def inv(self):\n return MoebGen(self._d / self._det, - self._b / self._det, - self._c 
/ self._det, self._a / self._det)", "def reverse_matrix(self):\n return SWAP.matrix @ self.matrix @ SWAP.matrix", "def inverse_basis(T, dimensions, t):\n B = basis(T, dimensions, t)\n return inv(B.T.dot(B)).dot(B.T)", "def rotation_matrix(angle, axis):\n about_z = rotation_about_z(angle)\n z_to_axis = z_to_vector(axis)\n axis_to_z = np.linalg.inv(z_to_axis)\n return reduce(np.dot, [z_to_axis, about_z, axis_to_z])", "def transformation_matrix(self):\n t = np.array([[0.0], [0.0], [0.0]])\n Rt = np.hstack([self.rotation_matrix, t])\n return np.vstack([Rt, np.array([0.0, 0.0, 0.0, 1.0])])", "def inv_inplace(a):", "def _inv(M):\n ll, mm = M.shape\n M2 = M + 1e-10 * np.eye(ll)\n L = np.linalg.cholesky(M2)\n inv_L = np.linalg.inv(L)\n inv_M = inv_L.T @ inv_L\n return inv_M", "def inverse(self):\n return self._inverse", "def inverse(self):\n group = self.group\n r = tuple([(i, -j) for i, j in self.array_form[::-1]])\n return group.dtype(r)", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse(self, ys):\n with torch.no_grad():\n xs = torch.matmul(ys, torch.diag(torch.reciprocal(torch.exp(self.scaling_diag))))\n xs = self.layer4.inverse(xs)\n xs = self.layer3.inverse(xs)\n xs = self.layer2.inverse(xs)\n xs = self.layer1.inverse(xs)\n return xs", "def inverse(self, u: Tensor, covariates: Tensor) -> Tensor:\n return self.real_nvp.inverse(u, covariates)", "def rotated_e():\n x = np.zeros((5, 5))\n x[:, 0] = 1.\n y = np.zeros((5, 5))\n y[:, 2] = 1.\n z = np.zeros((5, 5))\n z[:, 4] = 1.\n a = np.zeros((5, 5))\n a[0, :] = 1.\n b = np.zeros((5, 5))\n b[2, :] = 1.\n c = np.zeros((5, 5))\n c[4, :] = 1.\n\n img = np.zeros((4, 5, 5))\n img[0] = x + y + z + a\n img[1] = x + y + z + c\n img[2] = a + b + c + x\n img[3] = a + b + c + z\n img[img > 0] = 1.\n\n return img.astype('float32')", "def inv(self):\n\t\tdeterminant = self.det()\n\t\tif determinant:\n\t\t\treturn self.adj() / determinant\n\t\telse:\n\t\t\traise ValueError(\"Not Invertible\")", "def invert(self):\n if self.m != self.n:\n raise exc.LinearAlgebraError(\"cannot invert a non-square matrix\")\n if self.determinant == 0:\n raise exc.LinearAlgebraError(\"cannot invert a singular matrix\")\n # TODO: implement block matrices in their own method\n block_rows = [r1 + r2 for r1, r2 in\n zip(self.data, self.makeIdentity(self.m).data)]\n inverse_block = Matrix.fromRows(block_rows).row_reduce()\n return inverse_block.subset([i for i in range(self.m)],\n [j + self.n for j in range(self.n)])", "def inv(self):\n return self.conjugate()", "def inverse(self):\n n = self.norm()\n c = self.conj()\n d = 1.0 / (n * n)\n c.scale(d)\n return c", "def inverse(self):\n real = self.real / self.magnitude()\n pure = -self.pure / self.magnitude()\n return Quaternion((real, pure))", "def quaternion_inv(quaternion):\r\n q = numpy.array(quaternion, dtype=numpy.float64, copy=True)\r\n numpy.negative(q[1:], q[1:])\r\n return q / numpy.dot(q, q)", "def __invert__(self):\n try:\n B = ~(self.matrix())\n except ZeroDivisionError:\n raise ZeroDivisionError(\"matrix morphism not invertible\")\n try:\n return self.parent().reversed()(B)\n except TypeError:\n raise ZeroDivisionError(\"matrix morphism not invertible\")", "def inverseIntermediateJac(self,x):\n \n Ri = self._rotation.T\n si = (1./self._scaled).reshape((1,1,self._dim))\n \n Jac = self.intermediateJacPol2Rot(x)\n \n #Ri.J\n Jac = 
np.einsum(\"jk,ikl->ijl\",Ri,Jac)\n #(Ri.J).diag(si)\n Jac *= si\n \n return Jac", "def inverse_3by3_double(M):\n if len(M.shape) > 1:\n M = M.flatten()\n\n M = np.array(M, 'float')\n\n determinant = 0.\n adj_M = np.zeros((9,), 'float')\n\n # First row of adjugate matrix\n adj_M[0] = (M[4] * M[8] - M[7] * M[5]) # Det #0\n adj_M[1] = -(M[1] * M[8] - M[7] * M[2])\n adj_M[2] = (M[1] * M[5] - M[4] * M[2])\n\n # Second row of adjugate matrix\n adj_M[3] = -(M[3] * M[8] - M[6] * M[5]) # Det #1\n adj_M[4] = (M[0] * M[8] - M[6] * M[2])\n adj_M[5] = -(M[0] * M[5] - M[3] * M[2])\n\n # Third row of adjugate matrix\n adj_M[6] = (M[3] * M[7] - M[6] * M[4]) # Det #2\n adj_M[7] = -(M[0] * M[7] - M[6] * M[1])\n adj_M[8] = (M[0] * M[4] - M[3] * M[1])\n\n determinant += M[0] * adj_M[0]\n determinant += M[1] * adj_M[3] # Using addition since minus is integrated in adjugate matrix.\n determinant += M[2] * adj_M[6]\n\n return (adj_M / determinant)", "def BackTransform(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD3_BackTransform(self, *args)", "def transformation_matrix(self, s1, s2, s3, t1, t2, t3):\n\n s1 = np.array(s1)\n s2 = np.array(s2)\n s3 = np.array(s3)\n t1 = np.array(t1)\n t2 = np.array(t2)\n t3 = np.array(t3)\n\n Q = np.array(\n [\n [t2[0] - t1[0], t2[1] - t1[1], t2[2] - t1[2]],\n [t3[0] - t1[0], t3[1] - t1[1], t3[2] - t1[2]],\n ]\n )\n\n P = np.array([[s2[0] - s1[0], s2[1] - s1[1]], [s3[0] - s1[0], s3[1] - s1[1]]])\n\n try:\n # Invert the P matrix\n Pinv = inv(P)\n\n # Build the dot product\n T = np.dot(Pinv, Q)\n\n # Offset\n V0 = np.subtract(t2, np.transpose(s2[0:2]).dot(T))\n except Exception as e:\n self.log.error(\"An error occured during the transformation.\", exc_info=True)\n return -1, -1\n\n return T, V0", "def rotation_matrix(self):\n return self.affine_matrix[0:3][:, 0:3]", "def invgeochart(w):\n # u = torch.asin(w[...,2])\n u = torch.acos(w[...,2])\n # v = torch.acos(w[...,0]/torch.cos(u))\n v = torch.atan(w[...,1]/w[...,0])\n return torch.stack((u,v+np.pi))", "def getTransposeMatrix(self) -> CMatrix4:\n ..." ]
[ "0.75761485", "0.7515526", "0.7423403", "0.7418693", "0.73657846", "0.7288308", "0.7254451", "0.7109498", "0.7077596", "0.7040509", "0.700391", "0.6975826", "0.6933009", "0.6900903", "0.68664896", "0.6786958", "0.6786305", "0.6780128", "0.6755011", "0.67143685", "0.6650445", "0.66385657", "0.6633354", "0.6624766", "0.66143036", "0.6612461", "0.6605213", "0.6582941", "0.6561472", "0.6557437", "0.6527339", "0.6505916", "0.649954", "0.64896667", "0.6478087", "0.64711523", "0.6443079", "0.64137226", "0.6410265", "0.64046216", "0.6379611", "0.63641703", "0.6352396", "0.635068", "0.6349814", "0.634266", "0.6339411", "0.63392603", "0.63342464", "0.63139284", "0.63096863", "0.63089234", "0.6298493", "0.62805563", "0.6243935", "0.6226705", "0.6206828", "0.6178754", "0.6172308", "0.61550295", "0.615501", "0.61522955", "0.61468667", "0.61416644", "0.61406046", "0.61365795", "0.6124081", "0.6123134", "0.61064184", "0.6080443", "0.60758823", "0.60733616", "0.605772", "0.60568655", "0.6040437", "0.60271645", "0.602043", "0.60196054", "0.60136163", "0.60136163", "0.60136163", "0.60136163", "0.60136163", "0.60130656", "0.6000585", "0.5996734", "0.5985868", "0.5982421", "0.59794414", "0.5977023", "0.5974251", "0.59635097", "0.5961458", "0.59595776", "0.5959085", "0.59561515", "0.5950094", "0.5942188", "0.59249103", "0.5910203" ]
0.6268844
54
Create a list of available symmetries
def list_syms(): SymStringVec=[]; SymStringVec.append("CSYM"); SymStringVec.append("DSYM"); SymStringVec.append("TET_SYM"); SymStringVec.append("OCT_SYM"); SymStringVec.append("ICOS_SYM"); SymStringVec.append("ISYM"); return SymStringVec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_symmetries(self):\n temp = self._properties.get('symmetries', [])\n return temp", "def __set_symbol_dict(self):\r\n return {0: list(alph) if self.is_case_snstv else list(alph)[:26],\r\n 1: list(dgt),\r\n 2: list(spcl) if self.is_spcl else []}", "def z2_symmetries(self) -> \"Z2Symmetries\":\n return self._z2_symmetries", "def get_symbols(self):\n\n raise NotImplementedError('''\n Must implement get_symbols. Call help() for details.\n ''')", "def get_symbols(**kwargs):\n return Symbols(**kwargs).fetch()", "def itersymbols(self):\n for syms in self._symbols.itervalues():\n for sym in syms:\n yield sym", "def get_symbol(self):\n return []", "def get_symbols_list(self):\n return self.symbols_list", "def namelist():\n\n\n session = Session(engine)\n\n results = session.query(lockdown.country).order_by(lockdown.country).all()\n\n #session.close()\n all_symbols = list(np.ravel(results))\n sym = all_symbols[1]\n\n return jsonify(all_symbols)", "def list_symbol_tables(mst):\n stlist = []\n def append_st(st):\n #print(st)\n stlist.append(st)\n for s in st.get_symbols():\n for ns in s.get_namespaces():\n append_st(ns)\n if not isinstance(mst, symtable.SymbolTable):\n # Assume it is text of a program to compile\n mst = symtable.symtable(mst, '<string>', 'exec')\n append_st(mst)\n return stlist", "def _create_symbol_mapping():\n normal_items = [\"+\", \"-\"]\n unicode_items = [chr(0x2070 + i) for i in range(10, 12)]\n\n # Create a dict mapping the two.\n return DefaultDictionary(zip(normal_items, unicode_items))", "def symbols(self):\n pass", "def __init__(self, symbols):\r\n self.symbols = set(symbols)", "def get_symbols(self, type_name):\n return self._symtab[type_name].get_symbols()", "def choices(symbols, k):\n return [R.choice(symbols) for _ in range(k)]", "def symbols(self) -> List[SingleMapping]:\n return self._symbols", "def getSymbols(self):\n return self.alpha.getSymbols()", "def lookup_option_symbols(self, underlying: str) -> List[Symbol]:\n url = \"/v1/markets/options/lookup\"\n params = {\"underlying\": underlying}\n\n data = self.get(url, params)\n res = MarketsAPIResponse(**data)\n return res.symbols", "def __init__(self):\r\n self.s_table = SymbolTable.preSymbols", "def print_symbols():\n\n global program\n if program is None:\n print \"no program is loaded\"\n return\n for(s, a) in program.symbols():\n print \"0x{:x} : {}\".format(a, s)", "def getSymbolMap():\n name = os.path.join(os.path.dirname(__file__), 'nasdaq_nasdaqcom.csv')\n symbols = TickerSymbols(name)\n return symbols.getNameToTicker()", "def getSymmetries(self, board, pi):\n return [(board, pi), (board[:, ::-1], pi[::-1])]", "def get_symbols(self):\n symbols = os.environ.get('SYMBOLS', 'btc,eth')\n if not symbols:\n return 'btc,eth'\n return symbols", "def popSym(self):\r\n res=self.dbConn.execute(\"SELECT * FROM Symposia ORDER BY StartDate\").fetchall()\r\n self.symList = [ln[\"Name\"] for ln in res]\r\n self.symID_Title = {ln[\"SymposiumID\"]:ln[\"Name\"] for ln in res}\r\n for ln in res:\r\n self.symID_Title[ln[\"Name\"]] = ln[\"SymposiumID\"]", "def all_currency_codes():\n return [(a, CURRENCIES[a].name) for a in CURRENCIES]", "def getSymbols(self, name: unicode, namespace: ghidra.program.model.symbol.Namespace) -> List[ghidra.program.model.symbol.Symbol]:\n ...", "def currency_code_mappings():\n return [(a, CURRENCIES[a].name) for a in settings.CURRENCIES]", "def ionic_symbols(self) -> list[str]:\n return self.to_list().symbols", "async def get_symbols(self):\n uri = \"/v3/spot/symbols\"\n success, error = 
await self.request(\"GET\", uri)\n return success, error", "def getSymbolTable(self) -> ghidra.app.util.bin.format.pe.debug.DebugCodeViewSymbolTable:\n ...", "def get_existing_symbols():\n instruments = pyRofex.get_all_instruments()[\"instruments\"]\n existing_symbols = []\n for instrument in instruments:\n existing_symbols.append(instrument[\"instrumentId\"][\"symbol\"])\n return existing_symbols", "def symbol_table(self) -> str:\n return self._symbol_table", "def find_symbols(self, **kw):\n return list(self.ifind_symbols(**kw))", "def add_symbols(self, lst):\n for ii in lst:\n self.__symbols += [Symbol(ii, self)]", "def get_iex_symbols(**kwargs):\n return IEXSymbols(**kwargs).fetch()", "def symbolsAssign(clusters):\n \n alphabet = ['A','a','B','b','C','c','D','d','E','e',\n 'F','f','G','g','H','h','I','i','J','j',\n 'K','k','L','l','M','m','N','n','O','o',\n 'P','p','Q','q','R','r','S','s','T','t',\n 'U','u','V','v','W','w','X','x','Y','y','Z','z']\n \n clusters = pd.Series(clusters)\n N = len(clusters.unique())\n\n cluster_sort = [0] * N \n counter = collections.Counter(clusters)\n for ind, el in enumerate(counter.most_common()):\n cluster_sort[ind] = el[0]\n\n if N >= len(alphabet):\n alphabet = [chr(i+33) for i in range(0, N)]\n else:\n alphabet = alphabet[:N]\n hashm = dict(zip(cluster_sort + alphabet, alphabet + cluster_sort))\n strings = [hashm[i] for i in clusters]\n return strings, hashm", "def get_all_cipher():\n return OpenSSL.cipher_algo.keys()", "async def init_trading_pair_symbols(cls, shared_session: Optional[aiohttp.ClientSession] = None):\n client_session = shared_session or aiohttp.ClientSession()\n async with client_session as client:\n async with client.get(BeaxyConstants.PublicApi.SYMBOLS_URL, timeout=5) as response:\n if response.status == 200:\n symbols: List[Dict[str, Any]] = await response.json()\n cls._trading_pair_symbol_map = {\n symbol_data[\"name\"]: (f\"{symbol_data['baseCurrency']}-{symbol_data['termCurrency']}\")\n for symbol_data in symbols\n }\n else:\n IOError(f\"There was an error requesting the list of symbols from the exchange ({response})\")", "def find_symbols(lst):\n ret = []\n for ii in lst:\n ret += [find_symbol(ii)]\n return ret", "def _binaries_to_symbolize(self):\n raise NotImplementedError()", "def __generate_symboltable(self, code):\n\n code_without_lables = []\n address = 0\n for line in code:\n label_code = line.split(':')\n label = label_code[0]\n if len(label) != len(line):\n self.__symboltable[label] = address\n address += REG_SIZE\n instruction = label_code.pop().strip()\n code_without_lables = code_without_lables + [instruction]\n else:\n instruction = label_code.pop().strip()\n code_without_lables = code_without_lables + [instruction]\n\n tokens = instruction.split(' ')\n asm_directive = tokens[0]\n if tokens[0] in AssemblerDirectives.to_string():\n if asm_directive == AssemblerDirectives.ORG.name:\n address = int(tokens[1])\n else:\n address += REG_SIZE\n\n return code_without_lables", "def init_dict() -> None:\n for elem in letters:\n ascii_dict[elem] = []\n for elem in numbers:\n ascii_dict[elem] = []\n for elem in symbols:\n ascii_dict[elem] = []", "def list_ciphers():\n global AVAILABLE_CIPHERS\n print(\"[!] 
Available ciphers: \")\n for i in range(len(AVAILABLE_CIPHERS)):\n print(\" ----> %s.%s\"%(i+1, AVAILABLE_CIPHERS[i]))\n exit()", "def list():\n\n return cache.codeTableList()", "def get_currencies_names():\n names = [x for x in cur_dict]\n return names", "def _get_paths(symbol: Union[str, int]) -> str:\n if isinstance(symbol, str):\n return {\n 'circle':\n '\"M\"+b1+\",0A\"+b1+\",\"+b1+\" 0 1,1 0,-\"+b1+\"A\"+b1+\",\"+b1+\" 0 0,1 \"+b1+\",0Z\"',\n 'square':\n '\"M\"+b1+\",\"+b1+\"H-\"+b1+\"V-\"+b1+\"H\"+b1+\"Z\"',\n 'diamond':\n '\"M\"+b1+\",0L0,\"+b1+\"L-\"+b1+\",0L0,-\"+b1+\"Z\"',\n 'hexagram':\n '\"M-\"+b3+\",0l-\"+b2+\",-\"+b1+\"h\"+b3+\"l\"+b2+\",-\"+b1+\"l\"+b2+\",\"+b1+\"h\"+b3+\"l-\"+b2+\",\"+b1+\"l\"+'\n 'b2+\",\"+b1+\"h-\"+b3+\"l-\"+b2+\",\"+b1+\"l-\"+b2+\",-\"+b1+\"h-\"+b3+\"Z\"'\n }[symbol]\n return {\n 37: '\"M-\"+d1+\",\"+d3+\"L0,0M\"+d1+\",\"+d3+\"L0,0M0,-\"+d2+\"L0,0\"',\n 38: '\"M-\"+d1+\",-\"+d3+\"L0,0M\"+d1+\",-\"+d3+\"L0,0M0,\"+d2+\"L0,0\"',\n 39: '\"M\"+d3+\",\"+d1+\"L0,0M\"+d3+\",-\"+d1+\"L0,0M-\"+d2+\",0L0,0\"',\n 40: '\"M-\"+d3+\",\"+d1+\"L0,0M-\"+d3+\",-\"+d1+\"L0,0M\"+d2+\",0L0,0\"',\n 34: '\"M\"+d1+\",\"+d1+\"L-\"+d1+\",-\"+d1+\"M\"+d1+\",-\"+d1+\"L-\"+d1+\",\"+d1',\n 33: '\"M0,\"+d1+\"V-\"+d1+\"M\"+d1+\",0H-\"+d1',\n 35: '\"M0,\"+d1+\"V-\"+d1+\"M\"+d1+\",0H-\"+d1+\"M\"+d2+\",\"+d2+\"L-\"+d2+\",-\"+d2+\"M\"+d2+\",-\"+d2+\"L-\"+d2+\",\"+d2',\n 36: '\"M\"+d1+\",\"+d2+\"V-\"+d2+\"m-\"+d2+\",0V\"+d2+\"M\"+d2+\",\"+d1+\"H-\"+d2+\"m0,-\"+d2+\"H\"+d2'\n }[symbol]", "def build_gdb_symbol_table():\n\n tab = Symtab()\n n = gdb.parse_and_eval (\"symtab->nodes\")\n while (long(n)):\n if symtab_node_is_function (n):\n current_symbol = GdbFunction(tab, n)\n tab.all_functions.append (current_symbol)\n elif symtab_node_is_variable (n):\n current_symbol = GdbVariable(tab, n)\n tab.all_variables.append (current_symbol)\n else:\n raise gdb.GdbError (\"Encountered an unknown symbol table node\");\n\n tab.order_to_sym[current_symbol.order] = current_symbol\n tab.all_symbols.append (current_symbol)\n\n n = n[\"next\"]\n pass\n\n tab.fixup()\n return tab", "def get_generating_symbols(self) -> AbstractSet[CFGObject]:\n if self._generating_symbols is None:\n self._generating_symbols = self._get_generating_or_nullable(False)\n return self._generating_symbols", "def query_all_symbols(self):\n return self._call_txtrader_api('query_symbols', {'data': True})", "def _get_symbols(exchange_code: str, token: str) -> List[mtypes.Symbol]:\n _LOG.info(\"Getting symbols list for exchange: '%s'\", exchange_code)\n response = get_client().service.SymbolList(\n Token=token, Exchange=exchange_code\n )\n\n if response.SYMBOLS is None:\n _LOG.error(\"No symbols found for exchange: '%s'\", exchange_code)\n return []\n\n symbols = [\n mtypes.Symbol.from_dict(d=obj)\n for obj in zeep.helpers.serialize_object(response.SYMBOLS[\"SYMBOL\"])\n ]\n\n _LOG.info(\"Got %s symbols for exchange '%s'\", len(symbols), exchange_code)\n return symbols", "def ifind_symbols(self, name=\"any\", **kw):\n for sym in self.itersymbols():\n if (name==\"any\" or name==sym.sym.name) and \\\n sym.sym.k==kw:\n yield sym.sym", "def get_symbols(request):\n symbols = list(Stock.objects.values('symbol').distinct())\n return JsonResponse(symbols, safe=False)", "def create_state_symbols(nVars,nParams):\n\n nSensitivityEqs = nVars * nParams\n\n #state variables\n x_sp = np.array(sp.symbols('x0:' + str(nVars)))\n\n #sensitivity variables\n sensitivity_sp = np.array(list(sp.symbols('s0:' + str(nSensitivityEqs))))\n\n return 
[x_sp,sensitivity_sp]", "def generate_symbol_definitions(mode, symbols, prefix, definition):\n direct = []\n tabled = []\n for ii in symbols:\n direct += [ii.generate_rename_direct(prefix)]\n tabled += [ii.generate_rename_tabled(prefix)]\n if \"vanilla\" == mode:\n tabled = direct\n return template_symbol_definitions % (definition, \"\\n\".join(direct), \"\\n\".join(tabled))", "def get_codecs_list():\n for codec in CODECS_IN_FILE.iterkeys():\n print codec", "def encodings():\n from . import factory\n return factory.MAPPINGS.keys()", "def slot_key_db() -> Dict[str, List]:\n\n return {'q50': 'second_person_plural',\n 'q28': 'cot_caught',\n 'q80': 'rain_sun',\n 'q66': 'crawfish',\n 'q110': 'halloween',\n 'q64': 'sandwich',\n 'q90': 'side_road',\n 'q105': 'beverage',\n 'q73': 'shoes',\n 'q79': 'highway',\n 'q58': 'yard_sale',\n 'q107': 'rubbernecking',\n 'q94': 'frosting',\n 'q14': 'lawyer',\n 'q76': 'kitty_corner',\n 'q65': 'firefly',\n 'q60': 'verge',\n 'q118': 'brew_thru',\n 'q103': 'water_fountain'}", "def get_short_currencies_names():\n short_names = [[x, cur_dict[x][1]] for x in cur_dict]\n return short_names\n # return [['Bitcoin','BTC'], ['Litecoin', 'LTC']]", "def test_find_z2_symmetries_op_without_sym(self):\n qubit_op = SparsePauliOp.from_list(\n [\n (\"I\", -1.0424710218959303),\n (\"Z\", -0.7879673588770277),\n (\"X\", -0.18128880821149604),\n ]\n )\n z2_symmetries = Z2Symmetries.find_z2_symmetries(qubit_op)\n self.assertTrue(z2_symmetries.is_empty())", "def codelists():\n return CodelistSet()", "def test_get_pci_switch_list(self):\n pass", "def _get_v2_symbols(self, assets):\n\n v2_symbols = []\n for asset in assets:\n v2_symbols.append(self._get_v2_symbol(asset))\n\n return v2_symbols", "def perms_sym_init(g, sym=None):\n if g is None or len(g) == 0:\n if sym is None:\n sym = SymmetricGroup(0)\n return sym, [sym([])]\n\n if sym is None:\n domain = set().union(*[perm_sym_domain(gg) for gg in g])\n if all(isinstance(s, (Integer, int, long)) and s > 0 for s in domain):\n domain = max(domain)\n else:\n domain = sorted(domain)\n sym = SymmetricGroup(domain)\n\n try:\n return sym, [sym(u) for u in g]\n except (ValueError, TypeError):\n return sym, None", "def filter_compatible(symmetries, *, structure):\n raise ValueError(\n \"Unrecognized type '{}' for 'symmetries'\".format(type(symmetries))\n )", "def singleglyph(x):\n return [glyph(x)]", "def _gen_freeze_scheme():\n freeze_scheme = {}\n\n for key in SCHEME_KEYS:\n paths = []\n for scheme_name, install_scheme in INSTALL_SCHEMES.iteritems():\n val = install_scheme[key]\n if scheme_name == 'unix_home':\n val = val.replace('$base', '$home', 1)\n else:\n val = val.replace('$base', '$prefix', 1)\n val = val.replace('$platbase', '$exec_prefix', 1)\n paths.append(val)\n freeze_scheme[key] = paths\n\n return freeze_scheme", "def symbols(self, symbols: List[SingleMapping]):\n\n self._symbols = symbols", "def set_symbols_from(self,sigma=_Set())-> None:\n \n m=len(self.chars)\n \n _s=[]\n symbols=set()\n for e in sigma.symbols:\n \"ignoring the empty word\"\n if not e == '':\n symbols.add(e)\n \n for s in symbols:\n _s=split(s)\n n=len(s)\n for i in range(0,m-n+1):\n if _s==self.chars[i:i+n]:\n self.symbols[i]=s\n i=+1\n\n \"\"\"Eliminating None values from the list object self.symbols\"\"\"\n temp=[]\n for elt in self.symbols:\n if elt==None:\n pass\n else:\n temp.append(elt)\n self.symbols=temp", "def query_symbols(self):\n return self._call_txtrader_api('query_symbols', {'data': False})", "def get_palette_names(scheme: ColorScheme | 
ColorSchemeShort) -> list[str]:\n mod = get_palette_module(scheme)\n names = mod.__all__\n return names.copy()", "def _available_algorithms(**_: str) -> Set[str]:\n avail = set()\n pass2 = set()\n for algo in hashlib.algorithms_available:\n lalgo = algo.lower()\n if \"with\" in lalgo:\n continue # skip apparently redundant ones\n if lalgo != algo:\n pass2.add(algo)\n else:\n avail.add(lalgo)\n for algo in pass2:\n if algo.lower() not in avail:\n avail.add(algo)\n return avail", "def free_symbols(self) -> Iterable[sympy.Symbol]:\n return get_free_symbols(self.params)", "def free_symbols(self) -> Iterable[sympy.Symbol]:\n return get_free_symbols(self.params)", "def symbols_details(self):\n pass", "def _symbols_of_input(label: str) -> List[str]:\n if label == common.EPSILON:\n return [label]\n\n # We add a state transition arc for each digit of a multi-digit number.\n if \"[\" not in label:\n return list(label)\n\n # We add a state transition arc for each inflectional or derivational\n # morpheme, inflectional group boundary, and proper noun analysis tag.\n return _SYMBOLS_REGEX.findall(label)", "def free_symbols(self) -> set[Basic]:\n empty: set[Basic] = set()\n return empty.union(*(a.free_symbols for a in self.args))", "def get_related_forex_currencies(self, currency = None):\n new_symbol_names_list = []\n for symbol_name in self.symbol_names_list:\n if (symbol_name[:3] == currency ) | (symbol_name[3:] == currency ):\n new_symbol_names_list.append(symbol_name)\n symbol_names_list = new_symbol_names_list\n return symbol_names_list", "def gene_symbol_wrangler(inpAcc):\n \n print(\"processing gene symbols\")\n \n resD = {}\n \n for convI in inpAcc:\n keyI = convI[\"InputValue\"]\n valueI = convI[\"Gene Symbol\"]\n resD[keyI] = valueI\n\n return resD", "def _getAvailableMethods(self):\n bsc = getToolByName(self, 'bika_setup_catalog')\n items = [(c.UID, c.Title) \\\n for c in bsc(portal_type='Method',\n inactive_state = 'active')]\n items.sort(lambda x,y:cmp(x[1], y[1]))\n items.insert(0, ('', t(_('None'))))\n return DisplayList(items)", "def symbolize_pairs(list_of_pair_string: str) -> list:\n symbolized_pairs = []\n for pair in list_of_pair_string:\n symbolized_pairs.append(pair[0] + '-' + pair[1])\n\n return symbolized_pairs", "def symbol_state_map(self):\n map = {}\n for state in self:\n map[state.symbol] = state\n map.update(self.symbol_synonyms)\n if not self.case_sensitive:\n for state in self:\n if state.symbol.islower():\n map[state.symbol.upper()] = state\n else:\n map[state.symbol.lower()] = state\n for symbol, state in self.symbol_synonyms.items():\n if symbol.islower():\n map[symbol.upper()] = state\n else:\n map[symbol.lower()] = state\n return map", "def _get_symbols(self):\n\n symbols = self.get_symbols()\n\n if isinstance(symbols, dict):\n keys = ['symbol', 'from_symbol', 'to_symbol']\n correct_keys = np.isin(keys, list(symbols.keys())).all()\n\n if not correct_keys:\n raise ImplementationError('''\n Dict should be in the form:\n {'symbol':[], 'from_symbol':[], 'to_symbol':[]}\n ''')\n else:\n symbols = pd.DataFrame(symbols, index = [symbols['symbol']])\n\n symbols.index = symbols.symbol\n\n return symbols", "def symbols_zero_spin(self) -> List[str]:\n return self._symbols_zero_spin", "def IterUniqueSymbols(self):\n return SymbolGroup._IterUnique(self)", "def set_symbols(self, symboltable: dict):\n\n for index in range(1, self.symbol_layout.rowCount()):\n self.symbol_layout.removeRow(index)\n\n font = QFont('Fira Code', 8, QFont.Medium)\n for entry in symboltable:\n symbol = 
QLineEdit()\n symbol.setReadOnly(True)\n symbol.setText(entry)\n symbol.setFont(font)\n address = QLineEdit()\n address.setReadOnly(True)\n address.setFont(font)\n address.setText(str(symboltable[entry]))\n self.symbol_layout.addRow(address, symbol)", "def list():\n return [Drive.ENCODER_L,\n Drive.ENCODER_R]", "def sym_elements(self):\n def trans(name, *sym):\n t = Element.TRANSFORMS\n n = name.split('_')\n\n for x in sym:\n n[-1] = t[x][n[-1]]\n\n return '_'.join(n)\n\n def primary():\n e = self.copy()\n e.name = '{}_p'.format(self.name)\n return e\n\n def x_sym():\n e = self.copy()\n e.name = '{}_x'.format(self.name)\n e.inode = trans(self.inode, 'x')\n e.jnode = trans(self.jnode, 'x')\n return e\n\n def y_sym():\n e = self.copy()\n e.name = '{}_y'.format(self.name)\n e.inode = trans(self.inode, 'y')\n e.jnode = trans(self.jnode, 'y')\n return e\n\n def xy_sym():\n e = self.copy()\n e.name = '{}_xy'.format(self.name)\n e.inode = trans(self.inode, 'x', 'y')\n e.jnode = trans(self.jnode, 'x', 'y')\n return e\n\n if self.symmetry is None:\n return primary(),\n\n elif self.symmetry == 'x':\n return primary(), x_sym()\n\n elif self.symmetry == 'y':\n return primary(), y_sym()\n\n elif self.symmetry == 'xy':\n return primary(), x_sym(), y_sym(), xy_sym()", "def create_possible_symbols_to_cells_mapping(self):\r\n possibles_to_cells = defaultdict(set)\r\n for cell in self.iterate_empty_cells():\r\n possibles_to_cells[frozenset(cell.get_possible_symbols())].add(cell)\r\n return possibles_to_cells", "def r_symbols(size, symbols, length, used=None):\n if length == 1 and not used:\n return R.sample(symbols, size)\n rset, used = set(), set(used or [])\n while len(rset) < size:\n s = r_string(symbols, R.randint(1, length))\n if s not in used:\n rset.add(s)\n return list(rset)", "def get_all_symbolic_models(self):\n return get_symbolic_constraint_models(self.constraints)", "def _init_symbol_tracker(self):\n # Initialize with an empty set\n atoms_indx = {symb: set([]) for symb in self.symbols}\n\n # Populate the sets\n for atom in self.atoms:\n symb = atom.symbol\n atoms_indx[symb].add(atom.index)\n return atoms_indx", "def sym(self) -> np.ndarray:\n if self._sym is None:\n self._sym = symmetrize_discrete_vector_field(self.F, mode=\"sym\")\n return self._sym", "def __init__(self, sym: ghidra.program.model.pcode.HighSymbol):\n ...", "def get_symbol(symbols):\n # Figure out if list of symbols or single symbol.\n if not hasattr(symbols, '__getitem__'):\n symbols = [symbols]\n elif len(symbols) == 3 and symbols[0] in ('p', 'P'):\n # Most likely a polygon specification (at least not a valid other\n # symbol).\n symbols = [symbols]\n\n symbols = [symbol_dict[symbol] if symbol in symbol_dict else symbol for\n symbol in symbols]\n\n paths = []\n for symbol in symbols:\n if isinstance(symbol, matplotlib.path.Path):\n return symbol\n elif hasattr(symbol, '__getitem__') and len(symbol) == 3:\n kind, n, angle = symbol\n\n if kind in ['p', 'P']:\n if kind == 'p':\n radius = 1. 
/ cos(pi / n)\n else:\n # make the polygon such that it has area equal\n # to a unit circle\n radius = sqrt(2 * pi / (n * sin(2 * pi / n)))\n\n angle = pi * angle / 180\n patch = matplotlib.patches.RegularPolygon((0, 0), n,\n radius=radius,\n orientation=angle)\n else:\n raise ValueError(\"Unknown symbol definition \" + str(symbol))\n elif symbol == 'o':\n patch = matplotlib.patches.Circle((0, 0), 1)\n\n paths.append(patch.get_path().transformed(patch.get_transform()))\n\n return paths", "def findBrains():\r\n keys = livingDead.Frankenstein.genKeys()\r\n return keys", "def set_symbols(self, symbols=None):\n if symbols and isinstance(symbols, str):\n if symbols.find(',') != -1:\n symbols_list = symbols.split(',')\n for item in symbols_list:\n self.symbols.append(item.strip())\n else:\n self.symbols.append(symbols)\n else:\n self.symbols = []", "def _symbols_of_output(label: str) -> List[str]:\n if label == common.EPSILON:\n return [label]\n\n # We add a new state transition arc for each character of the output token.\n return list(label)", "def __init__(self, DEBUG=False):\n self.DEBUG = DEBUG\n\n self.classTable = {}\n self.subroutineTable = {}\n\n self.counts = {}\n self.counts[\"STATIC\"] = 0\n self.counts[\"FIELD\"] = 0\n self.counts[\"ARG\"] = 0\n self.counts[\"VAR\"] = 0\n\n if self.DEBUG:\n print(\"DEBUG(SymbolTable): INITIALIZED SYMBOL TABLES\")", "def sym_nodes(self):\n def primary():\n n = self.copy()\n n.name = '{}_p'.format(self.name)\n return n\n\n def x_sym():\n n = self.copy()\n n.name = '{}_x'.format(self.name)\n n[1] *= -1\n return n\n\n def y_sym():\n n = self.copy()\n n.name = '{}_y'.format(self.name)\n n[0] *= -1\n return n\n\n def xy_sym():\n n = self.copy()\n n.name = '{}_xy'.format(self.name)\n n[:2] *= -1\n return n\n\n if self.symmetry is None:\n return primary(),\n\n elif self.symmetry == 'x':\n return primary(), x_sym()\n\n elif self.symmetry == 'y':\n return primary(), y_sym()\n\n elif self.symmetry == 'xy':\n return primary(), x_sym(), y_sym(), xy_sym()", "def choices(self) -> list:\n return [self.mapper(i) for i in self.keys()]" ]
[ "0.7745224", "0.669557", "0.6515156", "0.63807356", "0.63562876", "0.6307514", "0.6297486", "0.6230556", "0.61550385", "0.6110371", "0.60140777", "0.600392", "0.58965176", "0.5893767", "0.5836222", "0.58326477", "0.5825521", "0.5808387", "0.57900697", "0.5775794", "0.57617795", "0.5722399", "0.5715164", "0.5712698", "0.56934786", "0.56902784", "0.5653735", "0.56444454", "0.56345713", "0.5575523", "0.5574133", "0.5573711", "0.55713034", "0.55576456", "0.5502158", "0.54827964", "0.54524845", "0.5438015", "0.54110336", "0.54000306", "0.53952265", "0.5394388", "0.53913903", "0.53579646", "0.53526795", "0.53345627", "0.5327537", "0.531732", "0.5302704", "0.5281369", "0.5279933", "0.5279217", "0.5278623", "0.5272957", "0.526886", "0.5265955", "0.5263171", "0.52604216", "0.5256973", "0.5242773", "0.52398944", "0.5230909", "0.52263564", "0.5219636", "0.5219259", "0.52074474", "0.52036786", "0.51703924", "0.5151208", "0.5147567", "0.513742", "0.51361126", "0.51361126", "0.5132553", "0.5132269", "0.5123207", "0.510926", "0.51091474", "0.51076", "0.5105021", "0.51013315", "0.5091543", "0.5086675", "0.5086598", "0.5083679", "0.50824267", "0.5076769", "0.50710225", "0.5069725", "0.50670815", "0.5054676", "0.505128", "0.50496763", "0.5011902", "0.5007575", "0.49962187", "0.4989683", "0.4988482", "0.49785995", "0.4971149" ]
0.69593567
1
Create a centered circle (or sphere) having radius r.
def model_circle(r, nx, ny, nz=1): e = EMData() e.set_size(nx, ny, nz) e.process_inplace("testimage.circlesphere", {"radius":r, "fill":1}) return e
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_circle(x, y, r):\n\tnew_circle = Circle()\n\tnew_circle.x = x\n\tnew_circle.y = y\n\tnew_circle.r = r\n\treturn new_circle", "def create_circle(self, x, y, r, **kwargs):\n return self.create_oval(*self.circ_to_oval(x, y, r), **kwargs)", "def _generate_circle(self, center, radius):\n assert len(center) in [2, 3], 'Center of circle must have 2 or 3 elements'\n assert radius > 0, 'Radius must be greater than zero'\n return Point(*center).buffer(radius)", "def circle(radius, center, dim):\n kern = np.zeros(shape=(radius*2,radius*2))\n kern[draw.circle(r=radius, c=radius, radius=radius)] = 1\n return kern", "def circle(self, center, radius, color=(255, 255, 255), width=0):\n center = self._transform(center)\n pygame.draw.circle(self.screen, color, center, radius, width)", "def generate_circle(R,center,N=100,t0=0.0,t1=2.0*np.pi):\r\n theta = np.linspace(t0,t0+t1,N)\r\n y = R*np.sin(theta) + center[1]\r\n x = R*np.cos(theta) + center[0]\r\n return x,y", "def get_circle_coords(center, r):\n circle = [[r, 180* phi/3.14159265] for phi in range(0, 180, 5)]\n circle = [pol2cart(p[0], p[1]) + (center[0], center[1]) for p in circle]\n return circle", "def circle(draw, centrex, centrey, radius, color=\"#AAAAAAFF\") -> None:\n # convert cartesian centre to pixel centre\n cx, cy = pixelcoord(centrex, centrey)\n # top left and bottom right coordinates\n rect = [(cx-radius, cy-radius), (cx+radius, cy+radius)]\n # draw\n draw.arc(rect, 0, 360, color)", "def _circle(i, r=.05):\n\treturn Circle((i, 0), r, fill=True, color='black')", "def circle(center, radius, *args, **kwargs):\n return patch.Circle(center, radius, *args, **kwargs)", "def circle(t, r):\n circumference = math.pi * 2 * r\n n = 60\n length = circumference / n\n polygon(t, length, n)", "def DrawSolidCircle(self, center, radius, axis, color):\r\n radius *= self.zoom\r\n if radius < 1:\r\n radius = 1\r\n else: radius = int(radius)\r\n\r\n pygame.draw.circle(self.surface, (color/2).bytes+[127],\r\n center, radius, 0)\r\n pygame.draw.circle(self.surface, color.bytes, center, radius, 1)\r\n pygame.draw.aaline(self.surface, (255, 0, 0), center,\r\n (center[0] - radius*axis[0], center[1] +\r\n radius*axis[1]))", "def circle(self):\n return circle(self.N, self.o, self.r)", "def __init__(self,r):\n self.radius = r\n self.uc_centered_a = r\n self.uc_centered_b = r*np.sqrt(3.0)", "def DrawCircle(self, center, radius, color, drawwidth=1):\r\n radius *= self.zoom\r\n if radius < 1:\r\n radius = 1\r\n else: radius = int(radius)\r\n\r\n pygame.draw.circle(self.surface, color.bytes, center, radius, drawwidth)", "def circle(r=0):\n\tteta = 2*pi*random()\n\tx = (r+1)*cos(teta) + L//2\n\ty = (r+1)*sin(teta) + L//2\n\t\n\ti = int(x) + 1\n\tj = int(y) + 1\n\tprint(r)\n\treturn i,j", "def drawCircle(self, r):\n assert (type(r) in [int, float]), \"parameter r:%s is not a valid number\" % `r` \n x = self._turtle.xcor()\n y = self._turtle.ycor()\n \n # Move the pen into position\n fstate = self._turtle.pendown()\n if fstate:\n self._turtle.penup()\n self._turtle.setposition(x, y-r)\n if fstate:\n self._turtle.pendown()\n \n # Draw the circle and fill if necessary\n self._turtle.circle(r)\n self.flush()\n self._turtle.forward(0)\n \n # Return the pen to the position\n if fstate:\n self._turtle.penup()\n self._turtle.setposition(x, y)\n if fstate:\n self._turtle.pendown()", "def Circle(radius=0.5, resolution=100):\n points = np.zeros((resolution, 3))\n theta = np.linspace(0.0, 2.0 * np.pi, resolution, endpoint=False)\n points[:, 0] = radius * np.cos(theta)\n 
points[:, 1] = radius * np.sin(theta)\n cells = np.array([np.append(np.array([resolution]), np.arange(resolution))])\n return wrap(pyvista.PolyData(points, cells))", "def circle(self, x, y, r, solid = False):\n px = 0\n py = r\n d = 1 - 2 * r\n err = 0\n while py >= 0:\n if solid:\n for i in range(x - px, x + px + 1):\n self.pixel(i, y + py, 1)\n self.pixel(i, y - py, 1)\n else:\n self.pixel(x + px, y + py, 1)\n self.pixel(x + px, y - py, 1)\n self.pixel(x - px, y + py, 1)\n self.pixel(x - px, y - py, 1)\n err = 2 * (d + py) - 1\n if d < 0 and err <= 0:\n px += 1\n d += 2 *px + 1\n else:\n err = 2 * (d - px) - 1\n if d > 0 and err > 0:\n py -= 1\n d += 1 - 2 * py\n else:\n px += 1\n d += 2 * (px - py)\n py -= 1", "def circle(self, x, y, r, cls=None, style=None):\n x, y, r = self._meta.units(x, y, r)\n cls_str = 'class=\"%s\" ' % cls if cls else ''\n style_str = 'style=\"%s\" ' % self._meta.make_style(style) if style else ''\n self.elements.append(\"\"\"\n <circle cx=\"%s\" cy=\"%s\" r=\"%s\" %s%s/>\n \"\"\".strip() % (\n x, y, r, cls_str, style_str\n ))\n return self", "def circle(self, x, y, r, cls=None, style=None):\n x, y, r = self._meta.units(x, y, r)\n cls_str = 'class=\"%s\" ' % cls if cls else ''\n style_str = 'style=\"%s\" ' % self._meta.make_style(style) if style else ''\n self.elements.append(\"\"\"\n <circle cx=\"%s\" cy=\"%s\" r=\"%s\" %s%s/>\n \"\"\".strip() % (\n x, y, r, cls_str, style_str\n ))\n return self", "def circleCirc(radius):\n radius = float(radius)\n return 2*math.pi*radius", "def circle_area(r):\n if r < 0:\n raise ValueError(\"Radius cannot be negative\")\n\n return pi*(r**2)", "def circle(cls, radius, position, open_circle=False):\n\n nb_points = 2*np.pi*radius/1\n points1 = radius*np.transpose(np.concatenate(([np.cos(2*np.pi*np.arange(0,nb_points+1)/nb_points)],[np.sin(2*np.pi*np.arange(0,nb_points+1)/nb_points)]),axis=0))\n \n for y in range(points1.shape[0]):\n points1[y,:]=points1[y,:]+position\n \n circle_obj = cls()\n circle_obj.coord = [points1]\n circle_obj.open = open_circle\n return circle_obj", "def circle(self, center_x, center_y, radius, color):\n x = radius - 1\n y = 0\n d_x = 1\n d_y = 1\n err = d_x - (radius << 1)\n while x >= y:\n self.pixel(center_x + x, center_y + y, color)\n self.pixel(center_x + y, center_y + x, color)\n self.pixel(center_x - y, center_y + x, color)\n self.pixel(center_x - x, center_y + y, color)\n self.pixel(center_x - x, center_y - y, color)\n self.pixel(center_x - y, center_y - x, color)\n self.pixel(center_x + y, center_y - x, color)\n self.pixel(center_x + x, center_y - y, color)\n if err <= 0:\n y += 1\n err += d_y\n d_y += 2\n if err > 0:\n x -= 1\n d_x += 2\n err += d_x - (radius << 1)", "def __drawCircle(self, center, radius, color, drawwidth=1):\n radius *= self.viewZoom\n if radius < 1: radius = 1\n else: radius = int(radius)\n\n pygame.draw.circle(self.screen, color, center, radius, drawwidth)", "def filled_sphere(shape, radius, center=None):\n\tr2 = radius*radius\n\tif center is None:\n\t\t### set to center of array\n\t\tcenter = (shape[0]-1)/2.0,(shape[1]-1)/2.0,(shape[2]-1)/2.0\n\tdef func(i0, i1, i2):\n\t\tii0 = i0 - center[0]\n\t\tii1 = i1 - center[1]\n\t\tii2 = i2 - center[2]\n\t\trr2 = ii0**2 + ii1**2 + ii2**2\n\t\tc = numpy.where(rr2<r2, 0.0, 1.0)\n\t\treturn c\n\treturn numpy.fromfunction(func, shape)", "def circle(radius = 10, angle_resolution = 2.5, layer = 0):\n D = Device(name = 'circle')\n t = np.linspace(0, 360, int(np.ceil(360/angle_resolution) + 1)) * pi/180\n xpts = (radius*cos(t)).tolist()\n 
ypts = (radius*sin(t)).tolist()\n D.add_polygon(points = (xpts, ypts), layer = layer)\n return D", "def draw_circle(self, color, center, radius, width):\n _c = self.T.itrans(center)\n pg.draw.circle(self.screen, color, _c(), radius, width)", "def circle(self, p, radius, **kwargs):\n cx, cy = self._sky2img(p)\n self._draw.ellipse([cx-radius, cy-radius, cx+radius, cy+radius], **kwargs)", "def circle(n=5000, r=1, noise=0.05):\n phis = 2 * np.pi * np.random.rand(n)\n x = [[r * np.sin(phi), r * np.cos(phi)] for phi in phis]\n x = np.array(x)\n x = x + noise * np.random.randn(n, 2)\n return x", "def circular_aperture(self, r, scale=True):\n if scale:\n radius = r * self.a\n else:\n radius = r\n return CircularAperture(self.coords, float(np.abs(radius)))", "def circle_point(radius, phi):\n if radius <= 0:\n raise AssertionError('Radius mast be grater than 0')\n x = radius * cos(radians(phi))\n y = radius * sin(radians(phi))\n z = 0\n\n return x, y, z", "def __init__(self, center=None, radius=1):\n if center is None:\n center = Point()\n self.center = center\n self.radius = radius", "def add_circle(self, r_center, c_center, radius, color=BLUE, image=np.full((640, 480, 3), BLACK)):\n circle = np.fromfunction(lambda r, c, _: (r - r_center) ** 2 + (c - c_center) ** 2 <= radius ** 2, image.shape)\n return np.where(circle, color, image)", "def draw_circle(self, color, position, radius, width=0, anchor='topleft'):\n offset = self._calculate_offset(anchor)\n pygame.draw.circle(self._surf, color, (position + offset).floor(),\n radius, width)\n self._version += 1\n spyral.util.scale_surface.clear(self._surf)\n return self", "def randomPointOnSphere(r):\n x = np.random.normal()\n y = np.random.normal()\n z = np.random.normal()\n point = np.array([x, y, z])\n point *= r/(x**2 + y**2 + z**2)**.5\n return point", "def AddCircle(self,centerPnt,radius):\n\t\tcircle=self.Space.AddCircle(centerPnt,radius)\n\t\treturn circle", "def circle(self, center, rad):\n self.gc.show_circles(center[0], center[1], rad, facecolor='none', edgecolor=self.color, linewidth=0.5)", "def filled_circle(shape, radius, center=None):\n\tr2 = radius*radius\n\tif center is None:\n\t\t### set to center of array\n\t\tcenter = (shape[0]-1)/2.0,(shape[1]-1)/2.0\n\tdef func(i0, i1):\n\t\tii0 = i0 - center[0]\n\t\tii1 = i1 - center[1]\n\t\trr2 = ii0**2 + ii1**2\n\t\tc = numpy.where(rr2<r2, 0.0, 1.0)\n\t\treturn c\n\treturn numpy.fromfunction(func, shape)", "def circle(x: float, y: float, r: float, quantization: float):\n\n n = math.ceil(2 * math.pi * r / quantization)\n angle = np.array(list(range(n)) + [0]) / n * 2 * math.pi\n return LineCollection([r * (np.cos(angle) + 1j * np.sin(angle)) + complex(x, y)])", "def draw_circle(self, color, position, radius, width = 0, anchor= 'topleft'):\n color = spyral.color._determine(color)\n offset = self._calculate_offset(anchor)\n pygame.draw.circle(self._surf, color, position + offset, radius, width)", "def __init__( self , center , radius ):\r\n self.center = center\r\n self.radius = radius", "def plot_circle(r,**kw):\n try:\n fmt = kw.pop('fmt')\n except:\n fmt='k'\n try:\n label = kw.pop('label')\n except:\n label = None\n x = num.arange(-r,r+0.01,0.01)\n y = num.sqrt(num.fabs(r**2. 
- x**2.))\n pyplot.plot(x,y,fmt,**kw)\n pyplot.plot(x,-y,fmt,label=label,**kw)", "def circle_new(shape = (1024, 1024), radius=0.25, Nrad = None, origin=[0,0]):\r\n if Nrad == None :\r\n pass\r\n else :\r\n radius = max([shape[0], shape[1]]) \r\n radius = np.float(Nrad) / np.float(radius) \r\n # \r\n x, y = make_xy(shape, origin = origin)\r\n r = np.sqrt(x**2 + y**2)\r\n if shape[1] > shape[0]:\r\n rmax = radius * shape[0] / 2\r\n else :\r\n rmax = radius * shape[1] / 2\r\n arrayout = (r <= rmax)\r\n return np.array(arrayout, dtype=np.float64)", "def get_circle_radius(self, point, center):\n x, y, z = point[:]\n x0, y0, z0 = center[:]\n return math.sqrt((x-x0)**2 + (y-y0)**2 + (z-z0)**2)", "def draw_circle_filled(center_x, center_y, radius, color):\n width = radius\n height = radius\n draw_ellipse_filled(center_x, center_y, width, height, color)", "def circle(radius, extent=360):\n turtleTmp.circle(radius, extent)", "def _generate_circle_mask(center_y, center_x, radius):\n\n circle = draw.circle(center_y, center_x, radius)\n\n return circle", "def Icosphere(radius=1.0, center=(0.0, 0.0, 0.0), nsub=3):\n mesh = Icosahedron()\n mesh.clear_data()\n mesh = mesh.subdivide(nsub=nsub)\n\n # scale to desired radius and translate origin\n dist = np.linalg.norm(mesh.points, axis=1, keepdims=True) # distance from origin\n mesh.points = mesh.points * (radius / dist) + center\n return mesh", "def createCircle(self, x, y, radius):\n # TODO (#2398) fix this to be top left coordinates, width, height\n return QtCore.QRectF(\n int(x - radius), int(y - radius), int(radius * 2), int(radius * 2)\n )", "def draw_circle(c):\n turtle.circle(c.radius)", "def draw_circle(c):\n turtle.circle(c.radius)", "def area_of_circle(r):\n a = r**2 * math.pi\n return a", "def area_of_circle(r):\n a = r**2 * math.pi\n return a", "def draw_circle(self, x, y, radius, color=Color['white']):\n pygame.draw.circle(self.display, color, (x, y), radius)", "def make_circle(self):\n A = 2*np.random.rand(self.m, self.n)-1\n b = np.sign(np.sum(A**2, 1) - self.radius)\n return A, b", "def draw_circle(self, x0, y0, r, color=None):\n f = 1 - r\n ddF_x = 1\n ddF_y = -2 * r\n x = 0\n y = r\n\n self.set(x0, y0 + r, color)\n self.set(x0, y0 - r, color)\n self.set(x0 + r, y0, color)\n self.set(x0 - r, y0, color)\n\n while x < y:\n if f >= 0:\n y -= 1\n ddF_y += 2\n f += ddF_y\n x += 1\n ddF_x += 2\n f += ddF_x\n\n self.set(x0 + x, y0 + y, color)\n self.set(x0 - x, y0 + y, color)\n self.set(x0 + x, y0 - y, color)\n self.set(x0 - x, y0 - y, color)\n self.set(x0 + y, y0 + x, color)\n self.set(x0 - y, y0 + x, color)\n self.set(x0 + y, y0 - x, color)\n self.set(x0 - y, y0 - x, color)", "def get_nice_circle(x, y, radius, color=\"lightsteelblue\", facecolor=\"green\", alpha=.6, ax=None ):\n e = pl.Circle([x, y], radius)\n if ax is None:\n ax = pl.gca()\n ax.add_artist(e)\n e.set_clip_box(ax.bbox)\n e.set_edgecolor( color )\n e.set_linewidth(3)\n e.set_facecolor( facecolor ) # \"none\" not None\n e.set_alpha( alpha )\n return e", "def Octahedron(radius=1.0, center=(0.0, 0.0, 0.0)):\n return PlatonicSolid(kind='octahedron', radius=radius, center=center)", "def draw_circle(color, position, radius, width=0):\n #print('(color={}, position={}, radius={}, width={})')\n pygame.draw.circle(screen, color, position, radius, width)", "def circle():\n xmin=0\n xmax=6.5\n ymin=0.\n ymax=6.5\n\n x = arange(xmin, xmax, 0.005)\n y = x*1.\n [xx, yy] = meshgrid(x, y)\n\n zz=sqrt((xx-3.2475)**2.+(yy-3.2475)**2.)\n zz2=zz*1.\n zz2[(zz <= 3.25)]=1.\n zz2[(zz <= 3.25*0.2)]=0.\n 
zz2[(zz > 3.25)]=0.\n zz3=zeros(numpy.array(numpy.shape(zz2))/10)\n for i in arange(len(xx)/10):\n for j in arange(len(yy)/10):\n zz3[i,j]=numpy.sum(zz2[(i*10):(i*10+10),(j*10):(j*10+10)])/100.\n\n return zz3", "def radius(self,xc=None,yc=None):\n if xc == None:\n xc = self.x1\n if yc == None:\n yc = self.y1\n self.r = sqrt((self.x-xc)**2+(self.y-yc)**2)", "def circle(x, r, a, b, x_lim):\n y = (b + np.sqrt(maximum(\n r ** 2 - ((x - a) ** 2) * (x >= x_lim[0]) * (x <= x_lim[1]))\n )) * (x >= x_lim[0]) * (x <= x_lim[1])\n return y", "def find_center(r):\n cx=r.corner.x+(r.width/2)\n cy=r.corner.y+(r.height/2)\n return cx,cy", "def Radius(self, *args):\n return _Bnd.Bnd_Sphere_Radius(self, *args)", "def circle(self,image,radius,i,j,c_x,c_y):\r\n major_axis=radius\r\n minor_axis=radius\r\n self.ellipse(image,major_axis,minor_axis,i,j,c_x,c_y)", "def half_circle(n=5000, r=1, noise=0.05):\n phis = np.pi * np.random.rand(n)\n x = [[r * np.sin(phi), r * np.cos(phi)] for phi in phis]\n x = np.array(x)\n x = x + noise * np.random.randn(n, 2)\n return x", "def draw_circle(centerx, centery, radius):\r\n global _canvas\r\n global _current_color\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n circle = Circle()\r\n circle.move(centerx, centery)\r\n circle.setRadius(radius)\r\n _set_not_filled(circle)\r\n _canvas.add(circle)", "def pos_on_semicircle(x, r, cxy):\n pos = np.sqrt(r ** 2 - (x - cxy[0]) ** 2) + cxy[1]\n\n return pos", "def objects_radius(self, centre, radius):", "def circle(self, clear_screen=True, x=50, y=50, radius=40, fill_color='black', outline_color='black'):\n\n if clear_screen:\n self.clear()\n\n x1 = x - radius\n y1 = y - radius\n x2 = x + radius\n y2 = y + radius\n\n return self.draw.ellipse((x1, y1, x2, y2), fill=fill_color, outline=outline_color)", "def circle(self, radius, extent=None, steps=None):\n super().circle(radius, extent, steps)", "def createSphere( position=(0,0,0), radius=1, colour=(0.6,0.6,0.6), samplesY = 20, samplesXZ = 20 ):\r\n return createEllipsoid( position, (radius,radius,radius), colour, samplesY, samplesXZ )", "def create_circle(radius=None, n_instance=None):\n del_theta = np.pi * 2/(n_instance)\n theta_list = np.linspace(0, np.pi * 2, n_instance)\n coordinates = [circular_movement(radius, theta) for theta in theta_list]\n return coordinates", "def surfaceIntSphere(r: float) -> float:\n return 4.0 * np.pi * r * r", "def circle(radius = 15, resolution = 20, robotHeight = -90, n = 1, dir = 0):\n \n t = np.linspace(0, n*2*m.pi, resolution*n)\n circlePos = []\n for num in t:\n if dir == 0:\n x = m.cos(num)*radius\n y = m.sin(num)*radius\n else:\n x = m.cos(num)*radius\n y = m.sin(num-m.pi)*radius\n\n circlePos.append([x, y, robotHeight, 0, 0, 0, 'mov'])\n\n circlePos.append([0,0,-127,0,0,0,'mov'])\n return circlePos", "def circle(center, perp_vect, radius, element_number=10):\n # tl = [0, 0.2, 0.4, 0.6, 0.8]\n tl = np.linspace(0, 1, element_number)\n\n # vector form center to edge of circle\n # u is a unit vector from the centre of the circle to any point on the\n # circumference\n\n # normalized perpendicular vector\n n = perp_vect / np.linalg.norm(perp_vect)\n\n # normalized vector from the centre to point on the circumference\n u = perpendicular_vector(n)\n u /= np.linalg.norm(u)\n\n pts = []\n\n for t in tl:\n # u = np.array([0, 1, 0])\n # n = np.array([1, 0, 0])\n pt = (\n radius * np.cos(t * 2 * np.pi) * u\n + radius * np.sin(t * 2 * np.pi) * np.cross(u, n)\n + center\n )\n\n pt = pt.tolist()\n pts.append(pt)\n\n 
return pts", "def drawCircle(t, x, y, radius):\r\n t.up()\r\n t.goto(x + radius, y)\r\n t.setheading(90)\r\n t.down()\r\n for count in range(120):\r\n t.left(3)\r\n t.forward(2.0 * math.pi * radius / 120.0)", "def initP0(self, size, radius):\n return h.circle(size, radius)[:, :, 0]", "def area_circle(r):\n return (r ** 2) * math.pi", "def __init__(self, c, radius, a0, da):\n Circle.__init__(self, Vector(c).to_2d(), radius)\n self.line = None\n self.a0 = a0\n self.da = da", "def plot_circle(self, radius, c=color, ax=None, label=True, fontsize=12, **kwargs):\n if ax is None:\n ax = plt.gca()\n circle = Circle(self.coords, radius, fill=None, ec=c, **kwargs)\n ax.add_artist(circle)\n if label and self.i is not None:\n plt.text(\n *(np.array(self.coords) - [0, 1.5 * radius]),\n self.i,\n c=c,\n ha=\"center\",\n va=\"top\",\n fontsize=fontsize,\n )", "def draw_full_circle(x, y, radius):\n iterations = int(2 * radius * pi)\n s = sin(2 * pi / iterations)\n c = cos(2 * pi / iterations)\n\n dx, dy = radius, 0.\n\n glBegin(GL_TRIANGLE_FAN)\n glVertex2f(x, y)\n for _ in range(iterations + 1):\n glVertex2f(x + dx, y + dy)\n dx, dy = (dx * c + dy * s), (dy * c - dx * s)\n glEnd()", "def random_uniform_within_circle():\n rho = np.sqrt(np.random.uniform(0, 1))\n phi = np.random.uniform(0, 2 * np.pi)\n x = rho * np.cos(phi)\n y = rho * np.sin(phi)\n return np.array([x, y])", "def oncircle(size=None):\n if size is None:\n size = ()\n else:\n try:\n size = tuple(size)\n except TypeError:\n size = (size,)\n # This beats normalizing incircle for all sizes, even though that\n # should be the superior algorithm for compiled code.\n theta = 2.*pi * random(size + (1,))\n return concatenate((cos(theta), sin(theta)), axis=-1)", "def create_circle(self, cx, cy, radius, style=None, parent=None):\n if parent is None:\n parent = self.current_parent\n if parent is not None:\n attrs = {'r': str(radius), 'cx': str(cx), 'cy': str(cy)}\n if style:\n attrs['style'] = style\n return etree.SubElement(parent, svgns('circle'), attrs)", "def draw_circle_outline(center_x, center_y, radius, color, border_width=1):\n width = radius\n height = radius\n draw_ellipse_outline(center_x, center_y, width, height,\n color, border_width)", "def Calc_axe_spheroid(r,c):\n return np.sqrt((r**3)/c)", "def createCirclePolygon(h, k, r, dx):\n D = 10.0\n theta = 2 * np.arccos((r-(dx/D))/r)\n npoints = int(360.0/theta)\n x, y = getPointsInCircum(r, n=npoints, h=h, k=k)\n p = Polygon(list(zip(x, y)))\n return p", "def fillcircle(draw, centrex, centrey, radius, color=\"#AAAAAAFF\") -> None:\n # convert cartesian centre to pixel centre\n cx, cy = pixelcoord(centrex, centrey)\n # top left and bottom right coordinates, must never reverse\n rect = [(cx-radius, cy-radius), (cx+radius, cy+radius)]\n # draw, same color for outline and fill\n draw.ellipse(rect, color, color)", "def points_on_circumference(center=(0, 0), r=50, n=100):\n\treturn [\n (\n center[0]+(cos(2 * pi / n * x) * r), \n center[1] + (sin(2 * pi / n * x) * r) \n\n ) for x in range(0, n + 1)]", "def circle_from_points(a, b, c):\n ab = subtract_vectors(b, a)\n cb = subtract_vectors(b, c)\n ba = subtract_vectors(a, b)\n ca = subtract_vectors(a, c)\n ac = subtract_vectors(c, a)\n bc = subtract_vectors(c, b)\n normal = normalize_vector(cross_vectors(ab, ac))\n d = 2 * length_vector_sqrd(cross_vectors(ba, cb))\n A = length_vector_sqrd(cb) * dot_vectors(ba, ca) / d\n B = length_vector_sqrd(ca) * dot_vectors(ab, cb) / d\n C = length_vector_sqrd(ba) * dot_vectors(ac, bc) / d\n Aa = scale_vector(a, 
A)\n Bb = scale_vector(b, B)\n Cc = scale_vector(c, C)\n center = add_vectorlist([Aa, Bb, Cc])\n radius = distance_point_point(center, a)\n return center, radius, normal", "def drawCircle(x, y, r):\n pen1.up()\n pen1.goto(x,y)\n pen1.down()\n pen1.circle(r)", "def get_circle(a, b, c):\n vec = [a[0]**2 + a[1]**2, b[0]**2 + b[1]**2, c[0]**2 + c[1]**2]\n x_mat = [vec, [a[1], b[1], c[1]], [1]*3]\n y_mat = [vec, [a[0], b[0], c[0]], [1]*3]\n d_mat = [[a[0], b[0], c[0]], [a[1], b[1], c[1]], [1] * 3]\n d = 2 * det(d_mat)\n x = 1 / d * det(x_mat)\n y = -1 / d * det(y_mat)\n center = [x, y]\n #r = norm(center - a)\n r = norm([center[0]-a[0], center[1]-a[1]])\n return center, r", "def plot(self, radius=15, **kwargs):\n self.plot_circle(radius, **kwargs)", "def draw_circle(t, circle):\n t.pu()\n t.goto(circle.center.x, circle.center.y)\n t.pd()\n polygon.circle(t, circle.radius)", "def circleInfo(r):\n c = 2 * 3.14159 * r\n a = 3.14159 * r * r\n return (c, a)", "def area_circle(radius: float) -> float:\r\n if radius < 0:\r\n raise ValueError(\"area_circle() only accepts non-negative values\")\r\n return pi * radius**2", "def surface_point(radius):\n z=random.uniform(-1,1)\n chi=random.uniform(0,2*numpy.pi)\n x=numpy.sqrt(1-z*z)*numpy.cos(chi)\n y=numpy.sqrt(1-z*z)*numpy.sin(chi)\n return radius*numpy.array([x,y,z])" ]
[ "0.769793", "0.7630966", "0.75068563", "0.73866713", "0.73565596", "0.7355884", "0.7321714", "0.72842544", "0.72638685", "0.72363985", "0.72306716", "0.72283244", "0.71910393", "0.7182544", "0.7088393", "0.7056075", "0.70258105", "0.6998519", "0.69953614", "0.69949824", "0.69949824", "0.69793266", "0.697089", "0.6936297", "0.6914057", "0.6910311", "0.68995893", "0.68866396", "0.68862146", "0.68686074", "0.6837757", "0.6789712", "0.6772269", "0.6765891", "0.67453176", "0.6732436", "0.6720182", "0.6710668", "0.670215", "0.6700232", "0.66916317", "0.6685129", "0.66785455", "0.6626484", "0.66235006", "0.6567935", "0.6567301", "0.6559519", "0.6545657", "0.6537419", "0.6490755", "0.6490329", "0.6490329", "0.6469824", "0.6469824", "0.6462379", "0.6443092", "0.64390135", "0.6433608", "0.6427514", "0.64111865", "0.6397197", "0.63958365", "0.63657314", "0.6357481", "0.6346355", "0.6331287", "0.63257945", "0.63210917", "0.6308456", "0.62812346", "0.6280265", "0.62736493", "0.6273244", "0.6268905", "0.6262728", "0.6260079", "0.6259824", "0.625363", "0.6253238", "0.62522626", "0.6246306", "0.62437713", "0.62372017", "0.62315536", "0.6231516", "0.6214043", "0.62027353", "0.6190812", "0.61891115", "0.61877894", "0.61543167", "0.6153773", "0.61527866", "0.6143822", "0.61323243", "0.61201566", "0.6115602", "0.6097838", "0.609685" ]
0.73965037
3
Create a centered square (or cube) with edge length of d.
def model_square(d, nx, ny, nz=1): e = EMData() e.set_size(nx, ny, nz) e.process_inplace("testimage.squarecube", {"edge_length":d, "fill":1}) return e
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def my_square(d):asaasasassssssssssssssssssssssssss\n\t return (d ** 3)", "def initialize_d(d, square_sides, offset=0):\n return {key:[] for key in range(offset, square_sides ** 2 + offset)}", "def square_diamond(sx, sy, size, strong):\n if size == 1:\n return\n\n dsize = size/2\n ex = sx+size-1\n ey = sy+size-1\n # lets get math style\n\n\n # SQUARE STEP\n\n A = sx, sy\n B = ex, sy\n C = sx, ey\n D = ex, ey\n E = sx+dsize, sy+dsize\n F = sx, sy + dsize\n G = sx + dsize, sy\n H = ex, sy + dsize\n I = sx + dsize, ey\n\n def RAND(X):\n return random.randint(-strong, strong)\n\n ### for coasts dont disappear\n\n def normalize(add_z, X):\n if self[X] <= 0:\n if add_z > 0:\n add_z = -5\n else:\n if add_z <= 0:\n add_z = 5\n return add_z\n\n # Generate heights\n # E = (A+B+C+D) / 4 + RAND(d)\n # F = (A + C + E + E) / 4 + RAND(d)\n # G = (A + B + E + E) / 4 + RAND(d)\n # H = (B + D + E + E) / 4 + RAND(d)\n # I = (C + D + E + E) / 4 + RANS(d)\n\n ### E\n\n try:\n\n add_z = ((self[A] + self[B] + self[C] + self[D]) / 4) + RAND(E)\n\n except KeyError, e:\n print A, B, C, D, size, dsize, len(self)\n raise e\n\n\n self[E] = normalize(add_z, E)\n\n ### F\n\n add_z = (self[A] + self[C] + self[E] + self[E]) / 4 + RAND(F)\n\n self[F] = normalize(add_z, F)\n\n ### G\n\n add_z = (self[A] + self[B] + self[E] + self[E]) / 4 + RAND(G)\n\n self[G] = normalize(add_z, G)\n\n ### H\n\n add_z = (self[B] + self[D] + self[E] + self[E]) / 4 + RAND(H)\n\n self[H] = normalize(add_z, H)\n\n ### I\n add_z = (self[C] + self[D] + self[E] + self[E]) / 4 + RAND(I)\n\n self[I] = normalize(add_z, I)\n\n\n # DIAMOND STEP\n\n # get coordinates\n # 0 - x, 1 - y\n\n x, y = 0, 1\n\n dx = (G[x] - A[x]) / 2\n dy = (F[y] - A[y]) / 2\n\n J = A[x] + dx, A[y] + dy\n K = G[x] + dx, G[y] + dy\n L = F[x] + dx, F[y] + dy\n M = E[x] + dx, E[y] + dy\n\n N = A[x], A[y] + dy\n O = A[x] + dx, A[y]\n P = G[x], G[y] + dy\n Q = A[x] + dx, F[y]\n\n # Generate Heights\n # J = (A + G + F + E)/4 + RAND(d)\n # K = (G + B + E + H)/4 + RAND(d)\n # L = (F + E + C + I)/4 + RAND(d)\n # M = (E + H + I + D)/4 + RAND(d)\n\n # J\n add_z = ((self[A] + self[G] + self[F] + self[E]) / 4) + RAND(J)\n self[J] = normalize(add_z, J)\n\n # K\n add_z = ((self[G] + self[B] + self[E] + self[H]) / 4) + RAND(K)\n self[K] = normalize(add_z, K)\n\n # L\n add_z = ((self[F] + self[E] + self[C] + self[I]) / 4) + RAND(L)\n self[L] = normalize(add_z, L)\n\n # M\n add_z = ((self[E] + self[H] + self[I] + self[D]) / 4) + RAND(M)\n self[M] = normalize(add_z, M)\n\n # N = (K + A + J + F)/4 + RAND(d)\n # O = (L + A + G + J)/4 + RAND(d)\n # P = (J + G + K + E)/4 + RAND(d)\n # Q = (F + J + E + L)/4 + RAND(d)\n\n # N\n add_z = ((self[K] + self[A] + self[J] + self[F]) / 4) + RAND(N)\n self[N] = normalize(add_z, N)\n\n # O\n add_z = ((self[L] + self[A] + self[G] + self[J]) / 4) + RAND(O)\n self[O] = normalize(add_z, O)\n\n # P\n add_z = ((self[J] + self[G] + self[K] + self[E]) / 4) + RAND(P)\n self[P] = normalize(add_z, P)\n\n # Q\n add_z = ((self[F] + self[J] + self[E] + self[L]) / 4) + RAND(Q)\n self[Q] = normalize(add_z, Q)\n\n # N = (A + J + F)/3 + RAND(d)\n # O = (A + G + J)/3 + RAND(d)\n\n # N\n add_z = ((self[A] + self[J] + self[F]) / 3) + RAND(N)\n self[N] = normalize(add_z, N)\n\n # O\n add_z = ((self[A] + self[G] + self[J]) / 3) + RAND(N)\n self[O] = normalize(add_z, O)\n\n\n ### Start recurse for diamond alg\n square_diamond(A[0], A[1], dsize, strong)\n square_diamond(G[0], G[1], dsize, strong)\n square_diamond(F[0], F[1], dsize, strong)\n square_diamond(E[0], E[1], dsize, strong)", 
"def make_square(x, size):\n return [ [x, -size/2, size/2],\n\t\t\t [x, size/2, size/2],\n [x, size/2, -size/2],\n\t\t\t [x, -size/2, -size/2]]", "def make_box_square(box, offset_scale=0.05):\n\n x_min, y_min, x_max, y_max = box[:4]\n center_x = (x_max + x_min) / 2.\n center_y = (y_max + y_min) / 2.\n width = x_max - x_min\n height = y_max - y_min\n\n if height >= width:\n half_box = height / 2.\n x_min = center_x - half_box\n x_max = center_x + half_box\n if width > height:\n half_box = width / 2.\n y_min = center_y - half_box\n y_max = center_y + half_box\n\n box_side_lenght = (x_max + x_min) / 2.\n offset = offset_scale * box_side_lenght\n x_min = x_min - offset\n x_max = x_max + offset\n y_min = y_min - offset\n y_max = y_max + offset\n return (int(x_min), int(y_min), int(x_max), int(y_max))", "def from_diag(d, context = FloatContext):\n n = len(d)\n S = zeros(n,n,context)\n set_diag(S,d)\n return S", "def plasm_cube(self, size=0.1, color=WHITE):\n return COLOR(color)(T([1,2,3])(self.coords)(CUBOID([size, size, size])))", "def bandcholesky(A, d):\n L, dg = LDL(A, d)\n return matrix(L)*diag(sqrt(dg))", "def get_sqd_from_center(boardsize):\n assert boardsize % 2 == 1\n center = boardsize // 2\n return distances_from_pt(sq_distance, (center, center), boardsize)", "def square(center, side_length, *args, **kwargs):\n center = np.asarray(center)\n side_length = float(side_length)\n lower_left = center - 0.5*side_length\n return patch.Rectangle(lower_left, side_length, side_length,\n \t\t\t\t\t *args, **kwargs)", "def __init__(\n self, d: int, seed: Optional[int] = None, inv_transform: bool = False\n ) -> None:\n self._d = d\n self._seed = seed\n self._inv_transform = inv_transform\n if inv_transform:\n sobol_dim = d\n else:\n # to apply Box-Muller, we need an even number of dimensions\n sobol_dim = 2 * math.ceil(d / 2)\n self._sobol_engine = SobolEngine(dimension=sobol_dim, scramble=True, seed=seed)", "def box(original, radius):\n batches = original.size()[0]\n num_elem = h.product(original.size()[1:])\n ei = h.getEi(batches,num_elem)\n \n if len(original.size()) > 2:\n ei = ei.contiguous().view(num_elem, *original.size())\n\n return HBox(original, None, ei * radius).checkSizes()", "def draw_square(square_edge):\n\n # create a square filled with zeros with square_edge as size and int as data type\n square = numpy.zeros((square_edge, square_edge), dtype=int)\n\n # square is a \"matix\" of N (square_edge) list\n # we use list index to place the numbers\n current_list = 0\n list_index = square_edge // 2\n\n # magic square logic\n for number in range(1, square_edge**2+1, 1):\n\n # we place our first number\n square[current_list, list_index] = number\n\n # we update how the next number will be placed in the square\n update_current_list = (current_list - 1) % square_edge\n update_list_index = (list_index + 1) % square_edge\n\n # if value of the index are not 0 we will increment current_list\n if square[update_current_list, update_list_index]:\n current_list += 1\n\n # else we simply update the index\n else:\n current_list = update_current_list\n list_index = update_list_index\n\n return square", "def get_box(x_tr, y_tr, d=1.):\n xs = np.array([-1., 1., 1., -1., -1.])\n ys = np.array([-1., -1., 1., 1., -1.])\n\n xs = xs*d/2 + x_tr\n ys = ys*d/2 + y_tr\n\n return xs, ys", "def center_size(boxes):\n concat = P.Concat(1)\n return concat(((boxes[:, 2:] + boxes[:, :2])/2, # cx, cy\n boxes[:, 2:] - boxes[:, :2])) # w, h", "def sqft(d):\n return 2 * (d.l*d.w + d.l*d.h + d.h*d.w)", "def sqrtCF(d):\n sqrtD 
= sqrt(d)\n P = 0\n Q = 1\n while True:\n a = int(floor((P + sqrtD) / Q))\n yield a\n P = a * Q - P\n Q = (d - P*P) // Q # It can be shown that Q evenly divides d - P*P", "def C(width = 1, size = (10, 20), layer = 0):\n D = Device(name = 'C')\n w = width/2\n s1, s2 = size\n points = [(-w, -w), (s1, -w), (s1, w), (w, w), (w, s2-w),\n (s1, s2-w), (s1, s2+w), (-w, s2+w), (-w, -w)]\n D.add_polygon(points, layer = layer)\n D.add_port(name = 1, midpoint = (s1, s2), width = width, orientation = 0)\n D.add_port(name = 2, midpoint = (s1, 0), width = width, orientation = 0)\n return D", "def cube_area(edge : number) -> number:\n area = 6*edge*edge\n\n return area", "def box(original, diameter):\n return Box(original, h.ones(original.size()) * diameter, None).checkSizes()", "def Dodecahedron(radius=1.0, center=(0.0, 0.0, 0.0)):\n return PlatonicSolid(kind='dodecahedron', radius=radius, center=center)", "def sample(self, d):\n dist = rnd.uniform(0,self.length)\n w = rnd.normal(0,self.width)\n d.pos = np.dot(rotmat(self.angle), [dist, w]) + self.pos\n d.ownpos = self.pos", "def center_size(boxes):\n return torch.cat([(boxes[:, :2] + boxes[:, 2:])/2, # cx, cy\n boxes[:, :2] - boxes[:, 2:]], 1) # w, h", "def sqnorm(self, d):\n ###TODO\n total = 0.0\n for i in d:\n total = total + (d[i] * d[i])\n return total", "def coordinates_of_square(crd):\n col = ord(crd[0]) - ord('a')\n row = int(crd[1]) - 1\n return (col * SQUARE_EDGE + BOARD_MARGIN, (7 - row) * SQUARE_EDGE + BOARD_MARGIN)", "def __init__(self, d):\n\t\tself._coords = [0] * d", "def diagonal(self):\r\n return math.sqrt((self.width ** 2) + (self.height ** 2))", "def propagateInBox((x0,y0,z0), (px,py,pz), d):\n x = x0 + px*d\n y = y0 + py*d\n z = z0 + pz*d\n\n return (x,y,z)", "def diagonal(cube_edge: int=128,\n radius: int=10,\n foreground: int=1,\n dtype=np.uint8):\n if 2 * radius > cube_edge:\n raise ValueError(\"Given radius '{}' is larger than than cube edge length {}\"\n .format(radius, cube_edge))\n stack = np.zeros((cube_edge, cube_edge, cube_edge), dtype=bool)\n cylinder = [\n ((0, 0, 0), (cube_edge - 1, cube_edge - 1, cube_edge - 1), radius)\n ]\n stack = add_cylinder_px(stack, *cylinder[0])\n return volume_bool_to_dtype(stack, fg=foreground, dtype=dtype)", "def central_composite(d,center=(2,2),alpha='o',face='ccc'):\n \n return build_central_composite(d,center=center,alpha=alpha,face=face)", "def diamondSquareFractal3D(size, roughness = .5, perturbance = .5):\n start = time.time()\n count = 0;\n \n #calculate the fractal based on the next highest 2^n + 1\n n = math.log(size-1, 2)\n if n != float(int(n)):\n print \"The size is not valid, choose a side that is a power of 2 + 1.\"\n print \"65, 129, 257, 513, 1025, etc.\"\n return\n \n matrix = numpy.zeros((size, size, size)) - 100\n total = matrix.size\n \n applyCornerValues(matrix, roughness)\n \n \"\"\"\n The algorithm requires calculating the centers, faces and edges \n in a particular order so that neighboring values will be \n available to average:\n \n 1. Calc centers of all size n cubes.\n 2. for each size n cube:\n a. calc faces\n b. calc edges\n c. divide the cube into 8 cubes of size n-1.\n d. repeat from step 1 until n < 3.\n \n Subdivided cubes are populated in the following order so that a\n cube will not be populated until its top, left, and front neighbors\n are done. Values from neighboring cubes are used to calculate \n top, left and front faces and edges\n top. 
left, front\n top, left, back\n top, right, front\n bottom, left, front\n top, right, back\n bottom, left, back\n bottom, right, front\n bottom, right, back\n \n A queue will be used to manage the cubes waiting to be calculated.\n \"\"\"\n \n from collections import deque\n queue = deque()\n \n # add the whole matrix.\n queue.append([0, 0 ,0 , size-1])\n\n # calc the center for the whole matrix\n pf = perturbanceFactor(size, size, perturbance)\n noiseLevel = roughness * pf\n indexRef = getIndexRef(0, 0, 0, size/2, size)\n setValue(matrix, center, indexRef, noiseLevel)\n\n while len(queue) > 0:\n # pop a square\n whatisit = queue.popleft()\n [row, col, frame, range] = whatisit\n\n midRange = range/2\n # populate the faces and edges of the cube\n [midRow, midCol, midFrame] = populate3D(matrix, \n row, col, frame, midRange, roughness, perturbance)\n\n if midRange >= 2:\n #calc the centers for the next layer\n populateCenters(matrix, row, col, frame, midRange, roughness, perturbance)\n \n #add top left front cube to the queue \n queue.append([row, col, frame, midRange])\n \n #add top left back cube to the queue\n queue.append([row, col, midFrame, midRange])\n\n #add top right front cubes to the queue\n queue.append([row, midCol, frame, midRange])\n\n #add bottom left front cubes to the queue\n queue.append([midRow, col, frame, midRange])\n \n #add top right back cubes to the queue \n queue.append([row, midCol, midFrame, midRange])\n\n #add bottom left back cubes to the queue\n queue.append([midRow, col, midFrame, midRange])\n\n #add bottom right front cubes to the queue\n queue.append([midRow, midCol, frame, midRange])\n \n #add bottom right back cubes to the queue\n queue.append([midRow, midCol, midFrame, midRange])\n\n #print \"result mean =\", matrix.mean(), \"result std = \", matrix.std()\n print \"elapsed seconds =\", time.time() - start\n # return the requested size\n return matrix[0:size, 0:size]", "def __init__(self, d):\n self._coords = [0]*d", "def surface_area_cube(side_length: float) -> float:\r\n if side_length < 0:\r\n raise ValueError(\"surface_area_cube() only accepts non-negative values\")\r\n return 6 * side_length**2", "def onsphere(size=None):\n xy = oncircle(size)\n z = 2.*random(xy.shape[:-1] + (1,)) - 1.\n xy *= sqrt(1. 
- z*z)\n return concatenate((xy, z), axis=-1)", "def __init__(self, a, **kwargs):\r\n center = kwargs.get('center')\r\n density = kwargs.get(\"density\")\r\n spheroid.__init__(self, a, center=center, density=density)", "def _fill_cell_rectangle(size = (20, 20), layers = (0, 1, 3),\n densities = (0.5, 0.25, 0.7),\n inverted = (False, False, False)):\n D = Device('fillcell')\n for layer, density, inv in zip(layers, densities, inverted):\n rectangle_size = np.array(size) * sqrt(density)\n # r = D.add_ref(rectangle(size = rectangle_size, layer = layer))\n R = rectangle(size = rectangle_size, layer = layer)\n R.center = (0, 0)\n if inv is True:\n A = rectangle(size = size)\n A.center = (0,0)\n A = A.get_polygons()\n B = R.get_polygons()\n p = gdspy.boolean(A, B, operation = 'not')\n D.add_polygon(p, layer = layer)\n else:\n D.add_ref(R)\n return D", "def box(self, x, y, w, h):\n\t\tpass", "def rand_sphere(d0):\n p1 = np.random.randn(d0, 3)\n m = np.sqrt(np.sum(p1**2, axis=1))\n\n rad = pow(np.random.rand(d0), 1.0 / 3.0)\n return (p1.T * (rad / m)).T", "def zone_from_center_size(x, y, size):\n half_size = size // 2\n size = half_size * 2\n x1 = x - half_size\n x2 = x + half_size -1\n if x1 < 0:\n x1 = 0\n x2 = size - 1\n if x2 >= w:\n x2 = w - 1\n x1 = w - size\n y1 = y - half_size\n y2 = y + half_size\n if y1 < 0:\n y1 = 0\n y2 = size - 1\n if y2 >= h:\n y2 = h - 1\n y1 = h - size\n return [x1, y1, x2, y2]", "def sized_normal(self, t, size):\n p = self.lerp(t)\n if self.da < 0:\n v = self.c - p\n else:\n v = p - self.c\n return Line(p, size * v.normalized())", "def __init__(self, d=10):\n \n self.d = d\n self.indices = [i for i in range(d)]\n self.weights = [1 / w for w in range(1, d + 1)]", "def square(square_x, square_y, square_width, square_height, square_color):\n arcade.draw_rectangle_filled(square_x, square_y, square_width, square_height, square_color)", "def make_square(turt,sz):\n for i in range(4):\n turt.forward(sz)\n turt.left(90)", "def MakeDifficulties(center, width, n):\n low, high = center-width, center+width\n return numpy.linspace(low, high, n)", "def hadamard2(n):\n # set up plot stuff\n fig, ax = plt.subplots(figsize=(10, 10))\n ax.set_xticks(range(n + 1)) # set axis ranges\n ax.set_yticks(range(n + 1))\n plt.xticks([]) # remove axis labels\n plt.yticks([])\n ax.set_aspect(aspect=1) # ensure it's a square and not a rectangle\n # invert y axis so the origin is the top left\n ax.set_ylim(ax.get_ylim()[::-1])\n\n def recurse(tlx, tly, brx, bry, flag):\n \"\"\" Given coords for the top left and bottom right of a square, recursively pass a boolean flag\n to see if we should draw it\n \"\"\"\n if(tlx + 1 == brx): # single square (width == 1)\n if flag: # draw black square\n ax.add_patch(Rectangle((tly, brx - 1), 1, 1, color='black'))\n return # no need to recurse anymore\n # here's the recursive part:\n # we go in the order of top left, top right, bottom left, bottom right\n # we negate the flag in the bottom right, and we keep the same flag for the rest\n recurse(tlx, tly, (tlx + brx) // 2, (tly + bry) // 2, flag)\n recurse((tlx + brx) // 2, tly, brx, (tly + bry) // 2, flag)\n recurse(tlx, (tly + bry) // 2, (tlx + brx) // 2, bry, flag)\n recurse((tlx + brx) // 2, (tly + bry) // 2, brx,\n bry, not flag) # invert bottom right\n\n recurse(0, 0, n, n, True) # initial case, pass corners of entire matrix\n plt.show()", "def test_euclidean_scale(self):\n\n s = space(curvature=0)\n\n magic = 77773.333773777773733\n for mul in (2, 5, 1/3, 1/11, magic, 1/magic):\n for name, dim in (\n 
('sphere_s1', 1),\n ('sphere_v2', 2),\n ('sphere_s2', 2),\n ('sphere_v3', 3)\n ):\n self.assertTrue(isclose(\n getattr(s, name)(1) * mul**dim,\n getattr(s, name)(mul)\n ))", "def sized_normal(self, t, size):\n p = self.lerp(t)\n v = size * self.cross.normalized()\n return Line3d(p, v, z_axis=self.z_axis)", "def sized_normal(self, t, size):\n return Line(self.lerp(t), size * self.cross_z.normalized())", "def latin_hypercube(n_pts, dim):\n X = np.zeros((n_pts, dim))\n centers = (1.0 + 2.0 * np.arange(0.0, n_pts)) / float(2 * n_pts)\n for i in range(dim): # Shuffle the center locataions for each dimension.\n X[:, i] = centers[np.random.permutation(n_pts)]\n\n # Add some perturbations within each box\n pert = np.random.uniform(-1.0, 1.0, (n_pts, dim)) / float(2 * n_pts)\n X += pert\n return X", "def __init__(self, n):\n self.row = [0] * n\n self.col = [0] * n\n self.diagonal = 0\n self.antidiagonal = 0\n self.winning = False", "def draw_square(turtle, size):\n\n square = range(4)\n\n for line in square:\n alexa.forward(size)\n alexa.left(90)", "def cube_area(side_length):\n area = side_length ** 3\n return area", "def extra(d):\n return min(d.l * d.w, d.w * d.h, d.h * d.l)", "def __init__(self, length):\n self.x = length\n self.y = length\n self._table = self.x * self.y", "def spaceDiagonal(self):\n spaceDiagonal = (3**(1/2)) * self.sideLength\n return spaceDiagonal", "def gen_square_subsequent_mask(sz: int) -> torch.Tensor:\n return torch.triu(torch.ones(sz, sz) * float('-inf'), diagonal=1)", "def cellsize_2d(self):\t\r\n return self.dx * self.dy", "def solve_for_edge_dimensionality(n):\n return int(round(np.sqrt(2 * n + 2.25) - 1.5))", "def create_diamond(color=COLOR_WHITE):\n a = Point3(-1.0, -1.0, 0.0)\n b = Point3(1.0, -1.0, 0.0)\n c = Point3(1.0, 1.0, 0.0)\n d = Point3(-1.0, 1.0, 0.0)\n e = Point3(0.0, 0.0, 1.0)\n f = Point3(0.0, 0.0, -1.0)\n\n obj = glGenLists(1)\n glNewList(obj, GL_COMPILE)\n glPushMatrix()\n glColor4fv(color)\n glBegin(GL_TRIANGLES)\n drawVertexListCreateNormal([a, b, e])\n drawVertexListCreateNormal([b, c, e])\n drawVertexListCreateNormal([c, d, e])\n drawVertexListCreateNormal([d, a, e])\n drawVertexListCreateNormal([b, a, f])\n drawVertexListCreateNormal([c, b, f])\n drawVertexListCreateNormal([d, c, f])\n drawVertexListCreateNormal([a, d, f])\n glEnd()\n glPopMatrix()\n glEndList()\n return obj", "def mesh_uniform(N_e, d, Omega):", "def generate_sphere_full():\n \n num_voxels = 31\n c = (15.0, 15.0, 15.0)\n\n data_x = []\n data_y = []\n data_z = []\n data_intensity = []\n\n volume = numpy.zeros((num_voxels, num_voxels, num_voxels))\n\n for x in range(num_voxels):\n for y in range(num_voxels):\n for z in range(num_voxels):\n\n if numpy.sqrt((x-c[0])**2 + (y-c[1])**2 + (z-c[2])**2) - 7.5 < 1.5:\n data_x.append(x)\n data_y.append(y)\n data_z.append(z)\n data_intensity.append(200.0)\n\n volume[x,y,z] = 200.0\n\n\n return data_x, data_y, data_z, data_intensity, volume", "def toCartesian(self, y):\r\n return Size - y", "def create_cube(color=COLOR_WHITE):\n a = Point3(-1.0, -1.0, -1.0)\n b = Point3(1.0, -1.0, -1.0)\n c = Point3(1.0, -1.0, 1.0)\n d = Point3(-1.0, -1.0, 1.0)\n e = Point3(-1.0, 1.0, -1.0)\n f = Point3(1.0, 1.0, -1.0)\n g = Point3(1.0, 1.0, 1.0)\n h = Point3(-1.0, 1.0, 1.0)\n\n obj = glGenLists(1)\n glNewList(obj, GL_COMPILE)\n glPushMatrix()\n glBegin(GL_QUADS)\n glColor4fv(color)\n drawVertexListCreateNormal([a, b, c, d])\n drawVertexListCreateNormal([b, f, g, c])\n drawVertexListCreateNormal([f, e, h, g])\n drawVertexListCreateNormal([e, a, d, h])\n 
drawVertexListCreateNormal([d, c, g, h])\n drawVertexListCreateNormal([a, e, f, b])\n glEnd()\n glPopMatrix()\n glEndList()\n return obj", "def centre_pad(length):\n if length % 2 == 0:\n side1 = int((size - length) / 2)\n side2 = side1\n else:\n side1 = int((size - length) / 2)\n side2 = side1 + 1\n return side1, side2", "def len_square(bound):\n\treturn (8 - 2 * bound)", "def generate_square_subsequent_mask(sz: int) -> Tensor:\n return torch.triu(torch.ones(sz, sz) * float('-inf'), diagonal=1)", "def create_square_with_diag_grid(self, len_side_a, len_side_b):\n if (len_side_a < 2) or (len_side_b < 2):\n raise ValueError('side length attributes for HexagonalCells should be at least 2.')\n\n self.connections, self.weights = create_grid_square_with_diagonals(len_side_a, len_side_b)\n\n # populate the dictionary from cell coordinates to cell indexes in arrays connection and weights\n for i in range(len_side_a):\n for j in range(len_side_b):\n self.dict_cell_id_to_ind[(i, j)] = j + i*len_side_b", "def create_compute_box_size(self):\n def compute_best_size_for(dim):\n size = ((self.element_space[dim]-1)//self.box_space[dim]) + 1\n size += 2 * self.ghost_space[dim]\n while size % Level.BOX_ALIGNMENTS[dim]:\n size += 1\n return size\n\n return Space([compute_best_size_for(dim) for dim in range(self.dimensions)])", "def diamond(N, D, rng):\n samples = rng.randn(N, D)\n norm = np.sum(np.abs(samples), axis=1)\n return samples/norm[:,None]", "def createTaperedBox( position=(0,0,0), size=(1,1,1), colour=(0.6,0.6,0.6), samplesY = 20, samplesXZ = 20, exponentBottom = 4, exponentTop = 4, exponentSide = 4 ):\r\n\r\n return createEllipsoid( position, (size[0]/2.0,size[1]/2.0,size[2]/2.0), colour, samplesY, samplesXZ, exponentBottom, exponentTop, exponentSide )", "def draw_diamond(display, coord, box_size, color, bg_color):\n half = int(box_size * 0.5)\n left, top = coord\n vertices = [\n (left + half, top),\n (left + box_size - 1, top + half),\n (left + half, top + box_size - 1),\n (left, top + half),\n ]\n pygame.draw.polygon(display, color, vertices)\n return", "def createBox( size=(1,1,1), position=(0,0,0), colour=(0.6,0.6,0.6) ):\r\n \r\n size = PyUtils.toVector3d(size)\r\n position = PyUtils.toPoint3d(position)\r\n vertices = []\r\n delta = MathLib.Vector3d()\r\n for repeat in range(3):\r\n for x in (-0.5,0.5) :\r\n delta.x = size.x * x\r\n for y in (-0.5,0.5) :\r\n delta.y = size.y * y\r\n for z in (-0.5,0.5) :\r\n delta.z = size.z * z\r\n vertices.append( position + delta )\r\n \r\n faces = [(0,1,3,2),(5,4,6,7), # YZ Faces\r\n (9,13,15,11),(12,8,10,14), # XY Faces\r\n (18,19,23,22),(17,16,20,21)] # XZ Faces\r\n \r\n return create( vertices, faces, colour )", "def expand_twelve(vertices):\n box = np.zeros((12, 2), dtype=np.float32)\n p = vertices.shape[0]\n if p == 4:\n box[0, :] = vertices[0, :]\n box[1, :] = [(4 * vertices[0, 0] + vertices[1, 0]) / 5.0, (4 * vertices[0, 1] + vertices[1, 1]) / 5.0]\n box[2, :] = [(3 * vertices[0, 0] + 2 * vertices[1, 0]) / 5.0,\n (3 * vertices[0, 1] + 2 * vertices[1, 1]) / 5.0]\n box[3, :] = [(2 * vertices[0, 0] + 3 * vertices[1, 0]) / 5.0,\n (2 * vertices[0, 1] + 3 * vertices[1, 1]) / 5.0]\n box[4, :] = [(vertices[0, 0] + 4 * vertices[1, 0]) / 5.0, (vertices[0, 1] + 4 * vertices[1, 1]) / 5.0]\n box[5, :] = vertices[1, :]\n box[6, :] = vertices[2, :]\n box[7, :] = [(vertices[3, 0] + 4 * vertices[2, 0]) / 5.0, (vertices[3, 1] + 4 * vertices[2, 1]) / 5.0]\n box[8, :] = [(2 * vertices[3, 0] + 3 * vertices[2, 0]) / 5.0,\n (2 * vertices[3, 1] + 3 * vertices[2, 1]) / 
5.0]\n box[9, :] = [(3 * vertices[3, 0] + 2 * vertices[2, 0]) / 5.0,\n (3 * vertices[3, 1] + 2 * vertices[2, 1]) / 5.0]\n box[10, :] = [(4 * vertices[3, 0] + vertices[2, 0]) / 5.0, (4 * vertices[3, 1] + vertices[2, 1]) / 5.0]\n box[11, :] = vertices[3, :]\n elif p == 6:\n box[0, :] = vertices[0, :]\n box[1, :] = [(vertices[0, 0] + vertices[1, 0]) / 2.0, (vertices[0, 1] + vertices[1, 1]) / 2.0]\n box[2, :] = vertices[1, :]\n box[3, :] = [(2 * vertices[1, 0] + vertices[2, 0]) / 3.0, (2 * vertices[1, 1] + vertices[2, 1]) / 3.0]\n box[4, :] = [(vertices[1, 0] + 2 * vertices[2, 0]) / 3.0, (vertices[1, 1] + 2 * vertices[2, 1]) / 3.0]\n box[5, :] = vertices[2, :]\n box[6, :] = vertices[3, :]\n box[7, :] = [(vertices[4, 0] + 2 * vertices[3, 0]) / 3.0, (vertices[4, 1] + 2 * vertices[3, 1]) / 3.0]\n box[8, :] = [(2 * vertices[4, 0] + vertices[3, 0]) / 3.0, (2 * vertices[4, 1] + vertices[3, 1]) / 3.0]\n box[9, :] = vertices[4, :]\n box[10, :] = [(vertices[5, 0] + vertices[4, 0]) / 2.0, (vertices[5, 1] + vertices[4, 1]) / 2.0]\n box[11, :] = vertices[5, :]\n elif p == 8:\n box[0, :] = vertices[0, :]\n box[1, :] = [(vertices[0, 0] + vertices[1, 0]) / 2.0, (vertices[0, 1] + vertices[1, 1]) / 2.0]\n box[2, :] = vertices[1, :]\n box[3, :] = vertices[2, :]\n box[4, :] = [(vertices[2, 0] + vertices[3, 0]) / 2.0, (vertices[2, 1] + vertices[3, 1]) / 2.0]\n box[5, :] = vertices[3, :]\n box[6, :] = vertices[4, :]\n box[7, :] = [(vertices[4, 0] + vertices[5, 0]) / 2.0, (vertices[4, 1] + vertices[5, 1]) / 2.0]\n box[8, :] = vertices[5, :]\n box[9, :] = vertices[6, :]\n box[10, :] = [(vertices[6, 0] + vertices[7, 0]) / 2.0, (vertices[6, 1] + vertices[7, 1]) / 2.0]\n box[11, :] = vertices[7, :]\n elif p == 10:\n box[0, :] = vertices[0, :]\n box[1, :] = vertices[1, :]\n box[2, :] = vertices[2, :]\n box[3, :] = vertices[3, :]\n box[4, :] = [(vertices[3, 0] + vertices[4, 0]) / 2.0, (vertices[3, 1] + vertices[4, 1]) / 2.0]\n box[5, :] = vertices[4, :]\n box[6, :] = vertices[5, :]\n box[7, :] = [(vertices[5, 0] + vertices[6, 0]) / 2.0, (vertices[5, 1] + vertices[6, 1]) / 2.0]\n box[8, :] = vertices[6, :]\n box[9, :] = vertices[7, :]\n box[10, :] = vertices[8, :]\n box[11, :] = vertices[9, :]\n elif p == 12:\n box = vertices\n else:\n raise ValueError(f'Invalid points dimension: {p}')\n\n return box", "def create_cube(scale=(1.0,1.0,1.0), st=False, rgba=False, dtype='float32', type='triangles'):\n\n shape = [24, 3]\n rgba_offset = 3\n\n width, height, depth = scale\n # half the dimensions\n width /= 2.0\n height /= 2.0\n depth /= 2.0\n\n vertices = np.array([\n # front\n # top right\n ( width, height, depth,),\n # top left\n (-width, height, depth,),\n # bottom left\n (-width,-height, depth,),\n # bottom right\n ( width,-height, depth,),\n\n # right\n # top right\n ( width, height,-depth),\n # top left\n ( width, height, depth),\n # bottom left\n ( width,-height, depth),\n # bottom right\n ( width,-height,-depth),\n\n # back\n # top right\n (-width, height,-depth),\n # top left\n ( width, height,-depth),\n # bottom left\n ( width,-height,-depth),\n # bottom right\n (-width,-height,-depth),\n\n # left\n # top right\n (-width, height, depth),\n # top left\n (-width, height,-depth),\n # bottom left\n (-width,-height,-depth),\n # bottom right\n (-width,-height, depth),\n\n # top\n # top right\n ( width, height,-depth),\n # top left\n (-width, height,-depth),\n # bottom left\n (-width, height, depth),\n # bottom right\n ( width, height, depth),\n\n # bottom\n # top right\n ( width,-height, depth),\n # top left\n 
(-width,-height, depth),\n # bottom left\n (-width,-height,-depth),\n # bottom right\n ( width,-height,-depth),\n ], dtype=dtype)\n\n st_values = None\n rgba_values = None\n\n if st:\n # default st values\n st_values = np.tile(\n np.array([\n (1.0, 1.0,),\n (0.0, 1.0,),\n (0.0, 0.0,),\n (1.0, 0.0,),\n ], dtype=dtype),\n (6,1,)\n )\n\n if isinstance(st, bool):\n pass\n elif isinstance(st, (int, float)):\n st_values *= st\n elif isinstance(st, (list, tuple, np.ndarray)):\n st = np.array(st, dtype=dtype)\n if st.shape == (2,2,):\n # min / max\n st_values *= st[1] - st[0]\n st_values += st[0]\n elif st.shape == (4,2,):\n # per face st values specified manually\n st_values[:] = np.tile(st, (6,1,))\n elif st.shape == (6,2,):\n # st values specified manually\n st_values[:] = st\n else:\n raise ValueError('Invalid shape for st')\n else:\n raise ValueError('Invalid value for st')\n\n shape[-1] += st_values.shape[-1]\n rgba_offset += st_values.shape[-1]\n\n if rgba:\n # default rgba values\n rgba_values = np.tile(np.array([1.0, 1.0, 1.0, 1.0], dtype=dtype), (24,1,))\n\n if isinstance(rgba, bool):\n pass\n elif isinstance(rgba, (int, float)):\n # int / float expands to RGBA with all values == value\n rgba_values *= rgba \n elif isinstance(rgba, (list, tuple, np.ndarray)):\n rgba = np.array(rgba, dtype=dtype)\n\n if rgba.shape == (3,):\n rgba_values = np.tile(rgba, (24,1,))\n elif rgba.shape == (4,):\n rgba_values[:] = np.tile(rgba, (24,1,))\n elif rgba.shape == (4,3,):\n rgba_values = np.tile(rgba, (6,1,))\n elif rgba.shape == (4,4,):\n rgba_values = np.tile(rgba, (6,1,))\n elif rgba.shape == (6,3,):\n rgba_values = np.repeat(rgba, 4, axis=0)\n elif rgba.shape == (6,4,):\n rgba_values = np.repeat(rgba, 4, axis=0)\n elif rgba.shape == (24,3,):\n rgba_values = rgba\n elif rgba.shape == (24,4,):\n rgba_values = rgba\n else:\n raise ValueError('Invalid shape for rgba')\n else:\n raise ValueError('Invalid value for rgba')\n\n shape[-1] += rgba_values.shape[-1]\n\n data = np.empty(shape, dtype=dtype)\n data[:,:3] = vertices\n if st_values is not None:\n data[:,3:5] = st_values\n if rgba_values is not None:\n data[:,rgba_offset:] = rgba_values\n\n if type == 'triangles':\n # counter clockwise\n # top right -> top left -> bottom left\n # top right -> bottom left -> bottom right\n indices = np.tile(np.array([0, 1, 2, 0, 2, 3], dtype='int'), (6,1))\n for face in range(6):\n indices[face] += (face * 4)\n indices.shape = (-1,)\n elif type == 'triangle_strip':\n raise NotImplementedError\n elif type == 'triangle_fan':\n raise NotImplementedError\n elif type == 'quads':\n raise NotImplementedError\n elif type == 'quad_strip':\n raise NotImplementedError\n else:\n raise ValueError('Unknown type')\n\n return data, indices", "def cube(n):\n return n**3", "def _center_seed(self, shape, seed):\n board = np.zeros(shape, dtype=int) # Start with a blank board\n\n # Find coordinates to place the seed array\n # Note: this assumes the seed <= the size of the game board\n x0 = shape[0]//2 - seed.shape[0]//2\n y0 = shape[1]//2 - seed.shape[1]//2\n x1 = x0 + seed.shape[0]\n y1 = y0 + seed.shape[1]\n\n # Place the seed on the board\n board[x0:x1, y0:y1] = seed\n\n return board", "def generate_square_vertices(geom):\n unit = geom.pix_x.unit\n width = geom.pixel_width.to_value(unit) / 2\n x = geom.pix_x.to_value(unit)\n y = geom.pix_y.to_value(unit)\n\n x_offset = width[:, np.newaxis] * np.array([-1, -1, 1, 1])\n y_offset = width[:, np.newaxis] * np.array([1, -1, -1, 1])\n\n x = x[:, np.newaxis] + x_offset\n y = y[:, np.newaxis] + 
y_offset\n return x, y", "def cube(self):\n\n dims = self.voxels.shape\n max_dim = max(dims)\n \n x_target = (max_dim - dims[0]) / 2\n y_target = (max_dim - dims[1]) / 2\n z_target = (max_dim - dims[2]) / 2\n\n self.voxels = np.pad(self.voxels,\n ((int(np.ceil(x_target)), int(np.floor(x_target))),\n (int(np.ceil(y_target)), int(np.floor(y_target))),\n (int(np.ceil(z_target)), int(np.floor(z_target)))),\n 'constant',\n constant_values=(0))\n\n self.point_position = self.point_position + [np.ceil(z_target),\n np.ceil(y_target),\n np.ceil(x_target)]\n\n return(self)", "def solid(t, coord, ii, n_pixels, random_values):\n\n\n return (100,100,100)", "def build(self, box, size):\n centroid = box.centroid\n edge1_midpoint = get_midpoint_of_edge(\n box.vertices[0], box.vertices[1])\n distance_to_edge1 = centroid.distance_to_xyz(edge1_midpoint)\n quadrat_indexes1 = range(int(distance_to_edge1 / size) + 1)\n\n edge2_midpoint = get_midpoint_of_edge(\n box.vertices[0], box.vertices[3])\n distance_to_edge2 = centroid.distance_to_xyz(edge2_midpoint)\n quadrat_indexes2 = range(int(distance_to_edge2 / size) + 1)\n\n quadrats = list()\n for i in quadrat_indexes2:\n for j in quadrat_indexes1:\n if i == 0 and j == 0:\n quadrats.append(Quadrat((0, 0), size, centroid))\n else:\n top_right = Vertex(centroid.x + i * size,\n centroid.y + (j * size), centroid.z)\n top_left = Vertex(centroid.x + i * size,\n centroid.y - (j * size), centroid.z)\n bottom_right = Vertex(\n centroid.x - i * size, centroid.y + (j * size), centroid.z)\n bottom_left = Vertex(\n centroid.x - i * size, centroid.y - (j * size), centroid.z)\n four_quadrats = [Quadrat((i, j), size, top_right),\n Quadrat((i, -1 * j), size, top_left),\n Quadrat((-1 * i, j), size, bottom_right),\n Quadrat((-1 * i, -1 * j), size, bottom_left)]\n quadrats_inside = list(\n filter(lambda x: box.contains(x.midpoint), four_quadrats))\n quadrats += quadrats_inside\n # print(\"There are this many quadrats: \" + str(len(quadrats)))\n return tuple(set(quadrats))", "def faceDiagonal(self):\n faceDiagonal = (2**(1/2)) * self.sideLength\n return faceDiagonal", "def diagonal(d, axis=0):\n assert d.ndim == 1\n n = d.shape[0]\n times = lambda x: d * x\n trans = lambda x: _hermitian(d) * x\n times, trans = apply_along_axis(times, trans, axis)\n return Operator(times=times, trans=trans, shape=(n,n))", "def generate_box(n: int, include_origin=True) -> np.array:\n box = np.array((), dtype=np.int8)\n for i in range(-n, n + 1):\n for j in range(-n, n + 1):\n for k in range(-n, n + 1):\n if not (i == 0 and j == 0 and k == 0 and not include_origin):\n box = np.append(box, np.array((i, j, k)))\n\n if include_origin:\n shape = ((2 * n + 1) ** 3, 3)\n else:\n shape = ((2 * n + 1) ** 3 - 1, 3)\n return np.reshape(box, shape)", "def square(self):\n return self.x * self.x + self.y * self.y", "def Icosahedron(radius=1.0, center=(0.0, 0.0, 0.0)):\n return PlatonicSolid(kind='icosahedron', radius=radius, center=center)", "def centering_matrix(n):\n P = eye(n) - 1/float(n) * ones((n,n))\n return P", "def make_householder(a):\n\n v = a / (a[0] + np.copysign(np.linalg.norm(a), a[0]))\n v[0] = 1\n H = np.eye(a.shape[0])\n H -= (2 / np.dot(v, v)) * np.dot(v[:, None], v[None, :])\n return H", "def draw_square(display, coord, box_size, color, bg_color):\n left, top = coord\n half = int(box_size * 0.5)\n quarter = int(box_size * 0.25)\n pygame.draw.rect(\n display, color, (left + quarter, top + quarter, half, half))\n return", "def CubeGraph(n):\n theta = float(pi/n)\n\n d = {'':[]}\n dn={}\n p = 
{'':(float(0),float(0))}\n pn={}\n\n # construct recursively the adjacency dict and the positions\n for i in range(n):\n ci = float(cos(i*theta))\n si = float(sin(i*theta))\n for v,e in d.iteritems():\n v0 = v+'0'\n v1 = v+'1'\n l0 = [v1]\n l1 = [v0]\n for m in e:\n l0.append(m+'0')\n l1.append(m+'1')\n dn[v0] = l0\n dn[v1] = l1\n x,y = p[v]\n pn[v0] = (x, y)\n pn[v1] = (x+ci, y+si)\n d,dn = dn,{}\n p,pn = pn,{}\n\n # construct the graph\n r = Graph(name=\"%d-Cube\"%n)\n r.add_vertices(d.keys())\n for u,L in d.iteritems():\n for v in L:\n r.add_edge(u,v)\n r.set_pos(p)\n\n return r", "def dctmtx(n):\n x,y = np.meshgrid(range(n), range(n))\n D = np.sqrt(2.0/n) * np.cos(np.pi * (2*x+1) * y / (2*n))\n D[0] /= np.sqrt(2)\n return D", "def draw_random_u(d):\n mu = np.zeros(d)\n cov = np.eye(d)\n u = multivariate_normal.rvs(mean=mu, cov=cov)\n return u / np.linalg.norm(u)", "def square_circumference(a):\n return (4*a)", "def d2xy(n, d):\n x, y, rx, ry, s, t = 0, 0, 0, 0, 0, d\n s = 1\n while s < n:\n rx = 1 & (t // 2)\n ry = 1 & (t ^ rx)\n x, y = rot(s, x, y, rx, ry)\n x += s * rx\n y += s * ry\n t = t // 4\n s *= 2\n return x, y", "def print_square(size):\n if not isinstance(size, int):\n raise ValueError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n if size == 0:\n return\n for row in range(int(size)):\n for col in range(int(size)):\n print(\"{:s}\".format(\"#\"), end=\"\")\n print()", "def eye(size):\n return Matrix.diagonal(size, 1.0)", "def test_shaped_instance(self, seed):\n dim = Dimension(\"yolo\", \"norm\", 0.9, shape=(3, 2))\n samples = dim.sample(seed=seed)\n assert len(samples) == 1\n assert_eq(dists.norm.rvs(0.9, size=(3, 2)), samples[0])\n\n assert dim.shape == (3, 2)\n\n dim = Dimension(\"yolo\", \"norm\", 0.9, shape=4)\n samples = dim.sample(seed=seed)\n assert len(samples) == 1\n assert_eq(dists.norm.rvs(0.9, size=4), samples[0])\n\n assert dim.shape == (4,)", "def centered_stencil(self):\n\n # compute the shape of the new stencil\n shp = self.arr.shape\n shp = tuple(max(i, j-(i+1))*2 + 1 for i, j in zip(self.center, shp))\n # print(\"New Shape :\", shp)\n # generate the stencil in the right shape\n S = np.zeros(shp)\n # embed the stencil into the bigger stencil in order to place the center\n # into the center\n slc = []\n for c, shp_arr, shp_s in zip(self.center, self.arr.shape, shp):\n if c < shp_arr/2:\n slc.append(slice(shp_s - shp_arr, None))\n else:\n slc.append(slice(0, -(shp_s - shp_arr)))\n\n # print(slc)\n S[slc] = self.arr[:]\n # print(\"The Stencil\")\n # print(self.arr)\n # print(\"Centered stencil\")\n # print(S)\n return S", "def cmdscale(D):\n # Number of points\n n = len(D)\n\n # Centering matrix\n H = np.eye(n) - np.ones((n, n))/n\n\n # YY^T\n B = -H.dot(D**2).dot(H)/2\n\n # Diagonalize\n evals, evecs = np.linalg.eigh(B)\n\n # Sort by eigenvalue in descending order\n idx = np.argsort(evals)[::-1]\n evals = evals[idx]\n evecs = evecs[:, idx]\n\n # Compute the coordinates using positive-eigenvalued components only\n w, = np.where(evals > 0)\n L = np.diag(np.sqrt(evals[w]))\n V = evecs[:, w]\n Y = V.dot(L)\n\n return Y, evals[evals > 0]", "def make_box(self, scale, color):\n box = Box()\n box.set_color(c=color)\n box.set_location(0, 0, 0)\n box.set_size(scale, scale, scale)\n return box", "def make_box(self, scale, color):\n box = Box()\n box.set_color(c=color)\n box.set_location(0, 0, 0)\n box.set_size(scale, scale, scale)\n return box" ]
[ "0.5763527", "0.5631988", "0.55919534", "0.5325949", "0.52789533", "0.52779573", "0.5240144", "0.5220922", "0.5193584", "0.5174843", "0.5162735", "0.5129172", "0.5129167", "0.51191884", "0.5118387", "0.51183224", "0.50997084", "0.50534856", "0.50478446", "0.50203365", "0.4996185", "0.49504822", "0.49483687", "0.49390522", "0.49352452", "0.4905792", "0.49013817", "0.4879734", "0.4877485", "0.4870109", "0.48628297", "0.48516265", "0.48506096", "0.48316988", "0.4829679", "0.48289815", "0.48227805", "0.48082832", "0.4802107", "0.47995335", "0.47992527", "0.4755421", "0.47532874", "0.475253", "0.47493905", "0.47401658", "0.473692", "0.47350475", "0.47298098", "0.47209114", "0.47191516", "0.47134316", "0.47046056", "0.4701363", "0.4700699", "0.46998116", "0.4688682", "0.46793416", "0.467599", "0.46735117", "0.46701315", "0.46675038", "0.46663028", "0.4658024", "0.46557057", "0.46556953", "0.46537572", "0.4649176", "0.46477684", "0.4646615", "0.46447897", "0.46428654", "0.4640011", "0.46386576", "0.46255088", "0.4610281", "0.45973548", "0.45943734", "0.4591791", "0.45902467", "0.45866218", "0.45795867", "0.45747194", "0.45737323", "0.4567216", "0.4565515", "0.456326", "0.4559941", "0.45588025", "0.45535576", "0.45518452", "0.45427582", "0.45420173", "0.45408636", "0.4539642", "0.45319277", "0.45318568", "0.45316955", "0.45273712", "0.45273712" ]
0.63994324
0
Create a centered Gaussian image having standard deviation "sigma".
def model_gauss(xsigma, nx, ny=1, nz=1, ysigma=None, zsigma=None, xcenter=None, ycenter=None, zcenter=None): e = EMData() e.set_size(nx, ny, nz) if( ysigma == None ) : ysigma = xsigma if( zsigma == None ) : zsigma = xsigma if( xcenter == None ) : xcenter = nx//2 if( ycenter == None ) : ycenter = ny//2 if( zcenter == None ) : zcenter = nz//2 e.process_inplace("testimage.puregaussian", {"x_sigma":xsigma,"y_sigma":ysigma,"z_sigma":zsigma,"x_center":xcenter,"y_center":ycenter,"z_center":zcenter} ) return e
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_gaussian(size, sigma=10, center=None):\n\n x = np.arange(0, size[1], 1, float)\n y = np.arange(0, size[0], 1, float)\n y = y[:, np.newaxis]\n\n if center is None:\n x0 = y0 = size[0] // 2\n else:\n x0 = center[0]\n y0 = center[1]\n\n return np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / sigma ** 2)", "def makeGaussian(height, width, sigma=3, center=None):\n x = np.arange(0, width, 1, float)\n y = np.arange(0, height, 1, float)[:, np.newaxis]\n if center is None:\n x0 = width // 2\n y0 = height // 2\n else:\n x0 = center[0]\n y0 = center[1]\n\n return np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / sigma ** 2)", "def gaussian(x, amplitude=1.0, center=0.0, sigma=1.0):\n \n return (amplitude/(np.sqrt(2.*np.pi)*sigma)) * exp(-np.power((1.0*x-center)/(sigma), 2.)/2.)", "def makeGaussian(size, fwhm, sigma, center=None):\n\n x = np.arange(0, size, 1, float)\n y = x[:,np.newaxis]\n\n if center is None:\n x0 = y0 = size // 2\n else:\n x0 = center[0]\n y0 = center[1]\n \n #return (np.exp(-4*np.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)) #approximation using exponenial functions\n return ((1/(2*np.pi*sigma**2))*np.exp(-((xx)**2 + (yy)**2)/(2*sigma**2))) # symmetric 2D Gaussian distribution", "def gaussian(x, mean, sigma):\n return np.exp(-np.square(x-mean)/(2*np.square(sigma))) / (np.sqrt(2*np.pi*sigma**2))", "def GaussianKernel(sigma: float = 1., width: int = 0):\n assert not ((width is None or width == 0) and\n (sigma is None or sigma == 0)), \\\n \"GaussianKernel :: both sigma ({}) & width ({}) are not valid\".format(\n sigma, width)\n\n if width is None or width == 0:\n width = int(2.0 * 3.0 * sigma + 1.0)\n if width % 2 == 0:\n width += 1\n\n if sigma is None or sigma == 0:\n sigma = (width - 1)/6.\n half = width//2\n x, y = np.meshgrid(np.linspace(-half, half, width),\n np.linspace(-half, half, width), indexing='xy')\n w = np.exp(- (x**2 + y**2) / (2.*(sigma**2)))\n w /= np.sum(w)\n return torch.from_numpy(w.astype(np.float32)).view(1, 1, width, width)", "def gauss_kern(sigma, size):\r\n size = int(np.floor(size/2))\r\n sizey = size\r\n x, y = scipy.mgrid[-size:size+1, -sizey:sizey+1]\r\n g = scipy.exp(-(x**2+y**2) / (2*(sigma)**2))\r\n return np.ravel(g / g.max())", "def gaussian(size,sigma):\n a,b=np.ogrid[-size/2:size/2,-size/2:size/2]\n mask = a**2+b**2\n mask = np.exp(-mask.astype('float')/(2*float(sigma**2)))\n return mask", "def _gaussian_distribution(self, x: ndarray, mu: float, sigma: float) -> ndarray:\n return 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(\n -np.power(\n (x - mu) / sigma, 2) / 2)", "def gaussian(x, mu, sigma):\n return (np.exp(-(x - mu)**2 / 2.0 / sigma**2) /\n np.sqrt(2.0 * np.pi) / sigma)", "def add_gaussian_noise(image, min_sigma, max_sigma):\n assert(max_sigma >= min_sigma)\n sigma = np.random.uniform(min_sigma, max_sigma)\n noise = np.random.normal(loc=0, scale=sigma, size=image.shape)\n noisy_im = np.floor((image + noise) * 255) / 255\n noisy_im = np.clip(noisy_im, 0, 1)\n return noisy_im", "def Gaussian(x, mu=0, sigma=26.4, A=1, y0=0):\r\n #width = sigma*(2*np.sqrt(2*np.log(2)))\r\n b = 1/(sigma*np.sqrt(2*np.pi))\r\n f = b*np.power(np.e, -(((x-mu)**2)/(2*sigma**2)))\r\n return A*f + y0", "def add_gaussian_noise(image, min_sigma, max_sigma):\r\n sigma = np.random.uniform(min_sigma, max_sigma)\r\n corrupted_im = image + np.random.normal(0, sigma, image.shape)\r\n rounded_im = (np.round(corrupted_im * MAX_PIXEL_NUMBER)) / MAX_PIXEL_NUMBER\r\n return np.clip(rounded_im, 0, 1).astype(np.float64)", "def generate_gaussian():\n amp = 10 * 
numpy.random.chisquare(3)\n width = numpy.random.chisquare(3)\n mean = numpy.random.uniform(-10 + width, 10 - width)\n x = numpy.linspace(-10, 10, 500)\n y = amp * numpy.exp(- (x - mean) ** 2 / width ** 2)\n add_noise(y, 0.1)\n return x, y", "def create_gaussian_filter(size, sigma):\n h = size[0] #height of the template\n w = size[1] #width of the template \n if h % 2 == 0: h += 1 #add 1 if dimensions are even\n if w % 2 == 0: w += 1\n x = math.floor(h/2)\n y = math.floor(w/2) \n sum = 0\n #create our template\n template = np.zeros((h,w))\n #fill the template in with the numbers from Gaussian distribution\n for i in range(h):\n for j in range(w):\n template[i,j] = math.exp(-((((j-x)**2)+((i-y)**2))/(2*(sigma**2))))\n sum = sum + template[i,j]\n #normalise the numbers\n gaussian_filter = template/sum\n return gaussian_filter", "def makeGaussian(size, fwhm, center=None):\n\n x = sp.arange(0, size, 1, float)\n y = x[:,sp.newaxis]\n\n if center is None:\n x0 = y0 = size // 2\n else:\n x0 = center[0]\n y0 = center[1]\n\n return sp.exp(-4*sp.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)", "def gaussian_black(z, mu: 'normal' = 0, sigma: (0.4,1) = 0.7):\n return 1/(np.sqrt(2*np.pi)*sigma)*np.exp(-np.power((z - mu)/sigma, 2)/2)", "def gaussian(mu, sigma, start, end):\r\n \r\n val = np.linspace(start, end, 100)\r\n a = 1/(sigma*np.pi)\r\n b = - 0.5 * np.power((mu - val)/sigma, 2)\r\n return a*np.exp(b)", "def gauss_kern(size, sigma=1.0):\n h1 = size[0]\n h2 = size[1]\n x, y = np.mgrid[0:h2, 0:h1]\n x = x-h2/2\n y = y-h1/2\n g = np.exp( -( x**2 + y**2 ) / (2*sigma**2) );\n return g / g.sum()", "def gaussianKernel(size, sigma=1):\n\n colourers.info(f'Creating gaussian kernel of size {size} with sigma of {sigma}')\n size = int(size) // 2\n x, y = np.mgrid[-size:size+1, -size:size+1]\n normal = 1 / (2.0 * np.pi * sigma**2)\n g = np.exp(-((x**2 + y**2) / (2.0 * sigma ** 2))) * normal\n return g", "def Gaussian(x, mu, sigma, a):\n amplitude = a / ( sigma * np.sqrt(2 * np.pi) )\n u = (x - mu) / sigma\n return amplitude * np.exp( -0.5 * (u**2) )", "def makeGaussian(size, fwhm=3, center=None):\n\n x = np.arange(0, size, 1, float)\n y = x[:, np.newaxis]\n\n if center is None:\n x0 = y0 = size // 2\n else:\n x0 = center[0]\n y0 = center[1]\n\n return np.exp(-4 * np.log(2) * ((x - x0)**2 + (y - y0)**2) / fwhm**2)", "def makeGaussian(size, fwhm = 3, center=None):\n\n x = np.arange(0, size, 1, float)\n y = x[:,np.newaxis]\n\n if center is None:\n x0 = y0 = size // 2\n else:\n x0 = center[0]\n y0 = center[1]\n\n return np.exp(-4*np.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)", "def makeGaussian(size, fwhm = 3, center=None):\n\n x = np.arange(0, size, 1, float)\n y = x[:,np.newaxis]\n\n if center is None:\n x0 = y0 = size // 2\n else:\n x0 = center[0]\n y0 = center[1]\n\n return np.exp(-4*np.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)", "def gaussian(x, sigma):\n try: r = np.exp(-0.5*(x/sigma)**2) \n except: r = np.zeros(len(x))\n return r", "def set_gaussian(self, X, sigma=0):\n sigma = float(sigma)\n if sigma < 0:\n raise ValueError('sigma should be positive')\n self.set_euclidian(X)\n d = self.weights\n\n if sigma == 0:\n sigma = (d ** 2).mean()\n\n w = np.exp(- (d ** 2) / (2 * sigma))\n self.weights = w", "def gaussianPSF(shape, sigma):\n psf = dg.drawGaussiansXY(shape,\n numpy.array([0.5*shape[0]]),\n numpy.array([0.5*shape[1]]),\n sigma = sigma)\n return psf/numpy.sum(psf)", "def makeGaussianKernel(sigma: float) -> np.ndarray:\n\n # Your code here.\n kernel_size = 8*sigma+1\n kernel = 
np.zeros([kernel_size,kernel_size], dtype=float)\n center = kernel_size//2\n \n \n s = 2*(sigma**2)\n sum_val = 0\n for i in range(0,kernel_size):\n for j in range(0,kernel_size):\n x = i-center\n y = j-center\n kernel[i,j] = np.exp(-(x**2+y**2) / s)\n sum_val += kernel[i,j]\n #/(np.pi * s)\n sum_val = 1/sum_val\n print(\"here is the kernel\", kernel*sum_val)\n return kernel*sum_val", "def _gaussian(self, c, sigma):\n d = 2*sigma*sigma\n ax = exp(-power(self._xx-self._xx.T[c], 2)/d)\n ay = exp(-power(self._yy-self._yy.T[c], 2)/d)\n return (ax * ay).T # the external product gives a matrix", "def gaussian_kernel(training_ex, landmark, sigma=0.1):\n return np.exp(-(np.linalg.norm(training_ex - landmark) ** 2 / (2 * (sigma ** 2))))", "def _FWHMGauss(sigma, pixel=12):\n return sigma*2*np.sqrt(2*np.log(2))*pixel", "def Gaussian(x,t,sigma):\n return np.exp(-(x-t)**2/(2*sigma**2))", "def gaussian(centre, k, intensity, xpos):\r\n\treturn intensity * np.exp(- np.power(k * (xpos - centre), 2))", "def GaussianKernel(shape=(3, 3), sigma=0.5):\r\n radius_x, radius_y = [(radius-1.)/2. for radius in shape]\r\n y_range, x_range = np.ogrid[-radius_y:radius_y+1, -radius_x:radius_x+1]\r\n h = np.exp(- (x_range*x_range + y_range*y_range) / (2.*sigma*sigma))\r\n h[h < np.finfo(h.dtype).eps*h.max()] = 0\r\n sumofh = h.sum()\r\n if sumofh != 0:\r\n h /= sumofh\r\n return h", "def make_gaussian(size, fwhm=3, center=None):\n\n x = np.arange(0, size, 1, float)\n y = x[:, np.newaxis]\n\n if center is None:\n x0 = y0 = size // 2\n else:\n x0 = center[0]\n y0 = center[1]\n\n k = np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / fwhm ** 2)\n return k / np.sum(k)", "def random_gaussian(img, mu=0.0, sigma=4.0):\n\n out = np.copy(img.astype(np.float))\n rows, cols, depth = img.shape\n noise = np.random.normal(mu, sigma, (rows, cols))\n for dim in range(depth):\n out[:, :, dim] = img[:, :, dim] + noise\n out[out > 255] = 255\n out[out < 0] = 0\n out = out.astype(np.uint8)\n\n return out", "def gaussianBlur(img,ksize=(5,5),sigma=10):\n #kernel = cv2.getGaussianKernel(ksize,sigma)\n dst = np.zeros_like(img)\n cv2.GaussianBlur(src=img,dst=dst,ksize=ksize,sigmaX=0)\n return dst", "def gaussian(t, params):\n DeprecationWarning(\"Using standard width. Better use gaussian_sigma.\")\n params['sigma'] = Qty(\n value=params['t_final'].get_value()/6,\n min_val=params['t_final'].get_value()/8,\n max_val=params['t_final'].get_value()/4,\n unit=params['t_final'].unit\n )\n return gaussian_sigma(t, params)", "def generate_gaussian_kernel(shape=(3,3),sigma=0.8):\n m,n = [(ss-1.)/2. 
for ss in shape]\n y,x = np.ogrid[-m:m+1,-n:n+1]\n h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )\n h[ h < np.finfo(h.dtype).eps*h.max() ] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def gaussian(height, center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*np.exp(\n -(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)", "def gaussian(height, center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*np.exp(\n -(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)", "def gaussian(height, center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*np.exp(\n -(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)", "def calculateGaussian(x, mean, stdev):\n\t\t\texponent = math.exp(-(math.pow(x-mean,2)/(2*math.pow(stdev,2))))\n\t\t\tvalue= (1 / (math.sqrt(2*math.pi) * stdev)) * exponent\n\t\t\tif value==0:\n\t\t\t\treturn np.nan\n\t\t\telse:\n\t\t\t\treturn math.log(value)", "def gaussian_kernel(sigma, truncate=4.0):\n\n sigma = float(sigma)\n radius = int(truncate * sigma + 0.5)\n\n x, y = np.mgrid[-radius:radius + 1, -radius:radius + 1]\n sigma = sigma**2\n\n k = 2 * np.exp(-0.5 * (x**2 + y**2) / sigma)\n k = k / np.sum(k)\n\n return k", "def gaussian(height, center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*exp(\n -(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)", "def Gaussian(x, t, sigma):\n return np.exp(-(x - t)**2 / (2 * sigma**2))", "def gaussian(z, mean=0, stdev=None, sigma=1):\n sigma = stdev if stdev is not None else sigma\n norm = stats.norm(loc=mean, scale=sigma)\n pdf = norm.pdf(z, loc=mean, scale=sigma)\n return z, pdf", "def _generate_gaussian_kernel(self, size: int, sigma: float = 1.0, mu: float = 0.0) -> ndarray:\n # create the 1D array of equally spaced distance point of given size\n self.kernel_1d = np.linspace(-(size//2), size//2, size)\n # get the gaussian distribution of the 1D array\n self.kernel_1d = self._gaussian_distribution(\n self.kernel_1d, mu, sigma)\n\n # Compute the outer product of kernel1D tranpose and kernel1D\n self.kernel_2d = np.outer(self.kernel_1d.T, self.kernel_1d)\n # normalize the the outer product to suish the values between 0.0-1.0\n self.kernel_2d *= 1.0/self.kernel_2d.max()\n return self.kernel_2d", "def gaussian(height, center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*np.exp(-(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)", "def gaussian_filter(size,sigma=-1):\n\n if sigma == -1:\n sigma = np.sqrt(size)\n\n filter = np.zeros((size,size))\n\n for i,j in it.product(range(size),range(size)):\n x = j-size//2\n y = i-size//2\n filter[i,j] = 1/(2*np.pi*sigma**2) * np.exp(-(x**2+y**2)/(2*sigma**2))\n\n filter = filter/filter[0,0]\n filter = filter/filter.sum()\n\n return filter", "def gaussian_kernel(size, sigma): \n \n kernel = np.zeros((size, size))\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n k = (size - 1) / 2\n sigma_sq = sigma ** 2\n pi_sigma = 1/(2 * np.pi * sigma_sq)\n for i in range(size):\n for j in range(size):\n kernel[i, j] = pi_sigma * np.exp(-0.5 * ((i-k)**2 + (j-k)**2) / (sigma_sq))\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return 
kernel", "def normal_pmf(x: np.array, mean: float, sigma: float) -> np.array:\n x = np.exp(-1 / 2 * ((x - mean) / sigma) ** 2)\n x /= np.sqrt(2 * np.pi * sigma ** 2)\n x /= x.sum()\n return x", "def Gaussiankernel(size, sigma=1): \n size = int(size) // 2\n # create x grid and y grid\n x, y = np.mgrid[-size:size+1, -size:size+1] \n # gaussian distribution formula\n normal = 1 / np.sqrt(2.0 * np.pi * sigma**2)\n g = np.exp(-((x**2 + y**2) / (2.0*sigma**2))) * normal\n \n return g/g.sum()", "def gaussian_k(x0, y0, sigma, height, width):\n y = np.arange(0, width, 1, float)\n x = np.arange(0, height, 1, float)[:, np.newaxis]\n return np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))", "def gaussian2d(x, amplitude=1.0, center_x=0.0, sigma_x=1.0, center_y=0.0, sigma_y=1.0, rota=0.0):\n \n if len(x) == 1:\n y = x\n else:\n (x, y) = x\n \n if not sigma_y:\n sigma_y = sigma_x\n \n if not center_y:\n center_y = center_x\n \n if rota:\n center_x = center_x*np.cos(np.deg2rad(rota)) - center_y*np.sin(np.deg2rad(rota))\n center_y = center_x*np.sin(np.deg2rad(rota)) + center_y*np.cos(np.deg2rad(rota)) \n \n x = x*np.cos(np.deg2rad(rota)) - y*np.sin(np.deg2rad(rota))\n y = x*np.sin(np.deg2rad(rota)) + y*np.cos(np.deg2rad(rota))\n \n norm = 2.*np.pi*sigma_x*sigma_y\n #exp_x = np.power((x - center_x)/(sigma_x), 2.)\n #exp_y = np.power((y - center_y)/(sigma_y), 2.)\n g = amplitude*np.exp(-(((center_x - x)/sigma_x)**2 + \\\n ((center_y - y)/sigma_y)**2)/2.)\n \n return g #(amplitude/norm)*np.exp(-(exp_x + exp_y)/2.)", "def get_gauss_kernel(sigma, samples):\n p = ny.ceil (2*ny.sqrt(2*ny.log(2))*sigma)\n r = ny.linspace(-p, p, samples)\n x,y = ny.meshgrid(r, r)\n b=bivariate_normal(x,y,sigma,sigma)\n A=(1/ny.sum(b))\n B=A*b\n return x,y,B", "def gaussian_white(z, mu: 'normal' = 0, sigma: (0.4, 1) = 0.7):\n return 1 - gaussian_black(z, mu, sigma)", "def gaussian_blurring(self,input_image,kernel_size,sigma):\n #Applying Gaussian Blur filter\n output_image=cv2.GaussianBlur(input_image,kernel_size,sigma)\n return output_image", "def make_gaussian(shape, var):\n h,w = shape\n x = np.arange(w, dtype=float)\n y = np.arange(h, dtype=float)[:,np.newaxis]\n x0 = w // 2\n y0 = h // 2\n mat = np.exp(-0.5 * (pow(x-x0, 2) + pow(y-y0, 2)) / var)\n normalized_img = np.zeros((h, w))\n cv2.normalize(mat, normalized_img, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n return normalized_img", "def apply_gaussian(X, sigma):\n return np.array([ndimage.gaussian_filter(x, sigma) for x in X])", "def initiategaussian(sd, x0):\n y = np.exp(-x**2/(2*sd**2))\n return y", "def gaussian(\n x: float,\n measurement: 'mstats.ValueUncertainty',\n max_sigma: float = 10.0\n) -> float:\n center = measurement.value\n width = measurement.uncertainty\n width = max(width, 1e-6)\n\n if x <= (center - max_sigma * width) or x >= (center + max_sigma * width):\n # Don't calculate values outside a \"reasonable\" 10 sigma range\n return 0.0\n\n coefficient = 1 / math.sqrt(2.0 * math.pi * width * width)\n exponent = -0.5 * ((float(x) - center) ** 2) / (width * width)\n\n return coefficient * math.exp(exponent)", "def gaussian(x, *parameters):\n position, sigma, amplitude, background = parameters\n return amplitude * np.exp(-(x - position)**2 / (2.0 * sigma**2)) + background", "def get_sigma_psf(self):\n pass", "def MVgaussian(size,mu1=0,mu2=0, sigma1=3,sigma2 = 1):\n kernel = np.zeros((size, size), dtype=np.float32)\n \n size = int(size) // 2\n X = np.arange(-size,size+1)\n Y = np.arange(-size,size+1)\n \n for x in X:\n for y in Y:\n Gx = 
np.exp(-((x-mu1)**2)/(2*(sigma1**2)))\n Gy = np.exp(-((y-mu2)**2)/(2*(sigma2**2)))\n Gx = math.exp(-(math.pow(x-mu1,2))/(2*math.pow(sigma1,2)))\n Gy = math.exp(-(math.pow(y-mu2,2))/(2*math.pow(sigma2,2)))\n kernel[x+size,y+size] = Gx*Gy\n return kernel", "def gaussian_kernel(size, sigma):\n\n m, n = [(s - 1.) / 2. for s in size]\n y, x = np.ogrid[-m:m+1, -n:n+1]\n h = np.exp(-(x*x + y*y) / (2. * sigma * sigma))\n h[h < np.finfo(h.dtype).eps*h.max()] = 0\n sumh = h.sum()\n if sumh != 0: h /= sumh\n return h", "def model_gauss_noise(sigma, nx, ny=1, nz=1):\n\te = EMData()\n\te.set_size(nx, ny, nz)\n\te.process_inplace(\"testimage.noise.gauss\", {\"sigma\":sigma})\n\treturn e", "def gaussian(window_size, sigma):\n gauss = torch.Tensor([math.exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])\n return gauss/gauss.sum()", "def gauss_kern(sigma,h):\n h1 = h\n h2 = h\n x, y = np.mgrid[0:h2, 0:h1]\n x = x-h2/2\n y = y-h1/2\n # sigma = 10.0\n g = np.exp( -( x**2 + y**2 ) / (2*sigma**2) )\n return g / g.sum()", "def sigmanorm(y):\n y = y.copy()\n y -= y.mean() # set to zero mean\n y /= y.std() # rescale to units of sigma\n return y", "def add_gaussian(self, high, center, stdev):\n if callable(high):\n high = high()\n assert stdev > 0\n self.Y += high * np.exp(-0.5 * (self.Xnum-center)**2 / stdev**2)", "def _FSpecialGauss(size, sigma):\n radius = size // 2\n offset = 0.0\n start, stop = -radius, radius + 1\n if size % 2 == 0:\n offset = 0.5\n stop -= 1\n x, y = np.mgrid[offset + start:stop, offset + start:stop]\n assert len(x) == size\n g = np.exp(-((x**2 + y**2)/(2.0 * sigma**2)))\n return g / g.sum()", "def gaussian_kernel_2d(mean, std_inv, size):\n if type(mean) is torch.Tensor:\n device = mean.device\n elif type(std_inv) is torch.Tensor:\n device = std_inv.device\n else:\n device = \"cpu\"\n\n # repeat the size for width, height if single number\n if isinstance(size, numbers.Number):\n width = height = size\n else:\n width, height = size\n\n # expand std to (2, 2) matrix\n if isinstance(std_inv, numbers.Number):\n std_inv = torch.tensor([[std_inv, 0], [0, std_inv]], device=device)\n elif std_inv.dim() == 0:\n std_inv = torch.diag(std_inv.repeat(2))\n elif std_inv.dim() == 1:\n assert len(std_inv) == 2\n std_inv = torch.diag(std_inv)\n\n # Enforce PSD of covariance matrix\n covariance_inv = std_inv.transpose(0, 1) @ std_inv\n covariance_inv = covariance_inv.float()\n\n # make a grid (width, height, 2)\n X = torch.cat(\n [\n t.unsqueeze(-1)\n for t in reversed(\n torch.meshgrid(\n [torch.arange(s, device=device) for s in [width, height]]\n )\n )\n ],\n dim=-1,\n )\n X = X.float()\n\n # center the gaussian in (0, 0) and then shift to mean\n X -= torch.tensor([(width - 1) / 2, (height - 1) / 2], device=device).float()\n X -= mean.float()\n\n # does not use the normalize constant of gaussian distribution\n Y = torch.exp((-1 / 2) * torch.einsum(\"xyi,ij,xyj->xy\", [X, covariance_inv, X]))\n\n # normalize\n # TODO could compute the correct normalization (1/2pi det ...)\n # and send warning if there is a significant diff\n # -> part of the gaussian is outside the kernel\n Z = Y / Y.sum()\n return Z", "def gaussian_template(\n wavelengths: np.ndarray,\n mean: Union[float, np.ndarray],\n std: Union[float, np.ndarray] = 30.0,\n) -> np.ndarray:\n y = norm.pdf(wavelengths, mean, std)\n return y / np.max(y, axis=-1, keepdims=True)", "def gaussian(mu, wid, x):\n return np.exp(-((x - mu) / (0.6005612 * wid))**2)", "def gaussian_kernel(size, sigma):\n\n kernel = np.zeros((size, 
size))\n\n ### YOUR CODE HERE\n k = (size-1)/2\n factor = 1/(2*np.pi*sigma**2)\n for i in range(size):\n for j in range(size):\n exponent = -((i-k)**2 +(j-k)**2)/(2*sigma**2)\n kernel[i,j] = factor*np.exp(exponent)\n ### END YOUR CODE\n\n return kernel", "def gaussian_filter(shape=(3,3),sigma=0.5):\n m,n = [(ss-1.)/2. for ss in shape]\n y,x = np.ogrid[-m:m+1,-n:n+1]\n h = np.exp( -(x*x + y*y) / (2.*sigma*sigma))\n h[ h < np.finfo(h.dtype).eps*h.max() ] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def random_gaussian_noise(image ):\n sigma = image.std()\n for ch in range(image.shape[2] ):\n sigma = min(sigma, image[:,:,ch].std() )\n image = random_noise(image, var = sigma**2)\n return image", "def matlab_style_gauss2D(shape=(3,3),sigma=0.5):\n m,n = [(ss-1.)/2. for ss in shape]\n y,x = np.ogrid[-m:m+1,-n:n+1]\n h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )\n h[ h < np.finfo(h.dtype).eps*h.max() ] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def matlab_style_gauss2D(shape=(3,3),sigma=0.5):\n m,n = [(ss-1.)/2. for ss in shape]\n y,x = np.ogrid[-m:m+1,-n:n+1]\n h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )\n h[ h < np.finfo(h.dtype).eps*h.max() ] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def uni_gaussian(X, mu, sigma2):\n p = (1 / sqrt(2 * pi * sigma2))\n p = p * exp(-power(X - mu, 2) / (2 * sigma2))\n\n def prod(x, y):\n return x * y\n p = array([[reduce(prod, el)] for el in p])\n\n return p", "def GaussianKernel(radius, std):\n size = 2 * radius + 1\n weight = torch.ones(size, size)\n weight.requires_grad = False\n for i in range(-radius, radius+1):\n for j in range(-radius, radius+1):\n dis = (i * i) + (j * j)\n weight[i+radius][j+radius] = np.exp(-dis / (2 * std * std))\n weight = weight / weight.sum()\n return weight", "def kernel_gaussiano(image: np.ndarray, sigma: float, kind: str = 'low') -> np.ndarray:\n U, V = fourier_meshgrid(image)\n D = fourier_distance(U, V)\n H = np.exp( (-1.0 * D) / (2.0 * sigma**2) )\n \n if kind == 'high' or kind == 'highpass':\n H = 1.0 - H\n \n return H", "def get_mask_gauss(mask_dist_src, sigma):\n return P.Exp()(-2.772588722 * (mask_dist_src ** 2) / (sigma ** 2))", "def gauss_kernels(size, sigma=1.0):\n if size < 3:\n size = 3\n\n m = size / 2\n x, y = np.mgrid[-m:m + 1, -m:m + 1]\n kernel = np.exp(-(x * x + y * y) / (2 * sigma * sigma))\n kernel_sum = kernel.sum()\n\n if not sum == 0:\n kernel = kernel / kernel_sum\n\n return kernel", "def fspecial_gaussian(shape=(3, 3), sigma=0.5):\n m, n = [(ss - 1.) / 2. for ss in shape]\n y, x = np.ogrid[-m:m + 1, -n:n + 1]\n h = np.exp(-(x * x + y * y) / (2. * sigma * sigma))\n h[h < np.finfo(h.dtype).eps * h.max()] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def flatten(img,sigma=20.) 
:\n\n for i in range(img.shape[0]) :\n img[i] /= np.median(img[i])\n for i in range(img.shape[1]) :\n img[:,i] /= np.median(img[:,i])\n\n hw=int(3*sigma)\n u=np.linspace(-hw,hw,2*hw+1)\n x=np.tile(u,(2*hw+1,1))\n y=x.T\n k=np.exp(-x**2/2/sigma**2-y**2/2/sigma**2)\n k /= np.sum(k)\n smooth=convolve2d(img,k,weight=None)\n img /= smooth\n\n return img", "def _calc_sigma(self):\n if self.data is None:\n self._initial_blur()\n previous = self.init_sigma\n incr = 0\n self.sigmas = [(previous, incr)]\n for i in range(1, self.scale_per_octave + 3):\n sigma_abs = self.init_sigma * (self.dest_sigma / self.init_sigma) ** (1.0 * i / (self.scale_per_octave))\n increase = previous * sqrt((self.dest_sigma / self.init_sigma) ** (2.0 / self.scale_per_octave) - 1.0)\n self.sigmas.append((sigma_abs, increase))\n previous = sigma_abs\n logger.debug(\"Sigma= %s\" % self.sigmas)", "def create_gaussian_array(self):\n\n # Fill array of size l x w with Gaussian Noise.\n terrain_length = int(ceil(self.length/self.resolution))\n terrain_width = int(ceil(self.width/self.resolution))\n gaussian_array = np.random.normal(self.mu, self.sigma, (terrain_length,terrain_width))\n\n # Filter the array to smoothen the variation of the noise\n gaussian_array = gaussian_filter(gaussian_array, self.sigma_filter)\n\n return gaussian_array", "def gaussian(self, amp_step, sigma_step):\n l = len(self.overlaid_x_axis)\n x = np.linspace(0, l, l) - l/2 # centre of data\n\n # This is new code to 'guess' the size of the Gaussian from the\n # existing data rather than from hard-coded numbers.\n # TODO: test this! Possibly link up to the get_windowed_data function\n # as it uses a lot of the same functionality\n trigger = self.pv_monitor.arrays[self.controls.Arrays.WAVEFORMS][0]\n trace = self.pv_monitor.arrays[self.controls.Arrays.WAVEFORMS][1]\n amplitude = max(trace) + amp_step\n diff = np.diff(trigger)\n stepvalue = 0.5\n if min(diff) > -1 * stepvalue or max(diff) < stepvalue:\n raise RangeError\n else:\n maxtrig = next(x for x in diff if x > stepvalue)\n mintrig = next(x for x in diff if x < -1 * stepvalue)\n edges = [np.where(diff == maxtrig)[0][0],\n np.where(diff == mintrig)[0][0]]\n half_trigger_length = (edges[1]-edges[0])\n sigma = half_trigger_length/4 + sigma_step\n\n gauss = self.ax2.plot(amplitude * np.exp(-x**2 / (2 * sigma**2)), 'r')\n self.overlaid_lines.append(gauss)\n self.draw()", "def multi_gaussian(X, mu, sigma):\n m, n = X.shape\n X = X - mu\n\n factor = X.dot(inv(sigma))\n factor = multiply(factor, X)\n factor = - (1 / 2) * sum(factor, axis=1, keepdims=True)\n\n p = 1 / (power(2 * pi, n / 2) * sqrt(det(sigma)))\n p = p * exp(factor)\n\n return p", "def _sigma_1(gam, eps):\n s1 = 4 * r0**2 * alpha / eps / mec2_unit\n s2 = 1 + (1./3. 
- eps/gam) * (1 - eps/gam)\n s3 = np.log(2 * gam * (gam - eps) / eps) - 1./2.\n s3[np.where(gam < eps)] = 0.0\n return s1 * s2 * s3", "def gaussian_kernel(sigma, bin_width, truncate=10):\n\n nbins = sigma * truncate // bin_width\n bins = np.arange(-bin_width * nbins, bin_width * (nbins + 1), bin_width)\n return np.exp(-0.5 * bins**2 / (sigma**2)) * 1 / sigma / np.sqrt(2 * np.pi)", "def gauss_2d(N, sigma = 0.25):\r\n x, y = make_xy(N)\r\n sigma_pixel = sigma * np.float(N)\r\n arrayout = np.exp(-(x**2 + y**2) / sigma_pixel**2) / (np.pi * sigma_pixel**2)\r\n return arrayout", "def add_gaussian(self, high, center, stdev):\n indexes = dtloc2pos(center, self.X)\n\n if isinstance(stdev, timedelta):\n stdev = stdev.total_seconds() / self.step.total_seconds()\n elif not isinstance(stdev, (float, int)):\n raise TypeError(f\"Unknown type center {type(center)}\")\n\n for index in indexes:\n super().add_gaussian(\n high=high,\n center=index,\n stdev=stdev,\n )", "def gaussian_sigma(t, params):\n t_final = tf.cast(params['t_final'].get_value(), dtype=tf.float64)\n sigma = tf.cast(params['sigma'].get_value(), dtype=tf.float64)\n gauss = tf.exp(-(t - t_final / 2) ** 2 / (2 * sigma ** 2))\n norm = (tf.sqrt(2 * np.pi * sigma ** 2)\n * tf.math.erf(t_final / (np.sqrt(8) * sigma))\n - t_final * tf.exp(-t_final ** 2 / (8 * sigma ** 2)))\n offset = tf.exp(-t_final ** 2 / (8 * sigma ** 2))\n return (gauss - offset) / norm", "def make_DOG(inner_sigma, x):\n y = x\n outer_sigma = inner_sigma*5\n X, Y = np.meshgrid(x, y)\n inner_gaussian = 1./(2.*np.pi*inner_sigma) * np.exp(-(X**2 + Y**2)/2./inner_sigma**2) \n outer_gaussian = 1./(2.*np.pi*outer_sigma) * np.exp(-(X**2 + Y**2)/2./outer_sigma**2) \n return inner_gaussian - outer_gaussian/2 #weaker surround works better with our weights, which don't account for bursts ", "def gaussian(var):\n stddev = np.sqrt(var)\n return stats.norm(0, stddev)", "def get_sigma(self):\n\n if not hasattr(self, 'sigma_zw'):\n self.get_sigma_zw()\n if not hasattr(self, 'sigmaw'):\n self.get_sigmaw()\n if not hasattr(self, 'sigmaz'):\n self.get_sigmaz()\n out = np.hstack((\n np.vstack((self.sigma_w, self.sigma_zw.reshape((-1, 1)))),\n np.vstack((self.sigma_zw, self.sigma_z))\n ))\n return out", "def norm(imagestack, mean, std):\n \n new_im = (imagestack - mean)/std \n \n return new_im" ]
[ "0.7622218", "0.76156443", "0.73544854", "0.71752834", "0.69022816", "0.68148255", "0.67936516", "0.6789439", "0.6705065", "0.66758764", "0.6667239", "0.66651136", "0.6639735", "0.65926224", "0.65801823", "0.6565116", "0.65491575", "0.6542619", "0.6531326", "0.6530046", "0.6523103", "0.6519727", "0.6508179", "0.6508179", "0.64856905", "0.6451151", "0.64402145", "0.64362067", "0.6431242", "0.63854766", "0.6370809", "0.6363925", "0.6360078", "0.6356346", "0.6353183", "0.6352357", "0.6350569", "0.6305891", "0.62952286", "0.6293415", "0.6293415", "0.6293415", "0.6286197", "0.6280724", "0.6278903", "0.6274311", "0.6270234", "0.6259633", "0.62589765", "0.62472814", "0.6244438", "0.62303096", "0.62300014", "0.6224764", "0.62183625", "0.6211588", "0.62019575", "0.6186674", "0.617777", "0.6151976", "0.61454535", "0.61311007", "0.60970306", "0.6093216", "0.6083889", "0.60780966", "0.60717964", "0.6068253", "0.6053314", "0.6047031", "0.6042388", "0.60368204", "0.60364133", "0.6024456", "0.6022659", "0.6016426", "0.601408", "0.601242", "0.60078555", "0.60078555", "0.5995167", "0.59921455", "0.5991372", "0.5983936", "0.5974868", "0.5973256", "0.59709895", "0.5959234", "0.5956632", "0.59550893", "0.5944672", "0.59417605", "0.59401304", "0.5937216", "0.59348196", "0.59280026", "0.59212506", "0.591163", "0.5907433", "0.5906628" ]
0.6426071
29
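The negatives scored above are largely variants of one construction: sample a 2D Gaussian on a grid centered at zero and normalize it to unit sum (the `fspecial`-style `matlab_style_gauss2D` helper appears several times in the list). A minimal NumPy sketch of that pattern, with the kernel shape and sigma chosen only for illustration:

import numpy as np

def gauss2d_kernel(shape=(5, 5), sigma=1.0):
    # Sample exp(-(x^2 + y^2) / (2*sigma^2)) on a grid centered at zero,
    # zero out negligible entries, and normalize the kernel to sum to 1.
    m, n = [(ss - 1.0) / 2.0 for ss in shape]
    y, x = np.ogrid[-m:m + 1, -n:n + 1]
    h = np.exp(-(x * x + y * y) / (2.0 * sigma * sigma))
    h[h < np.finfo(h.dtype).eps * h.max()] = 0
    sumh = h.sum()
    if sumh != 0:
        h /= sumh
    return h

kernel = gauss2d_kernel((5, 5), sigma=1.0)
print(kernel.sum())  # ~1.0 after normalization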
create a cylinder along z axis
def model_cylinder(radius, nx, ny, nz):
    e = EMData()
    e.set_size(nx, ny, nz)
    e.process_inplace("testimage.cylinder", {"radius":radius})
    return e
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cylinder(*args, axis: Union[List[float, float, float], bool]=None, caching: bool=True,\n degree: Union[int, bool]=3, endSweep: Union[float, bool]=2, heightRatio:\n Union[float, bool]=2.0, nodeState: Union[int, bool]=0, pivot: Union[List[float,\n float, float], bool]=None, radius: Union[float, bool]=1.0, sections: Union[int,\n bool]=8, spans: Union[int, bool]=1, startSweep: Union[float, bool]=0, tolerance:\n Union[float, bool]=0.01, useTolerance: bool=False, constructionHistory: bool=True,\n name: AnyStr=\"\", object: bool=True, polygon: int=0, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass", "def makecylinder(model=[0,0,0,1,0,0,1],height = 1,density=10):\n # extract info from cylinder model\n radius = model[6]\n X,Y,Z = model[:3]\n # get 3d points to make an upright cylinder centered to the origin\n n = np.arange(0,360,int(360/density))\n height = np.arange(0,height,height/density)\n n = np.deg2rad(n)\n x,z = np.meshgrid(n,height)\n x = x.flatten()\n z = z.flatten()\n cyl = np.vstack([np.cos(x)*radius,np.sin(x)*radius,z]).T\n # rotate and translate the cylinder to fit the model\n rotation = rotation_matrix_from_vectors([0,0,1],model[3:6])\n rotated_cylinder = np.matmul(rotation,cyl.T).T + np.array([X,Y,Z])\n return rotated_cylinder", "def z_cylinder(shape: (int, int, int), radius: int, dtype=np.uint8):\n z_depth = shape[2]\n xhalf_atom = shape[0] // 2\n yhalf_atom = shape[1] // 2\n cylinders = [\n ((xhalf_atom, yhalf_atom, 0),\n (xhalf_atom, yhalf_atom, z_depth - 1),\n radius)\n ]\n data_mask = create_cylinders_volume(shape, cylinders, foreground=1, dtype=dtype)\n return data_mask, cylinders", "def polyCylinder(*args, axis: Union[List[float, float, float], bool]=None, caching: bool=True,\n constructionHistory: bool=True, createUVs: Union[int, bool]=2, height:\n Union[float, bool]=2.0, name: AnyStr=\"\", nodeState: Union[int, bool]=0,\n object: bool=True, radius: Union[float, bool]=1.0, roundCap: bool=False,\n subdivisionsAxis: Union[int, bool]=20, subdivisionsCaps: Union[int, bool]=0,\n subdivisionsHeight: Union[int, bool]=1, subdivisionsX: Union[int, bool]=20,\n subdivisionsY: Union[int, bool]=1, subdivisionsZ: Union[int, bool]=1, texture:\n Union[int, bool]=2, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[List[AnyStr], Any]:\n pass", "def Cylinder(\n center=(0.0, 0.0, 0.0),\n direction=(1.0, 0.0, 0.0),\n radius=0.5,\n height=1.0,\n resolution=100,\n capping=True,\n):\n cylinderSource = _vtk.vtkCylinderSource()\n cylinderSource.SetRadius(radius)\n cylinderSource.SetHeight(height)\n cylinderSource.SetCapping(capping)\n cylinderSource.SetResolution(resolution)\n cylinderSource.Update()\n surf = wrap(cylinderSource.GetOutput())\n surf.rotate_z(-90, inplace=True)\n translate(surf, center, direction)\n return surf", "def cylinderize(self, z0=0.0, z1=1.0, support=(0.0, 1.0)):\n from .geometry import tensor_product, line_segment\n return tensor_product(line_segment(z0, z1, support=support), self)", "def createCylinder( basePoint=(0,-1,0), tipPoint=(0,1,0), radius = 1.0, colour=(0.6,0.6,0.6), samples = 20 ):\r\n \r\n basePoint = PyUtils.toPoint3d(basePoint)\r\n tipPoint = PyUtils.toPoint3d(tipPoint)\r\n baseToTipVector = Vector3d(basePoint,tipPoint)\r\n if baseToTipVector.isZeroVector() :\r\n raise ValueError( 'Invalid points for cylinder: base and tip are equal!' 
)\r\n baseToTipUnitVector = baseToTipVector.unit()\r\n xUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(0,0,1) )\r\n if xUnitVector.length() < 0.5 :\r\n xUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(0,-1,0) )\r\n xUnitVector.toUnit()\r\n yUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(-1,0,0) )\r\n if yUnitVector.length() < 0.5 :\r\n yUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(0,1,0) )\r\n yUnitVector.toUnit()\r\n\r\n vertices = []\r\n for i in range(samples):\r\n theta = i * 2 * math.pi / float(samples)\r\n vertices.append( basePoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )\r\n for i in range(samples):\r\n theta = i * 2 * math.pi / float(samples)\r\n vertices.append( tipPoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )\r\n for i in range(samples):\r\n theta = i * 2 * math.pi / float(samples)\r\n vertices.append( basePoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )\r\n vertices.append( tipPoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )\r\n \r\n faces = [ range(0,samples), range(samples,2*samples) ]\r\n for i in range(0,2*samples,2) :\r\n base = 2*samples\r\n size = 2*samples\r\n faces.append( (base+i, base+i+1, base+(i+3)%size, base+(i+2)%size ) )\r\n \r\n return create( vertices, faces, colour )", "def project_to_cylinder(points):\n r, phi, z = cs.cart2cyl(points[:, 0], points[:, 1], points[:, 2])\n # NOTE: z is unchanged\n # print(np.array_equal(z, points[:, 2]))\n # project onto unit cylinder\n x_cyl = points[:, 0] / r\n y_cyl = points[:, 1] / r\n z_cyl = (z - z.min()) / (z.max() - z.min())\n points_cyl = np.stack((x_cyl, y_cyl, z_cyl), axis=1)\n return points_cyl, r, phi, z", "def cylinder(downCirc = -120, upCirc = -70,radius = 15, resolution = 20):\n t = np.linspace(0, 2*m.pi, resolution)\n cylinderPos = []\n for num in t:\n x = -m.cos(num)*radius\n y = m.sin(num)*radius\n\n cylinderPos.append([x, y, downCirc, 0, 0, 0, 'mov'])\n\n for num in t:\n x = -m.cos(num)*radius\n y = m.sin(num)*radius\n\n cylinderPos.append([x, y, upCirc, 0, 0, 0, 'mov'])\n\n cylinderPos.append([0,0,-127,0,0,0,'mov'])\n return cylinderPos", "def cylinder(objname, coord1, coord2, radius, rgba, rgba2=None):\r\n if not rgba2:\r\n rgba2 = rgba\r\n return \\\r\n f'\\n{objname} = ' \\\r\n f'[CYLINDER, {coord1[0]}, {coord1[1]}, {coord1[2]}, ' \\\r\n f'{coord2[0]}, {coord2[1]}, {coord2[2]}, {radius}, ' \\\r\n f'{rgba[0]}, {rgba[1]}, {rgba[2]}, ' \\\r\n f'{rgba2[0]}, {rgba2[1]}, {rgba2[2]}] \\n'", "def cyl(diameter, height):\n global _cmds, fragments\n radius = diameter / 2\n _cmds = (\n f\"cylinder(h={height},\"\n f\"r1={radius},r2={radius},\"\n f\"center=false,$fn={fragments});\\n\\n\"\n ) + _cmds", "def create_cylinder(self, pt1, pt2, radius):\n ## Compute the cylinder axis and center.\n axis = [pt1[i] - pt2[i] for i in range(0,3)]\n length = vtk.vtkMath.Normalize(axis)\n center = [(pt1[i] + pt2[i])/2.0 for i in range(0,3)]\n\n # Determine angle to rotate cylinder into given axis.\n vec = [ 0.0, 1.0, 0.0 ]\n rotate_axis = 3*[0.0]\n tmp_cross = 3*[0.0]\n vtk.vtkMath.Cross(vec, axis, rotate_axis)\n radangle = math.atan2(vtk.vtkMath.Norm(rotate_axis), vtk.vtkMath.Dot(axis,vec))\n degangle = vtk.vtkMath.DegreesFromRadians(radangle)\n\n # Create cylinder.\n cylinder = vtk.vtkCylinderSource()\n cylinder.SetCenter(0.0,0.0,0.0);\n cylinder.SetHeight(length);\n cylinder.SetRadius(radius);\n 
cylinder.SetResolution(32)\n cylinder.Update()\n\n # Transform.\n transformer = vtk.vtkTransform()\n transformer.Translate(center[0], center[1], center[2])\n transformer.RotateWXYZ(degangle,rotate_axis)\n\n # Get the polydata (polygon mesh) for the transformed cylinder.\n polyDataTransformer = vtk.vtkTransformPolyDataFilter()\n polyDataTransformer.SetInputData(cylinder.GetOutput())\n polyDataTransformer.SetTransform(transformer)\n polyDataTransformer.Update()\n return polyDataTransformer.GetOutput()", "def cylinder(geometry,\n network,\n propname,\n seed='seed',\n **params):\n prob_fn = getattr(spst,params['name'])\n P = prob_fn(params['shape'],loc=params['loc'],scale=params['scale'])\n value=P.ppf(network.get_throat_data(prop=seed,locations=geometry))\n network.set_throat_data(locations=geometry,prop=propname,data=value)", "def vertical_cylinders(xy_size: int, z_depth: int, dtype=np.uint8):\n shape = (xy_size, xy_size, z_depth)\n image_size_px = shape[0] // 3\n z_depth = shape[2]\n half_atom = image_size_px // 2\n quarter_atom = image_size_px // 4\n cylinders = [\n # center cylinder, z-aligned, 64x64 radius = 16\n ((image_size_px + half_atom, image_size_px + half_atom, 0),\n (image_size_px + half_atom, image_size_px + half_atom, z_depth - 1),\n image_size_px // 4),\n # first tile overlapping to other tiles, z-aligned, 64x64 radius = 16\n ((image_size_px - quarter_atom, image_size_px - quarter_atom, 0),\n (image_size_px - quarter_atom, image_size_px - quarter_atom, z_depth - 1),\n image_size_px // 4),\n # lower middle tile overlapping to other tiles, z-aligned, 64x64 radius = 8\n ((image_size_px * 2 + quarter_atom, image_size_px + half_atom, 0),\n (image_size_px * 2 + quarter_atom, image_size_px + half_atom, z_depth - 1),\n image_size_px // 8),\n ]\n data_mask = create_cylinders_volume(shape, cylinders, foreground=1, dtype=dtype)\n return data_mask, cylinders", "def cylinder_on_axis(radius: int=5, axis: int=0, shape: (int, int, int)=(256, 256, 256)):\n assert axis < len(shape), \"Invalid axis index specified, must be < len(shape)\"\n r = phantom_base.scale_radius_to_basecoord(radius, shape)\n\n def cylinder_axis(t):\n u = -1 + (t * 2), 0, 0\n # roll the elements to have the formula on the target axis\n return tuple(np.roll(u, axis)) + (r,)\n\n # TODO: we need to scale step_count appropriately for the input shape; maybe half?\n nsteps = max(shape)\n return phantom_base.trace_function(cylinder_axis, shape=shape, step_count=nsteps)", "def createCone( basePoint=(0,-1,0), tipPoint=(0,1,0), radius = 1.0, colour=(0.6,0.6,0.6), samples = 20 ):\r\n \r\n basePoint = PyUtils.toPoint3d(basePoint)\r\n tipPoint = PyUtils.toPoint3d(tipPoint)\r\n baseToTipVector = Vector3d(basePoint,tipPoint)\r\n if baseToTipVector.isZeroVector() :\r\n raise ValueError( 'Invalid points for cylinder: base and tip are equal!' 
)\r\n baseToTipUnitVector = baseToTipVector.unit()\r\n xUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(0,0,1) )\r\n if xUnitVector.length() < 0.5 :\r\n xUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(0,-1,0) )\r\n xUnitVector.toUnit()\r\n yUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(-1,0,0) )\r\n if yUnitVector.length() < 0.5 :\r\n yUnitVector = baseToTipUnitVector.crossProductWith( Vector3d(0,1,0) )\r\n yUnitVector.toUnit()\r\n\r\n vertices = []\r\n for i in range(samples):\r\n theta = i * 2 * math.pi / float(samples)\r\n vertices.append( basePoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )\r\n for i in range(samples):\r\n theta = i * 2 * math.pi / float(samples)\r\n vertices.append( basePoint + xUnitVector * math.cos(theta) * radius + yUnitVector * math.sin(theta) * radius )\r\n vertices.append( tipPoint )\r\n \r\n faces = [ range(0,samples) ]\r\n for i in range(0,samples) :\r\n base = samples\r\n size = samples\r\n faces.append( (base+i, base+(i+1)%size, 2*samples ) )\r\n \r\n return create( vertices, faces, colour )", "def HollowCylinder(self,center=(0,0,0),inner_radius=1.0,outer_radius=2.,\n element_type='hex',isotropic=True,nrad=5,ncirc=10, nlong=20,length=10):\n\n if element_type != \"hex\":\n raise NotImplementedError('Generating {} mesh of cylinder is not supported yet'.format(element_type))\n\n if not isinstance(center,tuple):\n raise ValueError(\"The center for the base of the cylinder should be given in a tuple with three elements (x,y,z)\")\n\n self.__reset__()\n\n nlong = int(nlong)\n if nlong==0:\n nlong = 1\n\n mesh = Mesh()\n mesh.HollowCircle(center=(center[0],center[1]), inner_radius=inner_radius,\n outer_radius=outer_radius, element_type=\"quad\",\n isotropic=isotropic, nrad=nrad, ncirc=ncirc)\n\n self.Extrude(base_mesh=mesh, length=length, nlong=nlong)\n self.points += center[2]", "def template_cylinder_annulus(height, outer_radius, inner_radius=0):\n\n img = _template_sphere_disc(dim=2, outer_radius=outer_radius,\n inner_radius=inner_radius)\n img = np.tile(np.atleast_3d(img), reps=height)\n return img", "def cylinder_volume(radius: number, height: number) -> number:\n volume = pi*radius*radius*height\n return volume", "def OneElementCylinder(self,radius=1, length=100, nz=10, element_type=\"hex\"):\n\n if element_type == \"hex\":\n elements = np.arange(0,8)[:,None]\n for i in range(1,nz):\n elements = np.concatenate((elements,np.arange(4*i,4*i+8)[:,None]),axis=1)\n elements = elements.T.copy()\n\n theta = np.array([225,315,45,135])*np.pi/180\n\n xs = np.tile(radius*np.cos(theta),nz+1)\n ys = np.tile(radius*np.sin(theta),nz+1)\n\n points = np.array([xs,ys]).T.copy()\n points = np.concatenate((points,np.zeros((4*(nz+1),1))),axis=1)\n\n zs = np.linspace(0,length, nz+1)[1:]\n zs = np.repeat(zs,4)\n points[4:,-1] = zs\n\n if element_type == \"hex\":\n self.element_type = element_type\n self.elements = elements\n self.points = points\n self.GetBoundaryFacesHex()\n self.GetBoundaryEdgesHex()\n self.GetFacesHex()\n self.GetEdgesHex()\n self.nelem = self.elements.shape[0]\n self.nnode = self.points.shape[0]\n\n elif element_type == \"tet\":\n # USE MESHPY\n raise NotImplementedError('Not implemented yet')\n else:\n raise ValueError('element type not suppported')", "def __init__(self,x_c,z_c,cyl_rad):\n self.x_c = x_c\n self.z_c = z_c\n self.cyl_rad = cyl_rad", "def __init__(self,x_c,z_c,cyl_rad):\n self.x_c = x_c\n self.z_c = z_c\n self.cyl_rad = cyl_rad", "def __init__(self,x_c,z_c,cyl_rad):\n 
self.x_c = x_c\n self.z_c = z_c\n self.cyl_rad = cyl_rad", "def __init__(self,x_c,z_c,cyl_rad):\n self.x_c = x_c\n self.z_c = z_c\n self.cyl_rad = cyl_rad", "def __init__(self,x_c,z_c,cyl_rad):\n self.x_c = x_c\n self.z_c = z_c\n self.cyl_rad = cyl_rad", "def add_cylinder_between(pt1, pt2, r, name=None):\n pt1 = np.array(pt1)\n pt2 = np.array(pt2)\n\n d = pt2 - pt1\n\n # Add cylinder at the correct location\n dist = np.linalg.norm(d)\n loc = (pt1[0] + d[0] / 2, pt1[1] + d[1] / 2, pt1[2] + d[2] / 2)\n bpy.ops.mesh.primitive_cylinder_add(radius=r, depth=dist, location=loc)\n\n cylinder_obj = bpy.context.object\n\n if name is not None:\n cylinder_obj.name = name\n\n # Further rotate it accordingly\n phi = np.arctan2(d[1], d[0])\n theta = np.arccos(d[2] / dist)\n cylinder_obj.rotation_euler[1] = theta\n cylinder_obj.rotation_euler[2] = phi\n\n # Scene update necessary, as matrix_world is updated lazily\n bpy.context.scene.update()\n\n return cylinder_obj", "def build_cylinders(self, vtk_fname, zscale=1, radius=20):\r\n centers = []\r\n heights = []\r\n for w in self.wells:\r\n b = w['LOC']\r\n t = [b[0], b[1], 1]\r\n b_surf = self.cell_verts(b)\r\n t_surf = self.cell_verts(t)\r\n minZ = t_surf[0][2]\r\n b_v = self.centroid(b_surf)\r\n for i in range(len(b_surf)):\r\n t_v = t_surf[i]\r\n if t_v[2] < minZ:\r\n minZ = t_v[2]\r\n cyl_height = abs(b_v[2] - minZ)\r\n cyl_center = [b_v[0], b_v[1], minZ + cyl_height/2]\r\n centers.append(cyl_center)\r\n heights.append(cyl_height)\r\n # Write center, height data to string\r\n c_s = '['\r\n for c in centers:\r\n c_s += '[' + ', '.join(map(str, c)) + '],\\n'\r\n c_s = c_s[:-2] + ']\\n\\n'\r\n h_s = '['\r\n for h in heights:\r\n h_s += str(h) + ',\\n'\r\n h_s = h_s[:-2] + ']\\n\\n'\r\n # Write Paraview script\r\n with io.open('well_cylinders_test.py', 'w', newline='\\r\\n') as f:\r\n f.write(\"from paraview.simple import *\\n\")\r\n f.write(\"paraview.simple._DisableFirstRenderCameraReset()\\n\\n\")\r\n f.write(\"# To use, open grid vtk files in Paraview, then run this script in Python Shell\\n\")\r\n f.write(\"c = \" + c_s)\r\n f.write(\"h = \" + h_s)\r\n f.write(\"for i in range(len(h)):\\n\")\r\n f.write(\" cylinder = Cylinder()\\n\")\r\n f.write(\" fn = FindSource('\" + vtk_fname + \"_*')\\n\")\r\n f.write(\" rv = GetActiveViewOrCreate('RenderView')\\n\")\r\n f.write(\" tf = Transform(Input=cylinder)\\n\")\r\n f.write(\" tf.Transform = 'Transform'\\n\")\r\n f.write(\" tf.Transform.Translate = [c[i][0], c[i][1], c[i][2] * \" + str(zscale) + \"]\\n\")\r\n f.write(\" tf.Transform.Rotate = [90.0, 0.0, 0.0]\\n\")\r\n f.write(\" tf.Transform.Scale = [1.0, \" + str(zscale) + \", 1.0]\\n\")\r\n f.write(\" transformDisplay = Show(tf, rv, 'GeometryRepresentation')\\n\")\r\n f.write(\" cylinder.Radius = \" + str(radius) + \"\\n\")\r\n f.write(\" cylinder.Height = h[i]\\n\")\r\n f.write(\" rv.Update()\")", "def Cylinder(self, center=(0.,0.,0.), radius=1., length=10., nrad=16, ncirc=40, nlong=50, element_type=\"hex\", algorithm=\"standard\"):\n\n if element_type != \"hex\":\n raise NotImplementedError('Generating {} mesh of cylinder is not supported yet'.format(element_type))\n\n if not isinstance(center,tuple):\n raise ValueError(\"The center for the base of the cylinder should be given in a tuple with three elements (x,y,z)\")\n\n self.__reset__()\n\n nlong = int(nlong)\n if nlong==0:\n nlong = 1\n\n mesh = Mesh()\n mesh.Circle(center=(center[0],center[1]), radius=radius, nrad=nrad, ncirc=ncirc, element_type=\"quad\", algorithm=algorithm)\n\n 
self.Extrude(base_mesh=mesh, length=length, nlong=nlong)\n self.points += center[2]", "def plot_eccentricity(self, z=0):\n p = figure(\n title=\"Cut in plane Z=\" + str(z),\n x_axis_label=\"X axis\",\n y_axis_label=\"Y axis\",\n )\n for j in range(0, self.ntheta):\n p.circle(self.xre[z][j], self.yre[z][j], color=\"red\")\n p.circle(self.xri[z][j], self.yri[z][j], color=\"blue\")\n p.circle(0, 0, color=\"blue\")\n p.circle(self.xi, self.yi, color=\"red\")\n p.circle(0, 0, color=\"black\")\n return p", "def unit_z(cls):\n return cls(0, 0, 1)", "def build_cylinder(self, n_phis):\n index = glGenLists(1)\n phis = [float(i)*2.0*numpy.pi/float(n_phis) for i in range(n_phis+1)]\n phi_pairs = zip(phis, phis[1:])\n glNewList(index, GL_COMPILE)\n glBegin(GL_QUADS)\n for phi1,phi2 in phi_pairs:\n dot1 = min(max(numpy.cos(phi1), 0.0), 1.0)\n dot2 = min(max(numpy.cos(phi2), 0.0), 1.0)\n glTexCoord1f(dot1)\n glVertex3f(-0.5, numpy.sin(phi1), numpy.cos(phi1))\n glTexCoord1f(dot1)\n glVertex3f(0.5, numpy.sin(phi1), numpy.cos(phi1))\n glTexCoord1f(dot2)\n glVertex3f(0.5, numpy.sin(phi2), numpy.cos(phi2))\n glTexCoord1f(dot2)\n glVertex3f(-0.5, numpy.sin(phi2), numpy.cos(phi2))\n glEnd()\n glEndList()\n return index", "def in_cylinder(x, y, z, min_z, max_z, max_r):\n r = np.sqrt(x ** 2 + y ** 2)\n m = r < max_r\n m = m & (z < max_z)\n m = m & (z >= min_z)\n return m", "def tube(outside_diam, inside_diam, height):\n global _cmds, fragments\n r1 = outside_diam / 2\n r2 = inside_diam / 2\n _cmds = (\n \"difference(){\\n\"\n f\"cylinder(h={height},r1={r1},r2={r1},\"\n f\"center=false,$fn={fragments});\\n\"\n f\"cylinder(h={height*3},r1={r2},r2={r2},\"\n f\"center=true,$fn={fragments});\\n\"\n \"}\\n\") + _cmds", "def cross_z(self):\n return Vector((self.v.y, -self.v.x))", "def rotateZ(self, *args, **kwargs):\n ...", "def cylinder_volume(diameter, length):\n return 3.14159 * ((diameter/2) ** 2) * length", "def cross_z(self):\n return self.v.cross(Vector((0, 0, 1)))", "def wheel(self):\n return FilletedSolid(\n built_from=\n TranslatedShape(shape_in=\n RotatedShape(shape_in=\n Cylinder(radius=\n self.wheels_properties[1],\n height=300.,\n position=self.position),\n rotation_point=self.position,\n vector=Vector(1, 0, 0),\n angle=radians(90)),\n displacement=Vector(self.wheels_properties[0],\n 300.,\n -self.positions[1][0])),\n radius=30.)", "def p2pCyl(startPoint, endPoint, radius=10, modName=\"Cyl\", plus=0, Seg=3,\n\t\t\t color=\"red\", Opacity=1, RotY=0, Tx=0):\n\t\tcylinderSource = vtk.vtkCylinderSource()\n\t\tcylinderSource.SetRadius(radius)\n\t\tcylinderSource.SetResolution(Seg)\n\n\t\trng = vtk.vtkMinimalStandardRandomSequence()\n\t\trng.SetSeed(8775070) # For testing 8775070\n\n\t\t# Compute a basis\n\t\tnormalizedX = [0] * 3\n\t\tnormalizedY = [0] * 3\n\t\tnormalizedZ = [0] * 3\n\n\t\t# The X axis is a vector from start to end\n\t\tvtk.vtkMath.Subtract(endPoint, startPoint, normalizedX)\n\t\tlength = vtk.vtkMath.Norm(normalizedX) + plus\n\t\t# length = 20\n\t\tvtk.vtkMath.Normalize(normalizedX)\n\n\t\t# The Xn axis is an arbitrary vector cross X\n\t\tarbitrary = [0] * 3\n\t\tfor i in range(0, 3):\n\t\t\trng.Next()\n\t\t\tarbitrary[i] = rng.GetRangeValue(-10, 10)\n\t\tvtk.vtkMath.Cross(normalizedX, arbitrary, normalizedZ)\n\t\tvtk.vtkMath.Normalize(normalizedZ)\n\n\t\t# The Zn axis is Xn cross X\n\t\tvtk.vtkMath.Cross(normalizedZ, normalizedX, normalizedY)\n\t\tmatrix = vtk.vtkMatrix4x4()\n\t\t# Create the direction cosine matrix\n\t\tmatrix.Identity()\n\t\tfor i in range(0, 3):\n\t\t\tmatrix.SetElement(i, 
0, normalizedX[i])\n\t\t\tmatrix.SetElement(i, 1, normalizedY[i])\n\t\t\tmatrix.SetElement(i, 2, normalizedZ[i])\n\t\t# Apply the transforms\n\t\ttransform = vtk.vtkTransform()\n\t\ttransform.Translate(startPoint) # translate to starting point\n\t\ttransform.Concatenate(matrix) # apply direction cosines\n\t\ttransform.RotateZ(-90.0) # align cylinder to x axis\n\t\ttransform.Scale(1.0, length, 1.0) # scale along the height vector\n\t\ttransform.Translate(0, .5, 0) # translate to start of cylinder\n\t\ttransform.RotateY(RotY)\n\t\ttransform.Translate(Tx, 0, 0)\n\n\t\t# Transform the polydata\n\t\ttransformPD = vtk.vtkTransformPolyDataFilter()\n\t\ttransformPD.SetTransform(transform)\n\t\ttransformPD.SetInputConnection(cylinderSource.GetOutputPort())\n\n\t\tstlMapper = vtk.vtkPolyDataMapper()\n\t\tstlMapper.SetInputConnection(transformPD.GetOutputPort())\n\t\tvtkNode = slicer.modules.models.logic().AddModel(transformPD.GetOutputPort())\n\t\tvtkNode.SetName(modName)\n\t\tmodelDisplay = vtkNode.GetDisplayNode()\n\t\tmodelDisplay.SetColor(Helper.myColor(color))\n\t\tmodelDisplay.SetOpacity(Opacity)\n\t\tmodelDisplay.SetBackfaceCulling(0)\n\t\t# modelDisplay.SetVisibility(1)\n\t\tmodelDisplay.SetVisibility2D(True)\n\t\t# modelDisplay.SetSliceDisplayModeToProjection()\n\t\t# dn.SetVisibility2D(True)\n\t\treturn", "def _zforce_xyz(self,x,y,z):\n return -2.*np.pi*self._rhoc_M * self.a**3*self._b*self._c * \\\n _forceInt(x, y, z, self._a2, self._b2*self._a2, self._c2*self._a2, self.n, 2)", "def ArcCylinder(self, center=(0.,0.,0.), radius=1., start_angle=0, end_angle=np.pi/2.,\n length=10., nrad=16, ncirc=40, nlong=50, element_type=\"hex\"):\n\n if element_type != \"hex\":\n raise NotImplementedError('Generating {} mesh of cylinder is not supported yet'.format(element_type))\n\n if not isinstance(center,tuple):\n raise ValueError(\"The center for the base of the cylinder should be given in a tuple with three elements (x,y,z)\")\n\n self.__reset__()\n\n nlong = int(nlong)\n if nlong==0:\n nlong = 1\n\n mesh = Mesh()\n mesh.Arc(center=(center[0],center[1]), radius=radius, start_angle=start_angle,\n end_angle=end_angle, nrad=nrad, ncirc=ncirc, element_type=\"quad\")\n\n self.Extrude(base_mesh=mesh, length=length, nlong=nlong)\n self.points += center[2]", "def origami_H2_2cyl(w1,h1,t1,w2,h2,t2):\n assert((w2 < w1) and (t1 < w1) and (t2 < w2))\n\n # v for volumes and z for z\n v1 = h1*w1\n v2 = h2*w2\n z1 = (h1-1)*w1 + 1\n z2 = v1 + (h2-1)*w2 + 1\n\n # the horizontal permutation\n x = [None] + range(2,v1+v2+1) + [1]\n for i in range(h1):\n x[(i+1)*w1] = i*w1 + 1\n for i in range(h2):\n x[v1 + (i+1)*w2] = v1 + i*w2 + 1\n\n # the vertical permutation\n y = ([None] +\n range(w1+1,v1+1) + [None]*w1 +\n range(v1+w2+1,v1+v2+1) + [None]*w2)\n\n for i in range(w2):\n # up-left of the first cylinder\n # print \"U1L) z1 + (t1+i)%w1 -> 1+v1+i: \", z1+(t1+i)%w1, 1+v1+i\n y[z1+(t1+i)%w1] = 1+v1+i\n for i in range(w2):\n # up of the second cylinder\n # print \"U2) z2+(t2+i)%w2 -> 1 + (t1+i)%w1: \", z2+(t2+i)%w2, 1+(t1+i)%w1\n y[z2+(t2+i)%w2] = 1+i\n for i in range(w1-w2):\n # up-right of the first cylinder\n # print \"U1R) z1+w2+(t1+i) -> 1+i: \", z1+(w2+t1+i)%w1, 1+w2+i\n y[z1+(w2+t1+i)%w1] = 1+w2+i\n\n return Origami(x[1:],y[1:])", "def render_capsule(self, l, r):\n # draw cylinder\n glPushMatrix()\n glScalef(l, r, r)\n glCallList(self.cylinder)\n glPopMatrix()\n # draw +x hemisphere\n glPushMatrix()\n glTranslatef(l/2.0, 0, 0)\n glScalef(r, r, r)\n glCallList(self.hemisphere)\n glPopMatrix()\n # draw -x hemisphere\n 
glPushMatrix()\n glRotatef(180.0, 0, 0, 1)\n glTranslatef(l/2.0, 0, 0)\n glScalef(r, r, r)\n glCallList(self.hemisphere)\n glPopMatrix()", "def createDetectorCylinder(self,uNum=1):\n cellsCreated = 0\n sNum = self.SurfaceStartNum\n cNum = self.CellStartNum\n detectorCells = list()\n s = '{:5d} rcc 0 0 0 0 0 217.7 {}\\n'.format(self.SurfaceStartNum,self.geoParam['CylinderRadius'])\n c = ''\n keyList = sorted(self.surfGeo.keys(), key = lambda x: float(x))\n for key in keyList:\n sPrev = sNum\n sNum += 1\n cNum += 1\n s += self.surfForStr.format(sNum,key)\n m = self.material[self.surfGeo[key]]\n if cNum == self.CellStartNum+1:\n c+= '{:5d} {:d} -{:4.3f} -{:d} u={:d}\\n'.format(cNum,m['mt'],m['rho'],sNum,uNum)\n else:\n c += self.cellForStr.format(cNum,m['mt'],m['rho'],sPrev,sNum,uNum)\n # List of cells for the detector\n if self.surfGeo[key] is 'Detector':\n detectorCells.append(cNum)\n cellsCreated += 1\n # Last cell up to universe boundary\n m = self.material['Moderator']\n c += '{:5d} {:d} -{:4.3f} {:d} u={:d}\\n'.format(cNum+1,m['mt'],m['rho'],sNum,uNum)\n cellsCreated += 1\n return s,c,detectorCells,cellsCreated", "def rot_z(angle):\n sangle = math.sin(angle)\n cangle = math.cos(angle)\n rz = np.array([[cangle, sangle, 0.0],\n [-sangle, cangle, 0.0],\n [0.0, 0.0, 1.0]])\n return rz", "def make_cylindrical(self, grid, axis='z'):\n if grid.shape != self.shape:\n raise NotImplementedError('make_cylindrical: grid shape mismatch'\n ' not supported')\n gt_flat = grid.t.flat\n self.r = self.x.copy()\n self.t = self.x.copy()\n r_flat = self.r.flat\n t_flat = self.t.flat\n\n if axis == 'z':\n x_flat = self.x.flat\n y_flat = self.y.flat\n for i in range(self.x.size):\n gt = gt_flat[i]\n x = x_flat[i]\n y = y_flat[i]\n magnitude = hypot(x, y)\n rel_theta = atan2(y, x) - gt\n r_flat[i] = magnitude * cos(rel_theta)\n t_flat[i] = magnitude * sin(rel_theta)\n self.x = None\n self.y = None\n\n elif axis == 'x':\n y_flat = self.y.flat\n z_flat = self.z.flat\n for i in range(self.y.size):\n gt = gt_flat[i]\n y = y_flat[i]\n z = z_flat[i]\n magnitude = hypot(y, z)\n rel_theta = atan2(z, y) - gt\n r_flat[i] = magnitude * cos(rel_theta)\n t_flat[i] = magnitude * sin(rel_theta)\n self.z = self.x\n self.x = None\n self.y = None\n\n else:\n raise ValueError(\"axis must be 'z' or 'x'\")", "def create_cylinders_volume(\n shape: (int, int, int),\n cylinders_list: list,\n foreground=1,\n dtype=np.uint8):\n volume = np.zeros(shape, dtype=bool)\n for point1, point2, radius in cylinders_list:\n volume = add_cylinder_px(volume, point1, point2, radius)\n return volume_bool_to_dtype(volume, fg=foreground, dtype=dtype)", "def c(self, z, y, r, t):\n \n u = np.zeros( self.m ) \n \n return u", "def rotateZ(self, ang):\n if ang != 0.0:\n x = self.getX() * cos(ang) - self.getY() * sin(ang)\n y = self.getX() * sin(ang) + self.getY() * cos(ang)\n z = self.getZ()\n self.setX(x)\n self.setY(y)\n self.setZ(z)", "def rotateZ(self, angle):\r\n rad = angle * math.pi / 180\r\n cosa = math.cos(rad)\r\n sina = math.sin(rad)\r\n x = self.x * cosa - self.y * sina\r\n y = self.x * sina + self.y * cosa\r\n return Point3D(x, y, self.z)", "def cylindervert_addedmass(R, z1, z2, rho, Ca=1, AxCa=1,\n m_f=0, z_f=0, m_mg=0, z_mg=0):\n if z1<z2:\n raise Exception('z1 should be above z2')\n if z1<0:\n # Fully submerged\n ztop = z1\n A0=0\n nAx=2\n else:\n # Partially submerged\n ztop = 0 \n A0 = np.pi*R**2 # undisplaced waterplane area of platform (m^2)\n nAx=1\n\n h = ztop-z2 # submerged height\n z_b = (ztop+z2)/2 # coordinates of the center of 
buoyancy of the undisplaced platform (m)\n V0 = np.pi*R**2*h # undisplaced volume of platform (m^3)\n\n M=np.zeros((6,6))\n M[0,0] = Ca * rho*V0\n M[1,1] = Ca * rho*V0\n M[2,2] = nAx*AxCa * 2/3*rho*np.pi * R**3 # rho*V0* D/(3*h)\n M[4,0] = M[0,0]*z_b # TODO empirical\n M[3,1] = -M[0,0]*z_b # TODO empirical\n M[0,4] = M[0,0]*z_b # TODO empirical\n M[1,3] = -M[0,0]*z_b # TODO empirical\n\n T1 =Ca*rho*np.pi*R**2 * h**3 /3 # Ca * rho*V0 * h**2/3 # TODO a bit empirical\n M[3,3] = T1\n M[4,4] = T1\n return M", "def showSimpleCylinders(self): \n #from morphforge.morphology.util import TriMeshBuilderVerySimple\n import sys\n sys.path.append('/usr/share/pyshared/')\n \n #import morphforge\n from morphforge.morphology.mesh import MeshBuilderRings\n MonkeyPatchMayaVi()\n #import enthought.mayavi.mlab as mlab\n from mayavi import mlab\n \n assert len(self.morphs)==1\n mesh = MeshBuilderRings().build(self.morphs[0])\n \n \n @mlab.show\n def _showSimpleCylinders():\n\t \n #c = TriMeshBuilderVerySimple(self.morphs[0])\n #mlab.triangular_mesh(c.x, c.y, c.z, c.triangles, colormap=self.colormap)\n mlab.triangular_mesh(mesh.vertices[:,0], mesh.vertices[:,1], mesh.vertices[:,2], mesh.triangles, colormap=self.colormap)\n \n _showSimpleCylinders()", "def rotation3D_z(angle: float) -> np.array:\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])", "def surface_area_cylinder(radius: float, height: float) -> float:\r\n if radius < 0 or height < 0:\r\n raise ValueError(\"surface_area_cylinder() only accepts non-negative values\")\r\n return 2 * pi * radius * (height + radius)", "def rotateZ(self, angle):\n rad = angle * math.pi / 180\n cosa = math.cos(rad)\n sina = math.sin(rad)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)", "def rotateZ(self, angle):\n rad = angle * math.pi / 180\n cosa = math.cos(rad)\n sina = math.sin(rad)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)", "def rotateZ(self, angle):\n rad = angle * math.pi / 180\n cosa = math.cos(rad)\n sina = math.sin(rad)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)", "def rotateZ(self, angle):\n rad = angle * math.pi / 180\n cosa = math.cos(rad)\n sina = math.sin(rad)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)", "def test_cylindrical(self):\n # Rotate around the z axis\n r = Joint.cylindrical(np.array([0, 0, 1]))\n t_mat = r(np.array([np.pi / 2, 1.0]))\n\n rot_vec = np.dot(t_mat[:3, :3], np.array([1, 0, 0]))\n\n self.assertTrue(np.allclose(\n rot_vec, np.array([0, 1, 0]), rtol=1e-5, atol=1e-5))\n self.assertTrue(np.allclose(t_mat[2, 3], 1))", "def rotateZ(self, angle):\n rad = math.radians(angle)\n cosa = math.cos(rad)\n sina = math.sin(rad)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)", "def rotateZ(self, angle):\n\t\trad = angle * math.pi / 180\n\t\tcosa = math.cos(rad)\n\t\tsina = math.sin(rad)\n\t\tx = self.x * cosa - self.y * sina\n\t\ty = self.x * sina + self.y * cosa\n\t\treturn Point3D(x, y, self.z)", "def test_circular_scatter_z():\n area = [0, 1000, 0, 1000]\n size = 1000\n z_value = 500\n x, y, z = gridder.circular_scatter(area, size, z=z_value, random=False)\n assert z.size == size\n npt.assert_allclose(z, z_value + np.zeros(size))", "def RunCylinder(l,p,cylinderPositions):\n # Creating input and output deck 
names\n posString = ''\n for pos in cylinderPositions:\n posString += '{:2.1f}-'.format(pos[0])\n posString = posString.rstrip('-')\n inp='Cyl_{}LiF_{}_{}.mcnp'.format(int(l*100),p,posString)\n name='OUTCyl_{}LiF_{}_{}.'.format(int(l*100),p,posString)\n print inp\n # Creating and running the model\n m = CylinderRPM()\n m.createSurfaceGeo()\n m.setMaterial(l,p)\n m.createDetectorCylinder()\n m.createInputDeck(cylinderPositions,inp,name)\n m.runModel()", "def squeeze(self, z, mode):\n phi = angle(z)\n r = abs(z)\n self.circuit.squeeze(r, phi, mode)", "def generate(self):\n inside = self.crystal.is_inside(self.x,self.y,self.z)\n X = np.vstack((self.x[inside],self.y[inside],self.z[inside]))\n return self.rot.rotate(X)", "def cart2polar3d(cartesian):\n radius = np.linalg.norm(cartesian)\n theta = np.cos", "def polyCone(*args, axis: Union[List[float, float, float], bool]=None, caching: bool=True,\n constructionHistory: bool=True, createUVs: Union[int, bool]=2, height: Union[float,\n bool]=2.0, name: AnyStr=\"\", nodeState: Union[int, bool]=0, object: bool=True,\n radius: Union[float, bool]=1.0, roundCap: bool=False, subdivisionsAxis: Union[int,\n bool]=20, subdivisionsCap: Union[int, bool]=0, subdivisionsHeight: Union[int,\n bool]=1, subdivisionsX: Union[int, bool]=20, subdivisionsY: Union[int, bool]=1,\n subdivisionsZ: Union[int, bool]=0, texture: bool=True, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass", "def _add_cylinder_basic(self, point1, point2, radius):\n\n self._cylinder_end_nodes.append(point1)\n self._cylinder_end_nodes.append(point2)\n self._cylinder_end_nodes_radiuses.append(radius)\n self._cylinder_end_nodes_radiuses.append(radius)\n self.object_number += 1", "def zaxis ( self ) :\n return self.__zaxis", "def rotz(t):\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])", "def create_triangle( xsize, ysize, zsize, place=(0,0,0), rotate=(1,0,0,0) ):\n \n b1 = create_box( (xsize,ysize,zsize), place=(0,0,0 ) )\n ms = max( xsize, zsize )\n angle = math.atan( zsize / xsize ) * 180 / math.pi\n b2 = create_box( (2*ms, 2*ysize, 2*ms), place=(0,-EPS,0), rotate=(0,1,0,-angle) )\n tr = create_cut( b1, b2 )\n return relocate( tr, place, rotate )", "def _derZ(self, w, x, y, z):\n raise NotImplementedError()", "def add_vehicle_z_axis(df):\n x = pd.Series(\n np.zeros(df.shape[0]),\n index=df[\"timestamp\"],\n name=\"T_vehicle_attitude_0__NF_body_z_axis_x\",\n )\n y = pd.Series(\n np.zeros(df.shape[0]),\n index=df[\"timestamp\"],\n name=\"T_vehicle_attitude_0__NF_body_z_axis_y\",\n )\n z = pd.Series(\n np.ones(df.shape[0]),\n index=df[\"timestamp\"],\n name=\"T_vehicle_attitude_0__NF_body_z_axis_z\",\n )\n x, y, z = mpd.series_quatrot(\n x,\n y,\n z,\n df[\"T_vehicle_attitude_0__F_q_0\"],\n df[\"T_vehicle_attitude_0__F_q_1\"],\n df[\"T_vehicle_attitude_0__F_q_2\"],\n df[\"T_vehicle_attitude_0__F_q_3\"],\n )\n\n df[x.name] = x.values\n df[y.name] = y.values\n df[z.name] = z.values", "def __init__(self, pos, heading=vector(0,0,1)):\n # list of vPython 3D shapes that make up this player\n self.parts = []\n \n self.pos = vector(pos)\n # Direction in which robot is moving, normalized to unit length\n self.heading = norm(heading)\n\n self.radius = 1.0\n\n self.velocity = vector(0,0,0)\n\n face = cylinder(pos=self.pos, axis = (0,1.5,0), radius=.75,\n color=color.white, material = materials.chrome)\n self.parts += [face] \n\n self.head = sphere(pos=self.pos, radius = .75, color = color.white, material = materials.chrome)\n self.parts 
+= [self.head]\n\n \n left_eye = sphere(pos=self.pos+vector(.35,.4,.6), \n radius=0.36, color=color.blue, material = materials.emissive)\n self.parts += [left_eye]\n right_eye = sphere(pos=self.pos+vector(-.35,.4,.6),\n radius=0.36, color=color.blue, material = materials.emissive)\n self.parts += [right_eye]\n\n neck = cylinder(pos=self.pos+vector(0,-1,0), axis = (0,.5,0), radius = .05, color=color.white)\n self.parts += [neck]\n\n self.body = cylinder(pos=self.pos+vector(0,-1.75,0),axis = (0,.75,-.2), radius = .35, color=color.white, material = materials.chrome)\n self.parts += [self.body]\n\n bottom = sphere(pos=self.pos+vector(0,-1.75,0), radius =.35, color = color.white, material = materials.chrome)\n self.parts += [bottom]\n\n right_shoulder = sphere(pos = self.pos+vector(-.35,-1,0), radius = .20, color = color.blue, material = materials.chrome)\n self.parts += [right_shoulder]\n\n left_shoulder = sphere(pos= self.pos+vector(.35,-1,0), radius = .20, color = color.blue, material = materials.chrome)\n self.parts += [left_shoulder]\n\n right_arm = cone(pos = self.pos+vector(-.36, -1.1, 0), axis = (-.2, -.7, -.4), radius = .12, color = color.white, material = materials.chrome)\n self.parts += [right_arm]\n\n left_arm = cone(pos = self.pos+vector(.36, -1.1, 0), axis = (.2, -.7, -.4), radius = .12, color = color.white, material = materials.chrome)\n self.parts += [left_arm]\n\n right_leg = cone(pos = self.pos+vector(-.32, -2.85, 0), axis = (.1, .8, .1), radius = .2, color = color.white, material = materials.chrome)\n self.parts += [right_leg]\n\n left_leg = cone(pos = self.pos+vector(.32,-2.15,.8), axis = (-.1, .1, -.8), radius = .2, color = color.white, material = materials.chrome)\n self.parts += [left_leg]", "def rotate_z(self, angle: float):\n self.vertices = list(\n Matrix44.z_rotate(angle).transform_vertices(self.vertices)\n )\n return self", "def _derZ(self, x, y, z):\n raise NotImplementedError()", "def test_rotate_around_v3_z_axis(self):\n from pedemath.vec3 import rotate_around_vector_v3\n\n vec_a = Vec3(3, 4, 5)\n vec_b = Vec3(0, 0, 1)\n\n result = rotate_around_vector_v3(vec_a, math.pi, vec_b)\n expected = Vec3(-3, -4, 5)\n\n self.assertAlmostEqual(result.x, expected.x)\n self.assertAlmostEqual(result.y, expected.y)\n self.assertAlmostEqual(result.z, expected.z)", "def cross(self):\n return self.v.cross(self.z_axis)", "def draw_car():\r\n\twheel_radius = .5\r\n\twheel_thickness = .4\r\n\r\n\tglPushMatrix()\r\n\r\n\t# shift the car up so the base lies at the origin\r\n\tglTranslatef(0,wheel_radius,0)\r\n\t\r\n\tdraw_car_body()\r\n\r\n\t# draw the car wheels\r\n\t# assume the car is facing down the -z axis\r\n\t# front left, front right, back left, back right\r\n\tww = wheel_thickness/2\r\n\twheel_centers = [(-.5-ww,0,-1),(.5+ww,0,-1),(-.5-ww,0,1),(.5+ww,0,1)]\r\n\tfor i in range(4):\r\n\t\tglPushMatrix()\r\n\t\tapply(glTranslatef,wheel_centers[i])\r\n\t\tglRotatef(90,0,1,0)\r\n\t\tglScalef(wheel_radius,wheel_radius,wheel_radius)\r\n\t\tdraw_wheel()\r\n\t\tglPopMatrix()\r\n\r\n\tglPopMatrix()", "def plot_surface_3D(self, length = 30, fps = 30, **kwargs):\n fig = utils.get_figure(scale = 3)\n ax = fig.add_subplot(111, projection = '3d')\n\n # surface_x = self.xi_1_mesh\n # surface_y = self.xi_2_mesh\n # surface_x, surface_y, surface_z = self.surface()\n xyz = self.surface()\n\n # surface_x, surface_y = np.meshgrid(surface_x, surface_y)\n\n # print(np.shape(surface_x))\n # print(np.shape(surface_y))\n # print(np.shape(surface_z))\n\n control_points_x = np.array([control_point[0] for 
control_point in self.control_net.values()])\n control_points_y = np.array([control_point[1] for control_point in self.control_net.values()])\n control_points_z = np.array([control_point[2] for control_point in self.control_net.values()])\n\n # x_min = min(np.min(surface_x), np.min(control_points_x))\n # x_max = max(np.max(surface_x), np.max(control_points_x))\n # x_range = np.abs(x_max - x_min)\n #\n # y_min = min(np.min(surface_y), np.min(control_points_y))\n # y_max = max(np.max(surface_y), np.max(control_points_y))\n # y_range = np.abs(y_max - y_min)\n #\n # z_min = min(np.min(surface_z), np.min(control_points_z))\n # z_max = max(np.max(surface_z), np.max(control_points_z))\n # z_range = np.abs(z_max - z_min)\n #\n # ax.set_xlim(x_min - 0.05 * x_range, x_max + 0.05 * x_range)\n # ax.set_ylim(y_min - 0.05 * y_range, y_max + 0.05 * y_range)\n # ax.set_zlim(z_min - 0.05 * z_range, z_max + 0.05 * z_range)\n\n ax.scatter(control_points_x, control_points_y, control_points_z, depthshade = False, **CONTROL_POLYGON_KWARGS)\n\n # print(np.max(surface_x), np.max(surface_y), np.max(surface_z))\n # print(np.min(surface_x), np.min(surface_y), np.min(surface_z))\n # print(surface_x)\n # print(surface_y)\n # print(surface_z)\n xyz = np.reshape(xyz, (-1, 3))\n print(xyz.shape)\n x, y, z = xyz[:, 0], xyz[:, 1], xyz[:, 2]\n ax.scatter(x, y, z)\n # ax.plot_trisurf(\n # x, y, z,\n # cmap = plt.get_cmap('viridis'),\n # linewidth = 0,\n # antialiased = True,\n # )\n # ax.plot_surface(surface_x, surface_y, surface_z, rstride = 1, cstride = 1)\n # ax.plot_trisurf(surface_x, surface_y, surface_z)\n # ax.plot_trisurf(surface_x, surface_y, surface_z, **CURVE_KWARGS)\n\n ax.axis('off')\n\n ax.view_init(elev = 45, azim = 0) # note that this resets ax.dist to 10, so we can't use it below\n ax.dist = 7.5 # default is 10, so zoom in a little because there's no axis to take up the rest of the space\n\n plt.show()\n utils.save_current_figure(**kwargs)\n\n ### ANIMATION ###\n\n frames = length * fps\n\n writer = anim.writers['ffmpeg'](fps = fps, bitrate = 2000) # don't need a very high bitrate\n\n def animate(frame):\n print(frame, frames, frame / frames)\n ax.azim = 360 * frame / frames # one full rotation\n return [] # must return the list of artists we modified (i.e., nothing, since all we did is rotate the view)\n\n ani = anim.FuncAnimation(fig, animate, frames = frames, blit = True)\n ani.save(f\"{os.path.join(kwargs['target_dir'], kwargs['name'])}.mp4\", writer = writer)\n\n plt.close()", "def drawCurve3D(xlist, ylist, zlist):\n dislin.curv3d(xlist,ylist,zlist,len(xlist))", "def rotate_z(self, angle):\n angle *= np.pi / 180\n return self.transform(np.matrix([[np.cos(angle), -np.sin(angle), 0],\n [np.sin(angle), np.cos(angle), 0],\n [0, 0, 1]]))", "def drawSphere3D(x0,y0,z0, radius, hres, vres):\n dislin.sphe3d(x0,y0,z0, radius, hres, vres)", "def cbar( self , y , t = 0 ):\n \n u = self.c( self.zbar, y , self.rbar , t )\n \n return u", "def rotz(t):\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, -s, 0],\n [s, c, 0],\n [0, 0, 1]])", "def rotz(t):\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, -s, 0],\n [s, c, 0],\n [0, 0, 1]])", "def rotz(t):\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, -s, 0],\n [s, c, 0],\n [0, 0, 1]])", "def rotz(t):\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, -s, 0],\n [s, c, 0],\n [0, 0, 1]])", "def rz(self, angle: float, center: Onion[Iterable[Onion[float, int]], Point3D] = None) -> 'Component':\n return self.rotate(rz=angle, center=center)", "def 
cylinder_area(radius: number, height: number) -> number:\n area = 2*pi*radius*(radius+height)\n return area", "def cylindrical2cartesian(cylinder):\n cart = np.zeros(cylinder.shape)\n cart[:, 0] = cylinder[:, 0] * np.cos(cylinder[:, 1])\n cart[:, 1] = cylinder[:, 0] * np.sin(cylinder[:, 1])\n cart[:, 2] = cylinder[:, 2]\n return cart", "def unoriented_cube():\n faces = get_oriented_cube_faces()\n for face in faces:\n np.random.shuffle(face)\n poly = Polyhedron(get_cube_points(), faces, faces_are_convex=True)\n poly.sort_faces()\n return poly", "def add_cylinder_px(volume,\n point1: (int, int, int),\n point2: (int, int, int),\n radius: int):\n p1, p2, r = cylinders_px_to_basecoord([(point1, point2, radius)], volume.shape)[0]\n return add_cylinder_basecoord(volume, p1, p2, r)", "def plane(*args, length: float=0.0, name: AnyStr=\"\", position: List[float, float, float]=None,\n rotation: List[float, float, float]=None, size: float=0.0, width: float=0.0,\n **kwargs)->AnyStr:\n pass", "def get_z_axis(self, pipette_id: str) -> MotorAxis:\n mount = self.get(pipette_id).mount\n return MotorAxis.LEFT_Z if mount == MountType.LEFT else MotorAxis.RIGHT_Z", "def cz(self, q0, q1, ctrl=None):\n self.__add_quantum_gate(kind=CONTROLLED_Z, qid=[q0,q1], ctrl=ctrl)\n return self", "def tapered_cylinder_geom(R1, R2, H):\r\n m = (R2-R1)/H\r\n if R1 == R2: # cylinder\r\n V = abs(np.pi*R1*R1*H)\r\n h_c = H/2.0\r\n elif R1==0: \r\n V = abs(1.0/3.0*np.pi*R2*R2*H)\r\n h_c = 3.0/4.0*H\r\n else:\r\n V = abs(np.pi/3.0/m*(R2**3 - R1**3))\r\n h_c = H*(R1**2 + 2*R1*R2 + 3*R2**2)/4.0/(R1**2 + R1*R2 + R2**2) # ( coneV*1./4.*coneH - coneVtip*(1./4.*(coneH-H) + H) )/ taperV\r\n return V, h_c", "def full_3d(self, quantity):\n # The data just tells you what integer grid point you are on. Not what actual x,y coordinate you\n # are at\n x = np.arange(0, self.period, self.dx)\n y = np.arange(0, self.period, self.dy)\n z = np.arange(0, self.height + self.dz, self.dz)\n points = np.array(list(itertools.product(z, x, y)))\n # Get the scalar\n scalar = self.get_scalar_quantity(quantity)\n labels = ('X [um]', 'Y [um]', 'Z [um]', quantity)\n # Now plot!\n self.scatter3d(points[:, 1], points[:, 2], points[\n :, 0], scalar.flatten(), labels, 'full_3d')", "def vel(z, c = cp.cc.c_light_cm_s/1e5):\n # return z*c/(1+z)\n return c*((1+z)**2-1)/((1+z)**2+1)", "def distance_from_cylinder(self, points, params, sqrt=False):\n # axis: 3 x 1, center: 1 x 3\n axis, center, radius = params\n center = center.reshape((1, 3))\n axis = axis.reshape((3, 1))\n\n v = points - center\n prj = (v @ axis) ** 2\n\n # this is going negative at some point! fix it. Numerical issues.\n # voilating pythagoras\n dist_from_surface = torch.sum(v * v, 1) - prj[:, 0]\n dist_from_surface = torch.clamp(dist_from_surface, min=1e-5)\n\n distance = torch.sqrt(dist_from_surface) - radius\n # distance.register_hook(self.print_norm)\n distance = distance ** 2\n\n if sqrt:\n distance = guard_sqrt(distance)\n\n if torch.sum(torch.isnan(distance)):\n import ipdb;\n ipdb.set_trace()\n if self.reduce:\n distance = torch.mean(distance)\n\n return distance" ]
[ "0.73577124", "0.7329743", "0.72503877", "0.6993126", "0.68925846", "0.68604624", "0.68472177", "0.67055696", "0.66552156", "0.6639137", "0.6544904", "0.6456661", "0.633183", "0.62076217", "0.61787236", "0.61410314", "0.6091397", "0.6059696", "0.6049184", "0.6046165", "0.60013103", "0.60013103", "0.60013103", "0.60013103", "0.60013103", "0.5951592", "0.59418696", "0.59176785", "0.58725595", "0.5872051", "0.5790854", "0.57749695", "0.5764138", "0.5744195", "0.5742941", "0.5731307", "0.571015", "0.5691538", "0.56867766", "0.56848395", "0.56839186", "0.5657884", "0.56461626", "0.56399894", "0.5626941", "0.5617542", "0.5617503", "0.56159866", "0.5600786", "0.55727893", "0.5569838", "0.55664855", "0.55486387", "0.5545254", "0.5520897", "0.5520897", "0.5520897", "0.5520897", "0.5469066", "0.5443069", "0.5437509", "0.5420359", "0.54102385", "0.5407999", "0.5387315", "0.53830725", "0.537215", "0.53713644", "0.5341841", "0.53344786", "0.53306925", "0.5325854", "0.5318335", "0.5317186", "0.5308981", "0.5303932", "0.52995473", "0.52964514", "0.5286888", "0.52833354", "0.52759016", "0.5265466", "0.5261938", "0.52573335", "0.5250805", "0.5250805", "0.5250805", "0.5250805", "0.52478427", "0.52472705", "0.524603", "0.52399266", "0.52372414", "0.5230161", "0.5229741", "0.5217988", "0.5207454", "0.5204966", "0.5204629", "0.5203862" ]
0.62607145
13
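The rank fields in these rows look internally consistent: in the row above, 13 of the negative scores exceed the document_score of 0.62607145, matching document_rank = 13, and the same relation holds for the next row, where 11 negatives outscore 0.68094635. Below is a minimal sketch of that inferred relation; it is an observation about the rows shown here, not a documented rule of the dataset, and the helper name is hypothetical. Note the scores appear as strings in this dump.

# Hypothetical helper, inferred from the rows shown here: document_rank appears to
# equal the number of negatives whose score exceeds document_score.
def infer_document_rank(document_score, negative_scores):
    # scores are stored as strings in this dump, so convert before comparing
    return sum(float(s) > float(document_score) for s in negative_scores)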
Create an image of noise having standard deviation "sigma", and average 0.
def model_gauss_noise(sigma, nx, ny=1, nz=1): e = EMData() e.set_size(nx, ny, nz) e.process_inplace("testimage.noise.gauss", {"sigma":sigma}) return e
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_gaussian_noise(image, min_sigma, max_sigma):\n assert(max_sigma >= min_sigma)\n sigma = np.random.uniform(min_sigma, max_sigma)\n noise = np.random.normal(loc=0, scale=sigma, size=image.shape)\n noisy_im = np.floor((image + noise) * 255) / 255\n noisy_im = np.clip(noisy_im, 0, 1)\n return noisy_im", "def addNoise(img, sigma=2.0, mean=0):\n img2 = np.random.normal(mean, sigma, size=img.shape)\n\n img2 += img\n img2 = np.uint8(img2.clip(0, 255))\n return img2", "def add_gaussian_noise(image, min_sigma, max_sigma):\r\n sigma = np.random.uniform(min_sigma, max_sigma)\r\n corrupted_im = image + np.random.normal(0, sigma, image.shape)\r\n rounded_im = (np.round(corrupted_im * MAX_PIXEL_NUMBER)) / MAX_PIXEL_NUMBER\r\n return np.clip(rounded_im, 0, 1).astype(np.float64)", "def random_gaussian_noise(image ):\n sigma = image.std()\n for ch in range(image.shape[2] ):\n sigma = min(sigma, image[:,:,ch].std() )\n image = random_noise(image, var = sigma**2)\n return image", "def addNoise(self, sigma=1.0):\n noise = numpy.random.normal(loc=0, scale=sigma, size=(self.ny, self.nx))\n self.image += noise\n return", "def EST_NOISE(images):\n num = images.shape[0]\n m_e_bar = sum(images)/num\n m_sigma = np.sqrt(sum((images - m_e_bar)**2)/(num - 1))\n \n return m_sigma", "def calcu_noise_sigma(ts_data):\r\n ts_data_len = len(ts_data)\r\n if ts_data_len < 3:\r\n raise Exception('ts_data_len should be at least 3')\r\n y_ip1 = ts_data[2:]\r\n y_i = ts_data[1:-1]\r\n y_im1 = ts_data[:-2]\r\n sigma_n2 = (2.0 / (3 * len(y_ip1))) * sum(\r\n (0.5 * y_im1 - y_i + 0.5 * y_ip1)**2)\r\n sigma_n = np.sqrt(sigma_n2)\r\n # sigma_n = 1e-5 if sigma_n < 1e-5 else sigma_n\r\n minimal_sigma_n = 1e-6\r\n sigma_n = minimal_sigma_n if sigma_n < minimal_sigma_n else sigma_n\r\n return sigma_n", "def additive_gaussian_noise(img, seed=None, std=(0, 0.4)):\n if seed is None:\n random_state = np.random.RandomState(None)\n else:\n random_state = np.random.RandomState(seed)\n sigma = std[0] + random_state.rand() * std[1]\n gaussian_noise = random_state.randn(*img.shape) * sigma\n noisy_img = img + gaussian_noise\n # noisy_img = np.clip(noisy_img, 0, 1)\n return noisy_img", "def awgn(input, noise_std):\n\tif not isinstance(noise_std, (list, tuple)):\n\t\tsigma = noise_std\n\telse: # uniform sampling of sigma\n\t\tsigma = noise_std[0] + \\\n\t\t (noise_std[1] - noise_std[0])*torch.rand(len(input),1,1,1, device=input.device)\n\treturn input + torch.randn_like(input) * (sigma/255)", "def awgn(input, noise_std):\n\tif not isinstance(noise_std, (list, tuple)):\n\t\tsigma = noise_std\n\telse: # uniform sampling of sigma\n\t\tsigma = noise_std[0] + \\\n\t\t (noise_std[1] - noise_std[0])*torch.rand(len(input),1,1,1, device=input.device)\n\treturn input + torch.randn_like(input) * (sigma/255)", "def add_noise(Y, sigma):\r\n return Y + np.random.normal(0, sigma, Y.shape)", "def noise(self, stddev):\n #add noise to weights\n pass", "def generate_gaussian():\n amp = 10 * numpy.random.chisquare(3)\n width = numpy.random.chisquare(3)\n mean = numpy.random.uniform(-10 + width, 10 - width)\n x = numpy.linspace(-10, 10, 500)\n y = amp * numpy.exp(- (x - mean) ** 2 / width ** 2)\n add_noise(y, 0.1)\n return x, y", "def GenerateGaussianNoise(PSD):\n\n Noise = np.zeros((N_fd),complex)\n # Generate noise from PSD \n Real = np.random.randn(N_fd)*np.sqrt(PSD/(4.*dF))\n Imag = np.random.randn(N_fd)*np.sqrt(PSD/(4.*dF))\n Noise = Real + 1j*Imag\n\n return Noise", "def make_noise_image(shape, distribution='gaussian', mean=None, stddev=None,\n 
seed=None):\n if mean is None:\n raise ValueError('\"mean\" must be input')\n\n rng = np.random.default_rng(seed)\n\n if distribution == 'gaussian':\n if stddev is None:\n raise ValueError('\"stddev\" must be input for Gaussian noise')\n image = rng.normal(loc=mean, scale=stddev, size=shape)\n elif distribution == 'poisson':\n image = rng.poisson(lam=mean, size=shape)\n else:\n raise ValueError(f'Invalid distribution: {distribution}. Use either '\n '\"gaussian\" or \"poisson\".')\n\n return image", "def gauss_noise(_spec, stdev):\n t, f = _spec.shape\n awgn = np.random.normal(0, stdev, (t, f))\n # gauss = gauss.reshape(t, f)\n _spec_noise = _spec + awgn\n return _spec_noise", "def make_4gaussians_image(noise=True):\n table = QTable()\n table['amplitude'] = [50, 70, 150, 210]\n table['x_mean'] = [160, 25, 150, 90]\n table['y_mean'] = [70, 40, 25, 60]\n table['x_stddev'] = [15.2, 5.1, 3.0, 8.1]\n table['y_stddev'] = [2.6, 2.5, 3.0, 4.7]\n table['theta'] = np.radians(np.array([145.0, 20.0, 0.0, 60.0]))\n\n shape = (100, 200)\n data = make_gaussian_sources_image(shape, table) + 5.0\n\n if noise:\n rng = np.random.RandomState(12345)\n data += rng.normal(loc=0.0, scale=5.0, size=shape)\n\n return data", "def gnoise(mag, sigma, mu):\n noise = np.random.normal(mu,sigma,n)\n mag = mag + noise\n return mag, noise", "def addNoise (image,noise_type=\"gauss\",var = .01):\n row,col,ch= image.shape\n if noise_type == \"gauss\": \n mean = 0.0\n #var = 0.001\n sigma = var**0.5\n gauss = np.array(image.shape)\n gauss = np.random.normal(mean,sigma,(row,col,ch))\n gauss = gauss.reshape(row,col,ch)\n #print(gauss)\n noisy = image + gauss*255\n return noisy.astype('uint8')\n elif noise_type == \"s&p\":\n s_vs_p = 0.5\n amount = 0.09\n out = image\n # Generate Salt '1' noise\n num_salt = np.ceil(amount * image.size * s_vs_p)\n coords = [np.random.randint(0, i - 1, int(num_salt))\n for i in image.shape]\n out[coords] = 255\n # Generate Pepper '0' noise\n num_pepper = np.ceil(amount* image.size * (1. 
- s_vs_p))\n coords = [np.random.randint(0, i - 1, int(num_pepper))\n for i in image.shape]\n out[coords] = 0\n return out\n elif noise_type == \"poisson\":\n vals = len(np.unique(image))\n vals = 2 ** np.ceil(np.log2(vals))\n noisy = np.random.poisson(image * vals) / float(vals)\n return noisy\n elif noise_type ==\"speckle\":\n gauss = np.random.randn(row,col,ch)\n gauss = gauss.reshape(row,col,ch) \n noisy = image + image * gauss\n return noisy\n else:\n return image", "def gaussian_noise(tensor, mean, stddev):\n noise = Variable(tensor.data.new(tensor.size()).normal_(mean, std))\n return tensor + noise", "def add_gaussian_noise(x, std):\n noise = x.new_zeros(x.size()).normal_(std=std)\n return x + noise", "def random_gaussian_noise(self, img, p = 0.5):\n if self.decision(p):\n mean = 30.0\n std = 80.0\n img = img + np.random.normal(mean, std, img.shape)\n\n img = np.clip(img, 0, 255).astype('uint8')\n return img", "def add_noise(image):\n image += 10e-10 * np.random.randn(image.shape[0], image.shape[1], 1)\n \n return image", "def gaussian_noise(shape, mean, std):\n return tf.random.normal(shape=shape, mean=mean, stddev=std, dtype=tf.float32)", "def naive_gaussian_noise(true_depth: np.ndarray) -> np.ndarray:\n return true_depth + np.random.normal(0, 0.0012 + 0.0019 * np.square(true_depth - 0.4))", "def random_gaussian(img, mu=0.0, sigma=4.0):\n\n out = np.copy(img.astype(np.float))\n rows, cols, depth = img.shape\n noise = np.random.normal(mu, sigma, (rows, cols))\n for dim in range(depth):\n out[:, :, dim] = img[:, :, dim] + noise\n out[out > 255] = 255\n out[out < 0] = 0\n out = out.astype(np.uint8)\n\n return out", "def gaussian_noise(self, tensor):\n return tensor.new_empty(tensor.size()).normal_(std=self._discreteness)", "def noise(x: np.ndarray) -> np.ndarray:\n\n return np.random.normal(loc=MEAN, scale=1e-2, size=1)", "def add_noise(image, noise, rate=0.05):\n\n if noise == \"gaussian\":\n row, col = image.shape\n var = ndimage.laplace(image).var()\n sigma = (var*rate) ** 0.5\n print(var, sigma)\n gauss = np.random.normal(loc=0, scale=sigma, size=(row, col)) * rate\n noisy = image + gauss\n # noisy = image + gauss\n return noisy\n\n elif noise == \"salt_pepper\":\n output = image.copy()\n black = 0\n white = 255\n probs = np.random.random(image.shape[:2])\n output[probs < (rate / 2)] = black\n output[probs > 1 - (rate / 2)] = white\n\n return output\n\n else:\n return image", "def generate_noisy_window(d, sigma= 1):\n return np.random.normal(scale=sigma, size=d)", "def noise(self):\n # Extract parameters\n pzs = self.params[0]\n # retrieve number of galaxies in each bins\n ngals = np.array([pz.gals_per_steradian for pz in pzs])\n if isinstance(self.config[\"sigma_e\"], list):\n sigma_e = np.array([s for s in self.config[\"sigma_e\"]])\n else:\n sigma_e = self.config[\"sigma_e\"]\n return sigma_e ** 2 / ngals", "def _gaussian_for_learn_denosing_model(image):\n return add_gaussian_noise(image, 0, 0.2)", "def softing_noise(image, kn):\n\n s_noise = cv2.GaussianBlur(image, (kn, kn), 0)\n\n return s_noise", "def add_image_noise(\n self, mu=0, sigma=0.005, only_positive=False, random_seed=None):\n if random_seed is not None:\n np.random.seed(random_seed)\n shape = self.signal.axes_manager.shape\n noise = normal(mu, sigma, shape)\n if only_positive:\n self._image_noise = np.absolute(noise)\n else:\n self._image_noise = noise", "def make_noisy_images(image):\r\n return apply_poisson_noise(image, random_state=12345)", "def estimate_noise_std(r_data):\n\n s_noise = 0.2 * 
np.max(abs(r_data))\n return s_noise", "def gaussian_black(z, mu: 'normal' = 0, sigma: (0.4,1) = 0.7):\n return 1/(np.sqrt(2*np.pi)*sigma)*np.exp(-np.power((z - mu)/sigma, 2)/2)", "def add_noise(pointcloud, stddev=0.01):\n result = pointcloud\n result[0:2, :] = np.random.normal(pointcloud[0:2, :], stddev)\n return result", "def gaussian_noise(inputs: torch.Tensor, stddev=None, sigma_max=0.06, channel_wise=1):\n if stddev is None:\n stddev = torch.rand(channel_wise) * sigma_max\n stddev = torch.tensor(stddev, device=inputs.device)\n if DATA_FORMAT == 'channels_first':\n stddev = stddev.reshape([1, -1] + [1] * (inputs.ndim - 2))\n else:\n stddev = stddev.reshape([1] * (inputs.ndim - 1) + [-1])\n noise_map = torch.randn_like(inputs) * stddev\n return noise_map", "def add_gaussian_noise(X, mu=0, sigma=0.1):\n noise = np.random.normal(0.0, sigma, size=X.size)\n return X + noise.reshape(X.shape)", "def gaussian_white(z, mu: 'normal' = 0, sigma: (0.4, 1) = 0.7):\n return 1 - gaussian_black(z, mu, sigma)", "def add_gaussian_noise(self, samples):\n\n if 'sigma' in self.gaussian_component:\n sigma = self.gaussian_component['sigma']\n return samples + self.random_state.normal(size=samples.shape) * sigma\n if 'sigmas' in self.gaussian_component:\n sigmas = self.gaussian_component['sigmas']\n return samples + self.random_state.normal(size=samples.shape) * sigmas\n\n return samples", "def make_100gaussians_image(noise=True):\n n_sources = 100\n flux_range = [500, 1000]\n xmean_range = [0, 500]\n ymean_range = [0, 300]\n xstddev_range = [1, 5]\n ystddev_range = [1, 5]\n params = {'flux': flux_range,\n 'x_mean': xmean_range,\n 'y_mean': ymean_range,\n 'x_stddev': xstddev_range,\n 'y_stddev': ystddev_range,\n 'theta': [0, 2 * np.pi]}\n\n rng = np.random.RandomState(12345)\n sources = QTable()\n for param_name, (lower, upper) in params.items():\n # Generate a column for every item in param_ranges, even if it\n # is not in the model (e.g., flux). 
However, such columns will\n # be ignored when rendering the image.\n sources[param_name] = rng.uniform(lower, upper, n_sources)\n xstd = sources['x_stddev']\n ystd = sources['y_stddev']\n sources['amplitude'] = sources['flux'] / (2.0 * np.pi * xstd * ystd)\n\n shape = (300, 500)\n data = make_gaussian_sources_image(shape, sources) + 5.0\n\n if noise:\n rng = np.random.RandomState(12345)\n data += rng.normal(loc=0.0, scale=2.0, size=shape)\n\n return data", "def _gauss_noise(self, shape):\n\n n = np.random.normal(0, 1, shape)\n return self.e*n", "def add_noise(image, type=\"s&p\"):\n # Get the width and height of the image\n w, h = image.size\n\n # Add salt and pepper noise\n if type == \"s&p\":\n # Choose a random amount of noise (lower number = more noise)\n salt = np.random.randint(100, 400)\n # Generate an array to determine location of noise\n noise = np.random.randint(salt+1, size=(h, w))\n\n # Find the index of the salt and pepper (respectively location with max/min random value)\n idx_salt = noise == salt\n idx_pepper = noise == 0\n\n # Create a numpy array from the initial image and add the salt and pepper\n np_img = np.array(image)\n np_img[idx_salt, :] = 255\n np_img[idx_pepper, :] = 0\n\n return Image.fromarray(np.uint8(np_img))\n\n # Add gaussian noise to image\n if type == \"gauss\":\n # Get the number of channels\n c = len(image.getbands())\n\n # Get a random value for the mean and the standard deviation of the noise\n mean = np.random.randint(-4, 5)\n std = np.random.randint(5)\n\n # Generate the noise array\n noise = np.random.normal(mean, std, (h, w, c))\n\n # Add noise to the image\n return Image.fromarray(np.uint8(np.array(image) + noise))\n\n else:\n # If the name of the given noise is not correct\n return image", "def generate_fake_noise(inputs, size):\n return np.random.normal(-0.0289923828125, 1.9391296947313124, (inputs, size)).astype(np.float32)", "def Noise(self, eps, size):\n return eps * (np.random.uniform(size=size) * 2 - 1)", "def sigmanorm(y):\n y = y.copy()\n y -= y.mean() # set to zero mean\n y /= y.std() # rescale to units of sigma\n return y", "def noise_generator(n, mean, std, fractindex):\n if fractindex not in VALID_FRACT:\n raise ValueError(\"results: status must be one of %r.\" % VALID_FRACT)\n \n stdev = std\n \n b = 2*fractindex-1\n print('beta: ', b)\n \n bdis = np.zeros(n)\n\n bdis[0] = 1\n for i in range(1,n):\n bdis[i] = bdis[i-1] * (0.5 * b + (i-1))/i # note that b is the shape parementer (b)\n\n plt.plot(bdis)\n plt.show\n\n wnt = np.random.normal(mean, stdev, size = n)\n print('WhiteNoise Stdev: ', np.std(wnt))\n plt.plot(wnt)\n plt.show()\n\n bdis_freq = np.fft.fft(bdis)\n wnt_freq = np.fft.fft(wnt)\n\n bdis_freq = bdis_freq[1:n+1]\n wnt_freq = wnt_freq[1:n+1]\n\n freq_total = bdis_freq * wnt_freq\n \n NumUniquePts = n/2 + 1\n NumUniquePts = int(NumUniquePts)\n j = np.arange(1, NumUniquePts)\n \n if fractindex > 1.0:\n j = j\n elif fractindex <= 1.0:\n j = j**0.5\n \n ft_half1 = freq_total[1:NumUniquePts]/j\n\n real = np.real(freq_total[1:NumUniquePts+1])\n real = np.flip(real, axis=0)\n\n imaginary = np.imag(freq_total[1:NumUniquePts+1])\n imaginary = np.flip(imaginary, axis=0)\n imaginary = 1j * imaginary\n\n ft_half2 = real - imaginary\n\n ft = np.hstack((ft_half1, ft_half2))\n \n x = np.fft.ifft(ft)\n x = np.real(x[:n])\n\n mean_diff = mean - np.mean(x)\n x = mean_diff + x\n print(np.mean(x))\n print(np.std(x))\n plt.plot(x)\n plt.show()\n \n return x", "def noise(self):\n # Extract parameters\n pzs = self.params[0]\n ngals = 
np.array([pz.gals_per_steradian for pz in pzs])\n return 1.0 / ngals", "def kernel_sigma(n_kernels):\n sigmas = [0.001] # exact match small variance means exact match ?\n if n_kernels == 1:\n return sigmas\n return sigmas + [0.1] * (n_kernels - 1)", "def drop_and_noise(image, sigma_d, percentage=0.8):\n M, N = image.shape[:2]\n n = N * M\n p = m.floor(percentage * n)\n image = np.cast[np.float32](image)\n\n missing_pixels_ind = np.random.permutation(n)[:p]\n\n mask = np.ones((M * N,), dtype=np.bool)\n mask[missing_pixels_ind] = 0\n mask = mask.reshape((M, N, 1))\n\n maskf = np.cast[np.float32](mask)\n y_clean = image * maskf\n\n noise = np.random.normal(loc=0, scale=sigma_d, size=image.shape) * maskf\n y = y_clean + noise\n\n return y, mask", "def add_noise(arr, sigma):\n dims = arr.shape\n arr += sigma * noise(*dims)", "def makeGaussian(height, width, sigma=3, center=None):\n x = np.arange(0, width, 1, float)\n y = np.arange(0, height, 1, float)[:, np.newaxis]\n if center is None:\n x0 = width // 2\n y0 = height // 2\n else:\n x0 = center[0]\n y0 = center[1]\n\n return np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / sigma ** 2)", "def real_blur_and_noise(image, kernel, sigma_d):\n degraded = filter_image(image, kernel, mode=\"valid\", boundary=\"fill\")\n noise = np.random.normal(0.0, sigma_d, degraded.shape).astype(np.float32)\n degraded = degraded + noise\n initial = np.pad(degraded, ((kernel.shape[0] // 2, kernel.shape[0] // 2),\n (kernel.shape[1] // 2, kernel.shape[1] // 2),\n (0, 0)), mode=\"edge\")\n initial = edgeTaper(initial, kernel)\n return initial", "def flatten(img,sigma=20.) :\n\n for i in range(img.shape[0]) :\n img[i] /= np.median(img[i])\n for i in range(img.shape[1]) :\n img[:,i] /= np.median(img[:,i])\n\n hw=int(3*sigma)\n u=np.linspace(-hw,hw,2*hw+1)\n x=np.tile(u,(2*hw+1,1))\n y=x.T\n k=np.exp(-x**2/2/sigma**2-y**2/2/sigma**2)\n k /= np.sum(k)\n smooth=convolve2d(img,k,weight=None)\n img /= smooth\n\n return img", "def _FWHMGauss(sigma, pixel=12):\n return sigma*2*np.sqrt(2*np.log(2))*pixel", "def create_gaussian_array(self):\n\n # Fill array of size l x w with Gaussian Noise.\n terrain_length = int(ceil(self.length/self.resolution))\n terrain_width = int(ceil(self.width/self.resolution))\n gaussian_array = np.random.normal(self.mu, self.sigma, (terrain_length,terrain_width))\n\n # Filter the array to smoothen the variation of the noise\n gaussian_array = gaussian_filter(gaussian_array, self.sigma_filter)\n\n return gaussian_array", "def _get_noise(self, shape, dtype=None):\n return np.random.normal(self._bias, self._scale, shape).astype(dtype)", "def make_time_series_noise(dates, mean=0., std=1.):\n # TODO: implement Gaussian noise with mean/std\n # TODO: add kwarg to parametrize noise from clouds/shadows\n pass", "def initiategaussian(sd, x0):\n y = np.exp(-x**2/(2*sd**2))\n return y", "def add_imageNoise(img):\n if not np.all(img >= 0):\n print 'make sure the image pixel values are positive definite'\n sys.exit()\n noise = st.poisson.rvs(1.,loc = -1.,scale=1.,size=img.shape)*np.sqrt(img)\n return noise", "def gaussian(x, sigma):\n try: r = np.exp(-0.5*(x/sigma)**2) \n except: r = np.zeros(len(x))\n return r", "def noise_generator(self, power=None, SNR=None, size=None):\r\n alpha = self.db2power(SNR)\r\n sigma = np.sqrt(power / alpha) # 计算噪声标准差\r\n # 产生噪声\r\n noise_data = np.sqrt(0.5) * (np.random.normal(0, sigma, size=size) + np.random.normal(0, sigma, size=size) * 1j)\r\n noise_data = noise_data.astype(np.complex64)\r\n return noise_data", "def 
get_sigma_psf(self):\n pass", "def _calc_sigma(self):\n if self.data is None:\n self._initial_blur()\n previous = self.init_sigma\n incr = 0\n self.sigmas = [(previous, incr)]\n for i in range(1, self.scale_per_octave + 3):\n sigma_abs = self.init_sigma * (self.dest_sigma / self.init_sigma) ** (1.0 * i / (self.scale_per_octave))\n increase = previous * sqrt((self.dest_sigma / self.init_sigma) ** (2.0 / self.scale_per_octave) - 1.0)\n self.sigmas.append((sigma_abs, increase))\n previous = sigma_abs\n logger.debug(\"Sigma= %s\" % self.sigmas)", "def task_gaussian_noise(input_array, noise_factor):\n return(np.random.normal(0, noise_factor, input_array.shape))", "def make_gaussian(size, sigma=10, center=None):\n\n x = np.arange(0, size[1], 1, float)\n y = np.arange(0, size[0], 1, float)\n y = y[:, np.newaxis]\n\n if center is None:\n x0 = y0 = size[0] // 2\n else:\n x0 = center[0]\n y0 = center[1]\n\n return np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / sigma ** 2)", "def make_noise(self, num):\n return np.random.randn(num, self.seq_length, self.noise_dim)", "def generate_noise_image(content_image, noise_ratio = CONFIG.NOISE_RATIO):\n\tnoise_img = np.random.uniform(-20,20,(1, CONFIG.IMAGE_HEIGHT, CONFIG.IMAGE_WIDTH, CONFIG.COLOR_CHANNELS)).astype('float32')\n\n\t# Setting the resulting image to be the weighted average of the content image and noise_image\n\tresult_img = noise_img * noise_ratio + content_image * (1 - noise_ratio)\n\n\treturn result_img", "def noisePreset() :\n s.noisePreset()", "def generate_noise(\n self,\n noise_iteration=(1, 1),\n noise_size=(1, 1),\n noise_value=(0, 128),\n noise_background=(255, 255),\n noise_sparsity=(0.4, 0.6),\n noise_concentration=(0.4, 0.6),\n xsize=1500,\n ysize=1500,\n ):\n\n # generate random iterations\n iterations = random.randint(noise_iteration[0], noise_iteration[1])\n\n # generate background value\n background_value = random.randint(noise_background[0], noise_background[1])\n\n # initialize blank noise mask\n img_mask = np.full((ysize, xsize), fill_value=background_value, dtype=\"int\")\n\n # any invalid noise type will reset noise type to 0\n if self.noise_type not in [1, 2, 3, 4, 5]:\n noise_type = random.randint(1, 5)\n else:\n noise_type = self.noise_type\n\n # random location with no sides if no side is chosen\n if self.noise_side not in self.sides:\n noise_side = random.choice(self.sides)\n else:\n noise_side = self.noise_side\n\n # loop each iterations\n for _ in range(iterations):\n\n # divider to rescale noise mask to larger size\n y_divider = random.randint(noise_size[0], noise_size[1])\n x_divider = random.randint(noise_size[0], noise_size[1])\n\n # generate noise mask for current iteration\n img_mask_temporary = self.generate_mask_main(\n noise_type,\n noise_side,\n noise_value,\n noise_background,\n noise_sparsity,\n noise_concentration,\n int(xsize / x_divider),\n int(ysize / y_divider),\n )\n img_mask_temporary = cv2.resize(\n img_mask_temporary.astype(\"uint8\"),\n (xsize, ysize),\n interpolation=cv2.INTER_CUBIC,\n )\n\n # merge noise mask in each iteration by getting their min value\n img_mask = np.minimum(img_mask_temporary, img_mask)\n\n # output needs uint8 type\n img_mask = img_mask.astype(\"uint8\")\n\n return img_mask", "def ICA_Denoise(Y, ica_model, noise_std):\n\n # TODO: YOUR CODE HERE", "def threshold(image, sigma):\n image[image<1] = 0\n mean = np.mean(image[image>=1])\n std = np.std(image[image>=1])\n if (sigma == 0):\n thresh = mean + 0.5*std\n else:\n thresh = min(mean + 0.5*std, 30/sigma)\n 
image[image>thresh] = 255\n image[image<=thresh] = 0\n numAfterThresh = (image>0).sum()\n\n print(\"Average: {:.2f}\".format(mean))\n print(\"Std dev: {:.2f}\".format(std))\n print(\"Threshold: {:.2f}\".format(thresh))\n print(\"Number after threshold: {}\".format(numAfterThresh))\n return image", "def _addNoise(self):\n self.dispNoise = self.dispRaw.copy()\n self.dispNoise[:, 0] += self.sigmaEast * numpy.random.randn(self.numStations)\n self.dispNoise[:, 1] += self.sigmaNorth * numpy.random.randn(self.numStations)\n self.dispNoise[:, 2] += self.sigmaUp * numpy.random.randn(self.numStations)\n return", "def _sigma_1(gam, eps):\n s1 = 4 * r0**2 * alpha / eps / mec2_unit\n s2 = 1 + (1./3. - eps/gam) * (1 - eps/gam)\n s3 = np.log(2 * gam * (gam - eps) / eps) - 1./2.\n s3[np.where(gam < eps)] = 0.0\n return s1 * s2 * s3", "def _get_noise(self, shape, dtype=None):", "def salt_and_pepper_noise(image, prob):\n output = np.zeros(image.shape,np.uint8)\n thres = 1 - prob \n for i in range(image.shape[0]):\n for j in range(image.shape[1]):\n rdn = random.random()\n if rdn < prob:\n output[i][j] = 0\n elif rdn > thres:\n output[i][j] = 255\n else:\n output[i][j] = image[i][j]\n return output", "def get_sigma(rate, dt=1 * units.ns):\n return stats.norm.isf(rate * dt)", "def create_gaussian_filter(size, sigma):\n h = size[0] #height of the template\n w = size[1] #width of the template \n if h % 2 == 0: h += 1 #add 1 if dimensions are even\n if w % 2 == 0: w += 1\n x = math.floor(h/2)\n y = math.floor(w/2) \n sum = 0\n #create our template\n template = np.zeros((h,w))\n #fill the template in with the numbers from Gaussian distribution\n for i in range(h):\n for j in range(w):\n template[i,j] = math.exp(-((((j-x)**2)+((i-y)**2))/(2*(sigma**2))))\n sum = sum + template[i,j]\n #normalise the numbers\n gaussian_filter = template/sum\n return gaussian_filter", "def addSTDdevIndices(img):\n\t\t\timg = img.addBands(img.normalizedDifference(['green','swir1']).rename(['ND_green_swir1'])); # NDSI, MNDWI\n\t\t\timg = img.addBands(img.normalizedDifference(['nir','red']).rename(['ND_nir_red'])); # NDVI\n\t\t\timg = img.addBands(img.normalizedDifference(['nir','swir2']).rename(['ND_nir_swir2'])); # NBR, MNDVI\n\t\t\t\n\t\t\treturn img;", "def add_gaussian_noise(images: list, var: list, random_var: float=None, gauss_noise: list=None):\n if random_var is None:\n random_var = np.random.uniform(var[0], var[1])\n mean = 0\n new_images = []\n gauss_noise_out = []\n for i,image in enumerate(images):\n row, col, c = image.shape\n if gauss_noise is None or \\\n (gauss_noise is not None and row*col*c !=\n gauss_noise[i].shape[0]*gauss_noise[i].shape[1] * gauss_noise[i].shape[2]):\n gauss = np.random.normal(mean, random_var * 127.5, (row, col, c))\n else:\n gauss = gauss_noise[i]\n gauss_noise_out.append(gauss)\n gauss = gauss.reshape(row, col, c)\n image1 = np.clip(image + gauss, 0., 255.)\n new_images.append(image1)\n return new_images, random_var, gauss_noise_out", "def gaussian(size,sigma):\n a,b=np.ogrid[-size/2:size/2,-size/2:size/2]\n mask = a**2+b**2\n mask = np.exp(-mask.astype('float')/(2*float(sigma**2)))\n return mask", "def add_noise(np_image, amount):\n noise = np.random.randn(np_image.shape[0],np_image.shape[1])\n norm_noise = abs(noise/np.max(noise))\n np_image = np_image + norm_noise*np.max(np_image)*amount\n \n return np_image", "def test_denoising(image, model, denoise_function,\n noise_range=(0.01, 0.05, 0.1, 0.2), patch_size=(8, 8)):\n h, w = np.shape(image)\n noisy_images = np.zeros((h, w, 
len(noise_range)))\n denoised_images = []\n cropped_original = crop_image(image, patch_size)\n\n # make the image noisy:\n for i in range(len(noise_range)):\n noisy_images[:, :, i] = image + (\n noise_range[i] * np.random.randn(h, w))\n\n # denoise the image:\n for i in range(len(noise_range)):\n denoised_images.append(\n denoise_image(noisy_images[:, :, i], model, denoise_function,\n noise_range[i], patch_size))\n\n # calculate the MSE for each noise range:\n noisy_mses = {}\n denoised_mses = {}\n for i in range(len(noise_range)):\n print(\"noisy MSE for noise = \" + str(noise_range[i]) + \":\")\n noisy_mse = np.mean((crop_image(noisy_images[:, :, i],\n patch_size) - cropped_original) ** 2)\n noisy_mses[str(noise_range[i])] = noisy_mse\n print(noisy_mse)\n print(\"denoised MSE for noise = \" + str(noise_range[i]) + \":\")\n denoised_mse = np.mean((cropped_original - denoised_images[i]) ** 2)\n denoised_mses[str(noise_range[i])] = denoised_mse\n print(denoised_mse)\n\n plt.figure(figsize=(20, 20))\n plt.axis('off')\n for i in range(len(noise_range)):\n plt.subplot(2, len(noise_range), i + 1, xlabel='Noisy image', xticks=[], yticks=[])\n plt.imshow(noisy_images[:, :, i], cmap='gray')\n plt.subplot(2, len(noise_range), i + 1 + len(noise_range), xlabel='Denoised image', xticks=[], yticks=[])\n plt.imshow(denoised_images[i], cmap='gray')\n plt.show()\n return noisy_mses, denoised_mses", "def make_noise(self, num):\n return np.random.randn(num, self.seq_length + 2 * self.seq_pad,\n self.noise_dim)", "def estimate_noise(spec, ind_range=None):\n\n # -- set the index range (nb, ends at len(spec)-1 since the derivative has\n # one fewer points than the spectrum).\n ind_range = ind_range if ind_range else [0,spec.shape[0]-1]\n\n # -- compute the derivative and estimate the noise over the range\n noise = (spec[1:]-spec[:-1])[ind_range[0]:ind_range[1]].std(0)/np.sqrt(2.0)\n\n return noise", "def white_noise():\n return random.randint(-32767, 32767)", "def MVN_Denoise(Y, mvn_model, noise_std):\n return calc_weiner_filter(Y, mvn_model.mean, mvn_model.cov, noise_std)", "def make_DOG(inner_sigma, x):\n y = x\n outer_sigma = inner_sigma*5\n X, Y = np.meshgrid(x, y)\n inner_gaussian = 1./(2.*np.pi*inner_sigma) * np.exp(-(X**2 + Y**2)/2./inner_sigma**2) \n outer_gaussian = 1./(2.*np.pi*outer_sigma) * np.exp(-(X**2 + Y**2)/2./outer_sigma**2) \n return inner_gaussian - outer_gaussian/2 #weaker surround works better with our weights, which don't account for bursts ", "def create_synthetic_noise_dataset(cfg):\n from colorednoise import powerlaw_psd_gaussian\n\n betas = np.linspace(cfg['data.mix_synthetic_noise.min_beta'],\n cfg['data.mix_synthetic_noise.max_beta'],\n num=cfg['data.mix_synthetic_noise.num_samples'])\n sample_rate = cfg['data.sample_rate']\n segment_length = 2 * cfg['data.len_min']\n wavs = [powerlaw_psd_gaussian(beta, sample_rate * segment_length)\n for beta in betas]\n wavs = [audio.normalize(wav, low=-1, high=1) for wav in wavs]\n return NoiseDataset(wavs)", "def _get_add_noise(stddev, seed: int = None):\n if distutils.version.LooseVersion(\n tf.__version__) < distutils.version.LooseVersion('2.0.0'):\n\n # The seed should be only used for testing purpose.\n if seed is not None:\n tf.random.set_seed(seed)\n\n def add_noise(v):\n return v + tf.random.normal(\n tf.shape(input=v), stddev=stddev, dtype=v.dtype)\n else:\n random_normal = tf.random_normal_initializer(stddev=stddev, seed=seed)\n\n def add_noise(v):\n return v + tf.cast(random_normal(tf.shape(input=v)), dtype=v.dtype)\n\n return 
add_noise", "def sampleGaussian(self, mu, log_sigma):\n # reparameterization trick\n epsilon = tf.random_normal(tf.shape(log_sigma), name=\"epsilon\")\n return mu + epsilon * tf.exp(log_sigma) # N(mu, I * sigma**2)", "def standardize(image, mean=[0.48462227599918, 0.45624044862054, 0.40588363755159], std=[0.22889466674951, 0.22446679341259, 0.22495548344775]):\n image = image.astype(np.float32) / 255.0\n image = np.divide(np.subtract(image, mean), std)\n return image", "def get_image_with_poisson_noise(image):\r\n img = tf_norm_crop_resize_image(image, resize_dim=(64,64))\r\n noisy_img = np.clip(make_noisy_images(img*255.)/255., 0., 1.)\r\n return noisy_img", "def add_gaussian_noise(self, stdev, iteration = 1, method = 'absolute', normalize = \"on\"):\n\n self.number_of_replicate = iteration\n for fragment in self.fragments_for_mdv_calculation:\n number_of_mass_data = max(self.mdv[fragment].keys()) + 1\n noise = numpy.zeros((number_of_mass_data, iteration))\n for i in range(iteration):\n for number in range(number_of_mass_data):\n if method == 'relative':\n noise[number, i] = (numpy.random.randn() * stdev + 1) * self.mdv[fragment][number]['ratio']\n else:\n noise[number, i] = (numpy.random.randn() * stdev) + self.mdv[fragment][number]['ratio']\n if noise[number, i] < 0.0:\n noise[number, i] = 0.0\n #各フラグメント毎に総和を1にする。\n if normalize == \"on\":\n sumvalue = sum(noise[:,i])\n noise[:,i] = noise[:,i] / sumvalue\n for number in range(number_of_mass_data):\n self.mdv[fragment][number]['ratio']= sum(noise[number,:])/iteration\n #self.mdv[fragment][number]['std'] = numpy.std(noise[number,:])\n self.mdv[fragment][number]['data'] = numpy.array(noise[number,:])", "def sigma(h,T):\r\n P = 1/(1+np.exp(-2*h/T)) #from the mean field results\r\n ll = len(h)\r\n rand = np.random.rand(ll)\r\n pos = np.where(P-rand>0)[0]\r\n r = -np.ones(ll)\r\n r[pos] = +1\r\n return r", "def scipy_smooth(img, sigma=5):\n return ndimage.gaussian_filter(img, sigma=sigma)", "def GSM_Denoise(Y, gsm_model, noise_std):\n X = np.empty(Y.shape)\n k = gsm_model.mix.shape[0]\n I = np.identity(gsm_model.cov[0, :].shape[0])\n for i in range(k):\n mvn = multivariate_normal(cov=(gsm_model.cov[i, :] + ((noise_std**2) * I)))\n upper_arg = gsm_model.mix[i] * (mvn.logpdf(Y[:, i]))\n lower_arg = 0\n for j in range(k):\n inner_mvn = multivariate_normal(cov=(gsm_model.cov[j] + ((noise_std**2) * I)))\n lower_arg += gsm_model.mix[j] * (inner_mvn.logpdf(Y[:, i]))\n c_i = upper_arg / lower_arg\n weiner_i = calc_weiner_filter(Y, np.zeros(Y.shape[0]), gsm_model.cov[i, :], noise_std)\n X += c_i * weiner_i\n return X", "def kernel_gaussiano(image: np.ndarray, sigma: float, kind: str = 'low') -> np.ndarray:\n U, V = fourier_meshgrid(image)\n D = fourier_distance(U, V)\n H = np.exp( (-1.0 * D) / (2.0 * sigma**2) )\n \n if kind == 'high' or kind == 'highpass':\n H = 1.0 - H\n \n return H" ]
[ "0.7620322", "0.7568027", "0.73862773", "0.7340184", "0.70870167", "0.70391494", "0.69299024", "0.6861069", "0.6838096", "0.6838096", "0.682284", "0.6760456", "0.6759536", "0.67473197", "0.6671933", "0.66611886", "0.6641475", "0.66406924", "0.66172576", "0.6612591", "0.65864265", "0.65774983", "0.65765923", "0.6555375", "0.6541537", "0.6509458", "0.6498063", "0.6495391", "0.64597684", "0.6458453", "0.6457465", "0.6429288", "0.64284694", "0.64243513", "0.6393034", "0.63697535", "0.63470703", "0.6326588", "0.6307213", "0.630143", "0.62875634", "0.6261008", "0.62529135", "0.6249006", "0.61880517", "0.61761683", "0.616784", "0.6163098", "0.61541635", "0.61332005", "0.61054385", "0.60982966", "0.6074332", "0.60614824", "0.6053994", "0.6049023", "0.60473996", "0.60408926", "0.60152644", "0.6011519", "0.6009819", "0.60087454", "0.6001367", "0.6001354", "0.5985209", "0.5972889", "0.5961943", "0.59462184", "0.59460557", "0.59436375", "0.5942034", "0.593967", "0.59383345", "0.59376675", "0.5922431", "0.59048975", "0.5898023", "0.58976614", "0.58907855", "0.5880775", "0.5880528", "0.58765113", "0.5867165", "0.586603", "0.5857762", "0.5857499", "0.58494973", "0.5838661", "0.5823566", "0.58190185", "0.58066475", "0.5799588", "0.5798871", "0.57902163", "0.5784496", "0.5782808", "0.5782275", "0.577743", "0.57729137", "0.5764255" ]
0.68094635
11
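Each row's metadata marks only the triplet objective over (query, document, negatives), with the self and paired objectives left empty, so a row is presumably consumed as anchor/positive/negative triplets rather than as self- or paired-supervision. The sketch below is a usage illustration under that assumption; the row_to_triplets name and the dict-style row access are illustrative, not part of the dataset.

# Hypothetical usage sketch: expand one row into (anchor, positive, negative) triplets,
# following the row's metadata "objective": {"triplet": [["query", "document", "negatives"]]}.
def row_to_triplets(row):
    # one triplet per negative candidate; field names mirror the columns of this dump
    return [(row["query"], row["document"], negative) for negative in row["negatives"]]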
Create a blank image.
def model_blank(nx, ny=1, nz=1, bckg = 0.0): e = EMData() e.set_size(nx, ny, nz) e.to_zero() if( bckg != 0.0): e+=bckg return e
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _blankimage():\n img = TK.PhotoImage(width=1, height=1)\n img.blank()\n return img", "def create_empty_image(width=512, height=512):\n blank_img = np.zeros((width, height, 3), np.uint8)\n # Return instance of the class\n return ExtendedImage(blank_img)", "def blank_image(height, width):\n all_green = create_uniform_image(height, width, [0, 255, 0])\n return all_green", "def gen_empty_img(w=640, h=480):\n return np.zeros((h, w, 3), np.uint8)", "def blank(width, height, channels=3, value=0):\n blank_image = np.full((height, width, channels), value, np.uint8)\n return Image(img=blank_image)", "def create_white_picture(pic_width, pic_height):\n white_picture = Image.new(\"1\", (pic_width, pic_height), (1))\n return white_picture", "def create_blank(width, height, rgb_color=(0, 0, 0)):\r\n # Create black blank image\r\n image = np.zeros((height, width, 3), np.uint8)\r\n\r\n # Since OpenCV uses BGR, convert the color first\r\n color = tuple(reversed(rgb_color))\r\n # Fill image with color\r\n image[:] = color\r\n\r\n return image", "def create_blank(width, height, rgb_color=(0, 0, 0)):\n # Create black blank image\n image = np.zeros((height, width, 3), np.uint8)\n\n # Since OpenCV uses BGR, convert the color first\n color = tuple(reversed(rgb_color))\n # Fill image with color\n image[:] = color\n\n return image", "def create_blank(width, height, rgb_color=(0, 0, 0)):\n # Create black blank image\n image = np.zeros((height, width, 3), np.uint8)\n\n # Since OpenCV uses BGR, convert the color first\n color = tuple(reversed(rgb_color))\n # Fill image with color\n image[:] = color\n\n return image", "def create_blank(w, h, rgb_color=(0, 0, 0)):\n image = np.zeros((h, w), np.uint8)\n color = tuple(reversed(rgb_color))\n image[:] = 0\n return image", "def create_blank(w, h, rgb_color=(0, 0, 0)):\n image = np.zeros((h, w), np.uint8)\n color = tuple(reversed(rgb_color))\n image[:] = 0\n return image", "def create_image(self):\n\n self._image = 255 * np.ones((self._height, self._width, 3), np.uint8)", "def EmptyBitmap(*args, **kwargs):\n val = _gdi_.new_EmptyBitmap(*args, **kwargs)\n return val", "def blanck_picture(img):\r\n blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)\r\n blank_image[0:, 0:] = 0, 0, 0\r\n return blank_image", "def create_full_pic(self):\n self.create_half_pic()\n mirror_update(self.flag)", "def create_blank(cls, n: int, m: int):\n image = np.zeros((n, m, 3), np.uint8)\n\n color = (255, 255, 255)\n # Fill image with color\n image[:] = color\n\n return image", "def make_image(self, path):\n\t\treturn None", "def create_one_image(attrs=None):\n attrs = attrs or {}\n\n # Set default attribute\n image_info = {\n 'id': str(uuid.uuid4()),\n 'name': 'image-name' + uuid.uuid4().hex,\n 'owner': 'image-owner' + uuid.uuid4().hex,\n 'container_format': '',\n 'disk_format': '',\n 'min_disk': 0,\n 'min_ram': 0,\n 'is_public': True,\n 'protected': False,\n 'properties': {'Alpha': 'a', 'Beta': 'b', 'Gamma': 'g'},\n 'status': 'status' + uuid.uuid4().hex,\n }\n\n # Overwrite default attributes if there are some attributes set\n image_info.update(attrs)\n\n return image.Image(**image_info)", "def blanck_picture(img):\r\n\r\n blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)\r\n blank_image[0:img.shape[0], 0:img.shape[1]] = 0, 0, 0", "def new_image(self, width, height, background=None, mode=\"RGBA\"):\n self.img = PIL.Image.new(mode, (width, height), background)\n self.width,self.height = width,height\n self.drawer = aggdraw.Draw(self.img)", "def 
generate_image(self):\n pass", "def get_blank_img(self):\n if photos_settings.DEBUG:\n return self.get_placeholder_img()\n\n out = {\n 'blank': True,\n 'width': self.max_width,\n 'height': self.max_height,\n 'url': photos_settings.EMPTY_IMAGE_SITE_PREFIX + 'img/empty/%s.png' % (self.name),\n }\n return out", "def test_create_image(self):\n pass", "def new_test_image():\n warnings.warn(DeprecationWarning(\n \"new_test_image() is deprecated in favour of the get_sample_image() \"\n \"context manager.\"), stacklevel=2)\n image_name = 'test-{}.png'.format(uuid.uuid4())\n image = Image.new('RGBA', size=(50, 50), color=(256, 0, 0))\n ImageDraw.Draw(image)\n byte_io = BytesIO()\n image.save(byte_io, 'png')\n byte_io.seek(0)\n return image_name, ContentFile(byte_io.read(), image_name)", "def NullImageProto(msg:str = \"\"):\n return NLImage(width=0, height=0, data=msg)", "def _clear(self):\n\n self.image = Image.new(\"RGB\", (self._width, self._height), self._color)", "def create_image(self):\n # how many categories?\n aspect_ratio = float(4) / 3\n self.width = int(math.sqrt(aspect_ratio * self.total))\n self.height = int(self.width / aspect_ratio)\n\n img = Image.new(\"RGB\", (self.width, self.height))\n return img", "def plot_blank(self):\n self.figure_bmp.SetBitmap(self.controller.plot_blank())", "def _ensure_empty_image_ok(self):\n if self.ignore_empty:\n return\n\n if len(self) > 1:\n raise RuntimeError(\n \"Cannot write None image at extension %d\" % len(self))\n if 'ndims' in self[0]._info:\n raise RuntimeError(\"Can only write None images to extension zero, \"\n \"which already exists\")", "def make_image(storage, name, width, height, format='JPEG', mode='RGB'):\n im = Image.new(mode, (width, height))\n draw = ImageDraw.Draw(im)\n draw.rectangle([0, 0, width // 2, height // 2], '#F00')\n draw.rectangle([width // 2, 0, width, height // 2], '#0F0')\n draw.rectangle([0, height // 2, width // 2, height], '#00F')\n draw.rectangle([width // 2, height // 2, width, height], '#000')\n draw.rectangle([width // 4, height // 4, 3 * width // 4, 3 * height // 4], '#FFF')\n im_bytes_io = io.BytesIO()\n im.save(im_bytes_io, format)\n im_bytes_io.seek(0)\n storage.save(name, im_bytes_io)", "def create_artificial_image(self):\n background = self.BGI.create_background_image()\n star_PSF = self.PSF.create_star_PSF()\n header = self.HDR.create_header()\n\n fits.writeto(self.image_dir + self.image_name + '.fits',\n background + star_PSF, overwrite=True, header=header)", "def new_image(x, y, out, data):\n img = Image.new('RGB', (x, y))\n img.putdata(data)\n img.save(out)", "def clear(self):\n self.display(Image.new(self.mode, self.size))", "def _createimage(self, image):\n return self.cv.create_image(0, 0, image=image)", "def get_blank_image(width: int, height: int, n_channels: int, cval=255) -> np.ndarray:\n if n_channels == 0:\n image = np.zeros((height, width)) + 255\n else:\n image = np.zeros((height, width, n_channels)) + cval\n return image.astype(\"uint8\")", "def initialise(self):\r\n self.set_image(\"wall.png\")\r\n return self", "def __generate_image(self):\n\t\tself.img = np.ones((self.size*self.width+self.border,self.size*self.width+self.border,1), np.uint8)*255\n\t\tfor i in range(len(self.matrix)):\n\t\t\tfor j in range(len(self.matrix)):\n\t\t\t\tif self.matrix[j][i] == 1:\n\t\t\t\t\tself.img = 
cv2.rectangle(self.img,(i*self.width+int(self.border/2),j*self.width+int(self.border/2))\n\t\t\t\t\t\t,(i*self.width+self.width+int(self.border/2),j*self.width+self.width+int(self.border/2)),(0,0,0),-1)\n\t\tif '.' in self.name:\n\t\t\tcv2.imwrite(self.name,self.img)\n\t\telse:\n\t\t\tcv2.imwrite(self.name+'.jpg',self.img)\n\t\tcv2.imshow(\"Image\",self.img)\n\t\tcv2.waitKey(0)\n\t\tcv2.destroyAllWindows()", "def create_base_image(self, builder, template, parameters):", "def generateEmptySprite(cls, size, alpha=None, fillWith=None, colorKey=None):\n # BBB: move this away from here, like in a simple module\n surface = pygame.Surface(size, flags=HWSURFACE|HWPALETTE, depth=32)\n if fillWith:\n surface.fill(fillWith)\n if alpha is not None:\n surface.set_alpha(alpha)\n if colorKey:\n surface.set_colorkey(colorKey)\n return surface", "def _EmptyBitmapRGBA(*args, **kwargs):\n return _gdi_._EmptyBitmapRGBA(*args, **kwargs)", "def no_bin(image, *args, **kwargs):\n return image", "def initImg(self):\n self.img = Image.new('RGBA',(self.width,self.height),color='#' + getConfigPart(self.theme,\"bg\"))\n self.draw = ImageDraw.Draw(self.img)", "def make_montage(infile):\n\n #create a blank image\n blank = Image.new(\"RGB\", OUTPIC_SIZE ,(25,23,24,255))\n\n #paste the background\n bg = Image.open(BACKGROUND_FILENAME)\n blank.paste(bg,(0,0))\n \n #paste the raw picture\n im = Image.open(infile)\n \n # past on background\n blank.paste(im,OUTPIC_PHOTO_OFFSET)\n\n #paste the stickers layer on the pic (a transparent png)\n stickers = Image.open(STICKERS_FILENAME)\n blank.paste(stickers,(0,0),stickers)\n\n return blank", "def draw_nonogram(self):\n image = Image.new(\"RGB\", (self.nonogram_size * 50, self.nonogram_size * 50), (255, 255, 255))\n draw = ImageDraw.Draw(image)\n\n for index, square in enumerate(reduce(lambda x, y: x+y, self.grid), 0):\n\n #print(square)\n x = index % self.nonogram_size\n y = index // self.nonogram_size\n coord = [(x * 50, y * 50), ((x + 1) * 50, (y + 1) * 50)]\n if square == EMPTY:\n draw.rectangle(coord, fill=(255, 255, 255))\n if square == FILLED:\n draw.rectangle(coord, fill=(0, 0, 0))\n return image", "def build_filler_images(self):", "def maketestimage(self, *args, **kwargs):\n return _image.image_maketestimage(self, *args, **kwargs)", "def create_image(path, pxcount):\n img = Image.open(path, 'r').convert('L')\n pixels = img.load()\n for i in range(pxcount):\n x = randint(0, img.size[0]-1)\n y = randint(0, img.size[0]-1)\n if pixels[x, y] == 0:\n pixels[x, y] = 255\n else:\n pixels[x, y] = 0\n return img", "def make_image():\n click.echo(\"make_image\")", "def EmptyBitmapRGBA(width, height, red=0, green=0, blue=0, alpha=0):\n return _gdi_._EmptyBitmapRGBA(width, height, red, green, blue, alpha)", "def make_empty_directories_linux() -> None:\n mkdir(PICTURES_DIR / 'screenshots' / 'grim')\n mkdir(PICTURES_DIR / 'screenshots' / 'swappy')", "def imageprepare(self,argv):\r\n\t\tim = Image.open(argv).convert('L')\r\n\t\twidth = float(im.size[0])\r\n\t\theight = float(im.size[1])\r\n\t\tnewImage = Image.new('L', (28, 28), (255)) # creates white canvas of 28x28 pixels\r\n\r\n\t\tif width > height: # check which dimension is bigger\r\n\t\t\t# Width is bigger. 
Width becomes 20 pixels.\r\n\t\t\tnheight = int(round((20.0 / width * height), 0)) # resize height according to ratio width\r\n\t\t\tif nheight == 0: # rare case but minimum is 1 pixel\r\n\t\t\t\tnheight = 1\r\n\t\t\t\t# resize and sharpen\r\n\t\t\timg = im.resize((20, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\r\n\t\t\twtop = int(round(((28 - nheight) / 2), 0)) # caculate horizontal pozition\r\n\t\t\tnewImage.paste(img, (4, wtop)) # paste resized image on white canvas\r\n\t\telse:\r\n\t\t\t# Height is bigger. Heigth becomes 20 pixels.\r\n\t\t\tnwidth = int(round((20.0 / height * width), 0)) # resize width according to ratio height\r\n\t\t\tif (nwidth == 0): # rare case but minimum is 1 pixel\r\n\t\t\t\tnwidth = 1\r\n\t\t\t\t# resize and sharpen\r\n\t\t\timg = im.resize((nwidth, 20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\r\n\t\t\twleft = int(round(((28 - nwidth) / 2), 0)) # caculate vertical pozition\r\n\t\t\tnewImage.paste(img, (wleft, 4)) # paste resized image on white canvas\r\n\r\n\t\t# newImage.save(\"sample.png\")\r\n\r\n\t\ttv = list(newImage.getdata()) # get pixel values\r\n\r\n\t\t# normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\r\n\t\ttva = [(255 - x) * 1.0 / 255.0 for x in tv]\r\n\t\treturn tva", "def construct_image(imgs):\n\n # todo fill missing pieces and\n\n if len(imgs) == 0:\n return None\n # taking the first\n w, h = imgs[0][1].size\n img_array = order_2d(imgs)\n x_count = len(img_array[0])\n y_count = len(img_array)\n height = h * y_count\n width = w * x_count\n new_im = Image.new('RGB', (width, height))\n for y in range(y_count):\n for x in range(x_count):\n _, im = img_array[y][x]\n new_im.paste(im, (x * w, y * h))\n return new_im", "def create_image(self, shapes):\n img = image.IMG()\n img.draw_shapes(shapes)\n img = np.transpose(img.array(), (2, 0, 1))\n return img", "def create_image(storage, filename, size=(100, 100), image_mode='RGB', image_format='PNG'):\n data = BytesIO()\n PIL.Image.new(image_mode, size).save(data, image_format)\n data.seek(0)\n if not storage:\n return data\n image_file = ContentFile(data.read())\n return storage.save(filename, image_file)", "def test_empty_image(self):\n r = post(self.client, 'upload.up_image_async', {'image': ''},\n args=['questions.Question', 1])\n\n eq_(400, r.status_code)\n json_r = json.loads(r.content)\n eq_('error', json_r['status'])\n eq_('Invalid or no image received.', json_r['message'])\n eq_('You have not selected an image to upload.',\n json_r['errors']['image'][0])", "def new(self, size, fill):\n return Image(PIL.Image.new(\"RGB\", size, fill))", "def clear(self):\n self.img = PIL.Image.new(self.img.mode, self.img.size, self.background)\n self.drawer = aggdraw.Draw(self.img)", "def fill_image(im):\n width, height = im.size\n # Select the larger value of the length and width of the original picture\n # as the radius of the nine palace grid of the new picture\n new_image_len = width if width > height else height\n # Create a white canvas\n new_image = Image.new(im.mode, (new_image_len, new_image_len), color=\"white\")\n # Paste the original image on the canvas at the center\n if width > height:\n new_image.paste(im, (0, int((new_image_len - height) / 2)))\n else:\n new_image.paste(im, (int((new_image_len - width) / 2), 0))\n return new_image", "def generate_image(self):\n\n if not has_pillow:\n raise RuntimeError(\"requires https://pypi.org/project/pillow/\")\n\n background = self.get_background()\n foreground = self.get_foreground()\n\n matrix = self.generate_matrix()\n\n image 
= Image.new(\"RGB\", (420, 420), background)\n draw = ImageDraw.Draw(image)\n\n for (i, row) in enumerate(matrix):\n for (j, bit) in enumerate(row):\n x = 35 + j * 70\n y = 35 + i * 70\n\n if bit:\n draw.rectangle((x, y, x + 70, y + 70), foreground)\n\n return image", "def set_default_image(self, image):\n raise NotImplementedError", "def create_target_image(self, builder, target, base_image, parameters):", "def imBox(self, width, height):\n img = Image.new(\"1\", (width, height))\n draw = ImageDraw.Draw(img)\n bgColor=255\n draw.rectangle((0,0) + img.size,fill=bgColor)\n return img", "def to_image(self, width=800, height=600, **kwargs):\n img = _white_image(\n parameters=self.parameters,\n width=width,\n height=height,\n **kwargs\n )\n return img", "def create_test_image(dirname, array, normalization=None):\n filename = str(dirname / 'tmp.tif')\n create_test_file(filename, array)\n satellite = 'quickbird'\n image = Image(filename, satellite, normalization_parameters=normalization)\n return image", "def create(self, name, image, command, **kwargs):\n return", "def draw_empty( text, file, kw ):\n\n prefs = dict(globals()['prefs'])\n for key, data in kw.items():\n prefs[key] = data\n fig = Figure()\n canvas = FigureCanvasAgg( fig )\n dpi = prefs['width'] /prefs['width_inches']\n height_inches = prefs['height'] / float(dpi)\n fig.set_size_inches( prefs['width_inches'], height_inches )\n fig.set_dpi( dpi )\n fig.set_facecolor('white')\n fig.text( .5, .5, text, horizontalalignment='center' )\n if isinstance( file , StringIO.StringIO ) or type(file) == cStringIO_type:\n canvas.draw()\n size = canvas.get_renderer().get_canvas_width_height()\n # Hack: for some unknown reason in py27 this call is returning floats.\n # Convert it to int coordinates so that PIL doesnt complain.\n size = (int(size[0]),int(size[1]))\n buf=canvas.tostring_argb()\n im=PILImage.fromstring('RGBA', size, buf, 'raw', 'RGBA', 0, 1)\n a, r, g, b = im.split()\n im = PILImage.merge( 'RGBA', (r, g, b, a) )\n im.save( file, format = 'PNG' )\n else:\n canvas.print_figure( file, **kw )", "def make_image(self, imagename, fitsname, niter=500, antenna='', phasecenter='', start=200, stop=900, del_img=True, overwrite=False): \n self.generate_image(imagename, antenna=antenna, niter=niter, phasecenter=phasecenter, start=start, stop=stop)\n self.to_fits(imagename + '.image', fitsname, overwrite=overwrite)\n if del_img:\n self.remove_image(imagename, del_img=True)", "def _make_image(self):\n self.image.fill(0)\n self.image[:, :, PillEater.WALLS] = self.walls\n self.image[:, :, PillEater.FOOD] = self.world_state['food']\n self.image[self.world_state['pillman']['pos'][0], self.world_state['pillman']['pos'][1],\n PillEater.PILLMAN] = 1\n for ghost in self.world_state['ghosts']:\n edibility = self.world_state['power'] / float(self.pill_duration)\n self.image[ghost['pos'][0], ghost['pos'][1], PillEater.GHOSTS] = 1. 
- edibility\n self.image[ghost['pos'][0], ghost['pos'][1], PillEater.GHOSTS_EDIBLE] = edibility\n for pill in self.world_state['pills']:\n self.image[pill['pos'][0], pill['pos'][1], PillEater.PILL] = 1\n return self.image", "def draw_empty( self ):\n prefs = self.prefs\n fig = Figure()\n canvas = FigureCanvasAgg( fig )\n dpi = prefs['width'] /prefs['width_inches']\n height_inches = prefs['height'] / float(dpi)\n fig.set_size_inches( prefs['width_inches'], height_inches )\n fig.set_dpi( dpi )\n fig.set_facecolor('white')\n fig.text( .5, .5, \"No data returned by DB query.\", horizontalalignment='center' )\n self.ax = None\n self.fig = fig\n self.canvas = canvas", "def get_image(filename=None, astrotarget=None,\n **kwargs):\n return Image(filename,astrotarget=astrotarget,\n **kwargs)", "def test_annotate_blank(self):\n x_size = 500\n y_size = 500\n\n label = li.Label(\"Test Label\", (0.20, 0.20))\n test_im = li.ImageBlank(None, x_size, y_size)\n test_im.annotate(label)\n annotated_im = test_im.data\n\n # Ensure the shape is retained\n shape_expected = (y_size, x_size, 4)\n shape_test = annotated_im.shape\n self.assertEqual(\n shape_test, shape_expected, msg=\"shape not retained after annotation\"\n )\n\n # Test that there are darker text areas\n flattened = annotated_im[..., :3].mean(axis=-1)\n n_black_pixels = (flattened < 10).sum()\n\n self.assertGreater(n_black_pixels, 50, msg=\"Not enough black/text pixels\")\n self.assertLess(n_black_pixels, 5000, msg=\"Too many black/text pixels\")", "def make_empty_img_from_img(img, dimensions=3):\n xlen, ylen, zlen = img.GetSize()\n dupe = img[:, :, :]\n for x in xrange(xlen):\n for y in xrange(ylen):\n if dimensions == 3:\n for z in xrange(zlen):\n dupe.SetPixel(x, y, z, 0)\n else:\n dupe.SetPixel(x, y, 0)\n return dupe", "def create_image(self, instance_id, name,\r\n description=None, no_reboot=False):\r\n params = {'InstanceId' : instance_id,\r\n 'Name' : name}\r\n if description:\r\n params['Description'] = description\r\n if no_reboot:\r\n params['NoReboot'] = 'true'\r\n img = self.get_object('CreateImage', params, Image, verb='POST')\r\n return img.id", "def make_image():\n # get the mask\n twitter_mask = np.array(Image.open('resource/twitter-mask.png'))\n\n wc = WordCloud(background_color='white', max_words=100, mask=twitter_mask, contour_width=3,\n contour_color='steelblue')\n\n # generate word cloud\n wc.generate_from_frequencies(get_word_frequency())\n\n # store to file\n wc.to_file('/tmp/twitter.png')\n\n # show\n frame = cv2.imread('/tmp/twitter.png')\n cv2.imshow('figure', frame)\n cv2.waitKey(60000)\n cv2.destroyAllWindows()", "def glance_create_new_image(glance, images_location, image_info, image_name_prefix=None):\n # image raw file path\n image_raw_source = image_info['image_raw_source']\n image_file = os.path.join(images_location, image_raw_source)\n\n if not os.path.isfile(image_file):\n logger.warning(\"image raw file:'%s' not found!\", image_file)\n return None\n\n fimg = None\n try:\n fimg = open(image_file, 'rb')\n except Exception:\n logger.error(\"Opening raw image file:'%s' failed\", image_file)\n return None\n\n try:\n # image name\n image_name = image_info['image_name']\n if image_name_prefix:\n image_name = \"{}{}\".format(image_name_prefix, image_name)\n logger.debug(\"image_name: %s\", image_name)\n\n # image min_disk\n if image_info['image_min_disk'] == 'auto':\n # compute the size of the file -> min disk size in GB\n imagesize = os.fstat(fimg.fileno()).st_size\n image_min_disk = (imagesize/1024/1024/1024)+1\n else:\n 
image_min_disk = image_info['image_min_disk']\n logger.debug(\"image_min_disk: %s\", image_min_disk)\n\n # image min_ram\n image_min_ram = image_info['image_min_ram']\n logger.debug(\"image_min_ram: %s\", image_min_ram)\n\n # image properties (dictionary)\n image_properties = image_info['image_properties']\n logger.debug(\"image_properies: %s\", image_properties)\n\n logger.debug(\"glance image create (private): '%s'\", image_name)\n image = glance.images.create(name=image_name,\n visibility='private',\n disk_format='raw',\n container_format='bare',\n min_disk=int(image_min_disk),\n min_ram=int(image_min_ram))\n logger.debug(\"glance image upload: '%s' -> '%s'\", fimg.name, image_name)\n glance.images.upload(image.id, fimg)\n\n except Exception:\n logger.exception(\"Creating and uploading Glance image '%s' failed\", image_name)\n return None\n\n return image", "def testBinaryImage():\n ALIEN = \"0\"*8 + \"11011011\"*2 + \"0\"*8 + \"00001000\" + \\\n \"01000010\" + \"01111110\" + \"0\"*8\n # this function is imported from cs5png.py\n NUM_ROWS = 8\n NUM_COLS = 8\n binaryIm( ALIEN, NUM_COLS, NUM_ROWS )\n # that should create a file, binary.png, in this\n # directory with the 8x8 image...", "def new(mode, size, color=0):\r\n\r\n _check_size(size)\r\n\r\n if color is None:\r\n # don't initialize\r\n _im = Image()._new(mode, size)\r\n return Image(_im)\r\n\r\n if type(color).__name__ == \"str\":\r\n # css3-style specifier\r\n color = ImageColor().getcolor(color, mode)\r\n color = ImageDraw(None)._convert_bgr2rgb(color)\r\n\r\n _im = Image()._new(mode, size, color)\r\n return Image(_im)", "def generate_image(self):\n\t\tcenters = self.generate_centers()\n\t\timg = Image.new('RGB', (self.config.image_size, self.config.image_size), color=(0,0,0))\n\t\tshapes = np.random.randint(2, size=len(centers))\n\t\tdrawer = ImageDraw.Draw(img)\n\t\tr = int(0.05 * self.config.image_size)\n\t\tR = []\n\t\tfor i in range(len(centers)):\n\t\t\tcoor = (centers[i][0] - r , centers[i][1] - r, centers[i][0] + r, centers[i][1] + r)\n\t\t\tif shapes[i] < 0.5:\n\t\t\t\tdrawer.rectangle(coor, fill=COLOR[i])\n\t\t\telse:\n\t\t\t\tdrawer.ellipse(coor, fill=COLOR[i])\n\t\t\tR.append([centers[i], i, shapes[i]])\n\t\treturn np.array(img), R", "def createBlankPlot(self):\n\n fig = plt.figure(figsize=(8,6),dpi=80)\n fig.set_facecolor('#ededed')\n \n # Format plot\n ax = plt.subplot(111)\n \n fig.canvas.draw()\n \n return fig, ax", "def setup():\n img = Image.new('RGB', (10, 20))\n img.putpixel((5, 10), (0, 255, 0))\n img.save('green-dot.tif')\n img.save('green-dot.jpg')\n img.save('green-dot.png')", "def test_draw(self):\n image_name = filename(sys._getframe().f_code.co_name)\n result_file, reference_file = get_path(image_name)\n\n ''' This function is to create an empty image with a specific dimension\n with white background, and black/white colored '''\n\n image, canvas = get_image('L', (15,90),'white')\n\n for i in range(len(draw_points) - 1):\n draw(canvas, (draw_points[i + 0], draw_points[i + 1]), 'A')\n\n \"\"\" saving the file and closing it \"\"\"\n\n image.save(result_file)\n image.close()\n\n \"\"\" validate the resultant file against the reference images\"\"\"\n\n validate(reference_file, result_file)", "def _create_layer() -> Image:\n data = np.random.random((32, 16))\n return Image(data)", "def Empty():\n return Container(name='(empty)',\n metadata={},\n section_sizes={},\n metrics_by_file={})", "def get_inline_stmt_emptybitmap(self, bitmap):\n # keep in sync with BitmapMixin.get_preview_obj_bitmap()\n width = 16\n 
height = 16\n try:\n size = bitmap[6:]\n width, height = [int(item.strip()) for item in size.split(',', 1)]\n except ValueError:\n logging.warn( 'Malformed statement to create an empty bitmap: %s', bitmap )\n stmt = self.tmpl_inline_emptybitmap % { 'width': width, 'height': height }\n return stmt", "def test_random_single_image():\n\n shap.image_plot(np.random.randn(3, 20, 20), np.random.randn(3, 20, 20), show=False)", "def topil(self) -> Image.Image:\n if self.width == 0 or self.height == 0:\n return None\n return Image.frombytes(\n \"RGBA\", (self.width, self.height), self.data, \"raw\", \"ARGB\", 0, 1\n )", "def create_zeroed_image(n):\n\n alist = []\n for i in range(0, n):\n alist.append([])\n for j in range(0,n):\n alist[i].append(0)\n return alist", "def _create_image(self):\n if hasattr(self, '_image') and self._image:\n return self._image\n try:\n command = \"tex2im -b transparent -t cyan\"\n subprocess.run([*command.split(), self._formula])\n except Exception as e:\n import traceback\n print(traceback.format_exc())\n return None\n # tex2im converts to out.png by default\n img = Image.open('out.png').convert('RGBA')\n # create a new rgba image to blend the latex with the alpha\n subprocess.run([\"rm\", \"out.png\"])\n return img", "def create_image(self):\n img = cv2.imread(self.url)\n self.img = cv2.resize(img, (self.window_x, self.window_y))", "def imageprepare(argv):\n im = Image.open(argv).convert('L')\n width = float(im.size[0])\n height = float(im.size[1])\n newImage = Image.new('L', (28, 28), (255)) # creates white canvas of 28x28 pixels\n\n if width > height: # check which dimension is bigger\n # Width is bigger. Width becomes 20 pixels.\n nheight = int(round((20.0 / width * height), 0)) # resize height according to ratio width\n if (nheight == 0): # rare case but minimum is 1 pixel\n nheight = 1\n # resize and sharpen\n img = im.resize((20, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wtop = int(round(((28 - nheight) / 2), 0)) # calculate horizontal position\n newImage.paste(img, (4, wtop)) # paste resized image on white canvas\n else:\n # Height is bigger. Heigth becomes 20 pixels.\n nwidth = int(round((20.0 / height * width), 0)) # resize width according to ratio height\n if (nwidth == 0): # rare case but minimum is 1 pixel\n nwidth = 1\n # resize and sharpen\n img = im.resize((nwidth, 20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wleft = int(round(((28 - nwidth) / 2), 0)) # caculate vertical pozition\n newImage.paste(img, (wleft, 4)) # paste resized image on white canvas\n\n # newImage.save(\"sample.png\n\n tv = list(newImage.getdata()) # get pixel values\n\n # normalize pixels to 0 and 1. 
0 is pure white, 1 is pure black.\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\n print(tva)\n return tva", "def make_image(self, mode=\"L\") -> Image:\r\n return Image.fromarray(self.fb, mode=\"L\")", "def test_created_invalid_image(self):\n res = self.client.post(IMAGE_URL,\n data={'image': 'no_image'}, format='multipart')\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def create_initial_pictures():\n pictures = []\n\n for i in xrange(0, 9):\n pic = Picture()\n pic = Picture.create_random(pic, 1)\n\n pictures.append(pic)\n\n return pictures", "def make_dummy_file(file, shape=(15, 2000, 2000), n_ext=2):\n # Primary HDU\n hdulist = [fits.PrimaryHDU()]\n\n # SCI extensions\n for n in range(n_ext):\n hdu = fits.ImageHDU(data=np.random.normal(size=shape), name='SCI_{}'.format(n))\n hdulist.append(hdu)\n\n # Make list\n hdulist = fits.HDUList(hdulist)\n\n # Write the file\n hdulist.writeto(file, overwrite=True)\n\n print(\"{} MB file created at {}\".format(os.path.getsize(file) / 1000000, file))", "def genrandimg(args) -> None:\n\n size = (int(args.x), int(args.y))\n fp = Image.new(\"RGB\", size)\n data = []\n\n if not args.c: # If color\n for i in range(size[0]*size[1]):\n r = random.choice([0x00, 0xff])\n data.append((r, r, r)) # Each RGB value is the same random value\n else: # Else black-and-white\n for i in range(size[0]*size[1]):\n r = [random.choice(range(0, 256)) for _ in range(0, 3)]\n r = (r[0], r[1], r[2]) # Choose 3 random numbers for different RGB values\n data.append(r)\n\n fp.putdata(data)\n print(\"Saving to %s...\" % args.o)\n fp.save(args.o)\n fp.close()", "def create_image(config, size_mb):\n\n delete_image(config)\n iotests.log(\"truncate %s --size %dMB\" % (config.image_path(), size_mb),\n filters=[iotests.filter_test_dir])\n with open(config.image_path(), \"w\") as fn:\n fn.truncate(size_mb * 1024 * 1024)", "def __init__(self, width, height):\n self._image = tk.PhotoImage(master=root, width = width, height = height)\n self.fill((0,0,0))", "def no_image(cls):\n def eval_fn(p: Posting):\n if p.img_url is None:\n return f\"I couldn't find any images for this posting.\"\n\n return cls(eval_fn)", "def __create_blank_page__(self):\n with open(\"active_weather.basic.exp\"+str(self.box_count)+\".box\",\"w\") as f:\n f.write(\"\")\n\n self.width = 2508\n # self.height = 200\n self.height = 4000\n self.training_page = np.zeros((self.height,self.width),dtype=np.uint8)\n self.training_page.fill(255)\n\n self.row_bitmaps = []\n self.row_characters = []\n\n self.row_pointer = spacing\n self.column_pointer = spacing\n\n\n # self.__box_file_flush__()\n self.box_file_entries = []\n self.used_height = spacing", "def create_img(X_train, X_test, y_train, y_test, labels, model, visualizer, upsampled, IMG_OUTPUT_FILEPATH):\n viz = Visualizer(X_train, X_test, y_train, y_test, labels, model, visualizer, upsampled=upsampled)\n viz.evaluate()\n if upsampled == True:\n outpath_ = IMG_OUTPUT_FILEPATH + str(model).split('(')[0] + '/' + visualizer + '_upsampled.png'\n else:\n outpath_ = IMG_OUTPUT_FILEPATH + str(model).split('(')[0] + '/' + visualizer + '.png'\n viz.visualizer.show(outpath=outpath_, clear_figure=True)", "def builder_will_create_target_image(self, builder, target, image_id, template, parameters):" ]
[ "0.80658996", "0.789249", "0.73570555", "0.719231", "0.71430326", "0.7110544", "0.69541055", "0.69369465", "0.69369465", "0.69088554", "0.69088554", "0.6905223", "0.6859873", "0.6651872", "0.65808094", "0.65179014", "0.6485663", "0.64796895", "0.6460028", "0.6458123", "0.63959295", "0.637956", "0.6311849", "0.62752634", "0.6267765", "0.6159564", "0.6155874", "0.6090965", "0.60397303", "0.60341364", "0.60111356", "0.6009815", "0.60005873", "0.5974332", "0.5942975", "0.5905334", "0.58878225", "0.58763105", "0.58750415", "0.5872451", "0.5860547", "0.5835472", "0.5834946", "0.58275646", "0.5824303", "0.58068603", "0.580354", "0.5802434", "0.580132", "0.57825565", "0.5760148", "0.5744684", "0.5713735", "0.5706688", "0.5688825", "0.5681811", "0.56624824", "0.5648412", "0.5645863", "0.5627454", "0.56207865", "0.5616188", "0.56153196", "0.5592603", "0.55909836", "0.55889624", "0.55620843", "0.55577976", "0.5552983", "0.553182", "0.55315524", "0.55224496", "0.55197525", "0.5512037", "0.5502626", "0.5500158", "0.5494596", "0.5487518", "0.5453012", "0.54524374", "0.5430848", "0.54251826", "0.5413909", "0.54118156", "0.54110116", "0.54093266", "0.54067636", "0.54019356", "0.54000145", "0.5391611", "0.5386717", "0.53854907", "0.5377227", "0.5370334", "0.5367339", "0.5361959", "0.5355873", "0.5354761", "0.5353332", "0.53503114", "0.53497016" ]
0.0
-1
Parse a Spider filename string and insert parameters.
def parse_spider_fname(mystr, *fieldvals):
	# helper functions and classes
	def rm_stack_char(mystr):
		"Helper function to remove a stack character if it exists"
		stackloc = mystr.find("@")
		if stackloc != -1:	# there's an '@' somewhere
			if len(mystr) - 1 == stackloc:	# It's at the end of the string
				return mystr[:-1]
			else:	# '@' not at the end, so it's an error
				raise ValueError, "Invalid format: misplaced '@'."
		else:	# no '@' at all
			return mystr

	class Fieldloc:
		"Helper class to store description of a field"
		def __init__(self, begin, end):
			self.begin = begin
			self.end = end

		def count(self):
			"Size of the field (including braces)"
			return self.end - self.begin + 1

	def find_fields(mystr):
		"Helper function to identify and validate fields in a string"
		fields = []
		loc = 0
		while True:
			begin = mystr.find('{', loc)
			if begin == -1: break
			end = mystr.find('}', begin)
			field = Fieldloc(begin, end)
			# check validity
			asterisks = mystr[begin+1:end]
			if asterisks.strip("*") != "":
				raise ValueError, "Malformed {*...*} field: %s" % \
					mystr[begin:end+1]
			fields.append(Fieldloc(begin, end))
			loc = end
		return fields

	# remove leading whitespace
	mystr.strip()
	# remove stack character (if it exists)
	mystr = rm_stack_char(mystr)
	# locate fields to replace
	fields = find_fields(mystr)
	if len(fields) != len(fieldvals):	# wrong number of fields?
		raise ValueError, "Number of field values provided differs from" \
			"the number of {*...*} fields."
	newstrfrags = []
	loc = 0
	for i, field in enumerate(fields):
		# text before the field
		newstrfrags.append(mystr[loc:field.begin])
		# replace the field with the field value
		fieldsize = field.count() - 2
		fielddesc = "%0" + str(fieldsize) + "d"
		newstrfrags.append(fielddesc % fieldvals[i])
		loc = field.end + 1
	newstrfrags.append(mystr[loc:])
	return "".join(newstrfrags)
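For illustration only, a minimal usage sketch of the function above; the filenames and the value 23 are made-up examples and are not part of this record. Each {*...*} field is replaced by a zero-padded integer whose width equals the number of asterisks, and a trailing stack character '@' is stripped first.

	# Hypothetical Python 2 calls, assuming parse_spider_fname() above is in scope
	print parse_spider_fname("mic{***}.hrs", 23)      # -> "mic023.hrs"
	print parse_spider_fname("mic{*****}.hrs@", 23)   # -> "mic00023.hrs" (trailing '@' removed)
	print parse_spider_fname("stack.hrs")             # -> "stack.hrs" (no fields, no field values)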
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_filename(cls, filename):\n words = filename.split('_')\n return words[0], int(words[1][1:]), int(words[2])", "def parseFileName(filename):\n entry = DataEntry(\"\",0,{},{},0,0)\n wordArray = filename.split(\".\")\n entry.publication_name = wordArray[1]\n entry.year = wordArray[0]\n return entry", "def parse(source, *, filename=\"[STRING]\", typecheck=True):\n return _Parser(filename, source, typecheck).parse()", "def parseFilePath(self, filepath):\n\n li = filepath.split(\"/\") \n last = li[-1].split(\"_\")\n\n self.subjectName = li[-2]\n self.experimenterName = li[-3]\n self.experimentDate = last[-1]\n self.paradigm = last[-2]\n self.subjectName = last[-3]", "def parse(filename):\n file_map = {\n '1995-1996.html': ninety_six,\n '2005-2006.html': twenty_six,\n '2014-2015.html': twenty_fifteen\n }\n func = file_map.get(filename, lambda: \"Invalid File\")\n func(filename)", "def _parse_filename(filename, metadata):\n\n file_noext = os.path.splitext(filename)[0]\n fname = file_noext.split(\"_\")\n\n metadata[\"scene_id\"] = fname[1]\n metadata[\n \"beam_mode\"] = sat_properties.radarsat_product_characteristics[\n fname[2]]\n metadata[\"product_type\"] = fname[-1]\n try:\n metadata[\n \"product_description\"] = sat_properties.radarsat_1_data_products[\n fname[-1][:3]]['description']\n except Exception:\n metadata[\"product_description\"] = \"\"\n\n metadata[\"scene_mean_time\"] = datetime.datetime.strptime(\n fname[3] + fname[4], \"%Y%m%d%H%M%S\")\n\n return metadata", "def ParseFileName(self, fn, fnParser):\n self.filename = fn\n attrs = fnParser.Parse(fn)\n [setattr(self, k, _TryNumeric(v)) for k, v in attrs]", "def parse_glider_filename(filename):\n head, tail = os.path.split(filename)\n\n matches = re.search(r\"([\\w\\d\\-]+)-(\\d+)-(\\d+)-(\\d+)-(\\d+)\\.(\\w+)$\", tail)\n\n if matches is not None:\n return {\n 'path': head,\n 'glider': matches.group(1),\n 'year': int(matches.group(2)),\n 'day': int(matches.group(3)),\n 'mission': int(matches.group(4)),\n 'segment': int(matches.group(5)),\n 'type': matches.group(6)\n }\n else:\n raise ValueError(\n \"Filename ({}) not in usual glider format: \"\n \"<glider name>-<year>-<julian day>-\"\n \"<mission>-<segment>.<extenstion>\".format(filename)\n )", "def main():\n parse_file(sys.argv[1])", "def parse_filename(filename): # , time_fmt=TIME_INFILE_FMT):\n # Split the name up into its \"blocks\"\n parts = filename.split(\"_\")\n hive_str, rpi_str = parts[1:3]\n day_str = parts[3]\n method = parts[5]\n\n # Parse Hive and RPi number\n hive = int(hive_str[-1])\n rpi = int(rpi_str[-1])\n method = method.strip(\".csv\")\n\n # # Parse timestring into a datetime object\n # dt_naive = datetime.strptime(t_str, time_fmt)\n # dt_utc = pytz.utc.localize(dt_naive)\n\n return hive, rpi, method, day_str", "def _parse(self, infile):\n raise NotImplementedError()", "def parse(self, infile):\r\n raise NotImplementedError()", "def init_from_string(self, fs_in, param_string):\n if '(' in param_string:\n name_params_re = re.compile(r'(\\w*)\\((.*)\\)$')\n pieces = name_params_re.match(param_string)\n name = pieces.group(1)\n params = pieces.group(2)\n param_list = params.split(';')\n param_dict = {}\n for param in param_list:\n if '=' not in param:\n raise ValueError('preprocess param %s missing a value.' 
% param)\n k, v = param.split('=', 1)\n if v.isdigit():\n v = int(v)\n else:\n try:\n v = float(v)\n except ValueError:\n pass\n param_dict[k] = v\n self._name = name\n self.init_highpass(param_dict['highpass_cutoff'],\n param_dict['highpass_order'])\n self.init_channel_numbers(param_dict['channel_numbers'])\n else:\n self.__init__(self, fs_in, param_string)", "def insert(self, file_token, *file_tokens, preprocessor):\n tokens = (file_token,) + file_tokens\n for token in tokens:\n preprocessor.insert_file(self._get_filename(token))", "def _parse_file_path(self, input_path):\n pass", "def parse_parameters(parser):\n parser.add_argument('--parameters-file', '-p', help='parameter filename',\n required=True)\n return parser", "def preprocess(self, source, name, filename=None):\n if not name or not os.path.splitext(name)[1] in self.environment.file_extensions:\n return source\n output = StringIO()\n lexer = Lexer(iter(source.splitlines()))\n Parser(lexer, callback=output.write, debug=self.environment.slim_debug).parse()\n\n if self.environment.slim_print:\n print output.getvalue()\n\n return output.getvalue()", "def __init__(self, filename=None, label=None, tokens=None):\n if label: # specify from label/tokens, for testing.\n self.label = label\n self.tokens = tokens\n self.postID = -1\n self.likes = -1\n else: # specify from file.\n self.filename = filename\n parsedNames = filename.split(\"#\")\n if 'pop' in parsedNames[0]:\n self.label = 'pop'\n else:\n self.label = 'sod'\n self.postID = parsedNames[1]\n self.likes = parsedNames[2]\n self.tokenize()", "def init_from_file(filename, parser=int):\n filename = filename + \".\" + str(PID)\n\n def __parser_couple(s):\n s = s.replace(\"(\", \"\")\n s = s.replace(\")\", \"\")\n ss = s.split(\",\")\n return int(ss[0]), int(ss[1])\n\n p = PTree()\n content = SList([])\n with open(filename, \"r\") as f:\n count_line = 0\n for line in f:\n if line.strip()[0] == '#':\n continue\n # __distribution: PID -> nb of segments\n # __global_index: num seg -> (start, offset)\n if count_line == 0: # Get the distribution\n p.distribution = SList.from_str(line)\n p.start_index = p.distribution.scanl(lambda x, y: x + y, 0)[PID]\n p.nb_segs = p.distribution[PID]\n elif count_line == 1: # Get the global_index\n p.global_index = SList.from_str(line, parser=__parser_couple)\n else: # Get the content\n content.extend(Segment.from_str(line, parser=parser))\n count_line = count_line + 1\n p.content = content\n return p", "def setSourceFile(filename):", "def __init__(self, line_parser, *filename):\n \n self.line_parser = line_parser\n self.f = fileinput.input(filename)", "def _parse(\n self, source: str, name: t.Optional[str], filename: t.Optional[str]\n ) -> nodes.Template:\n return Parser(self, source, name, filename).parse()", "def parse(self, fstring):\n pass", "def parse_pts(pts_result_file, global_var_list, parsed_results_file):\n fill_value_name(pts_result_file, global_var_list, parsed_results_file)", "def parse_filenames(filenames):\n \n for fn in filenames:\n dirname, basename = path.split(fn)\n subject_visit = basename[:7]\n visit = basename[5:7]\n yield dirname, basename, subject_visit, visit", "def parse_parameters(filename):\n\n # read in the parameters\n mainInput = ParserClass.Parser(filename)\n if 'LogFile' in mainInput['Inputs']:\n if mainInput['Inputs']['LogFileUsePID']:\n logger = Logging.Logger(mainInput['Inputs']['LogFile']+'_{}'.format(os.getpid()))\n else:\n logger = Logging.Logger(mainInput['Inputs']['LogFile'])\n \n else:\n logger = print\n\n # 
Generate a filelist to loop over\n filelist = np.loadtxt(mainInput['Inputs']['filelist'],dtype=str,ndmin=1)\n if isinstance(mainInput['Inputs']['data_dir'], type(None)):\n filelist = [filename for filename in filelist]\n else:\n filelist = ['{}/{}'.format(mainInput['Inputs']['data_dir'],\n filename.split('/')[-1]) for filename in filelist]\n \n # Some items should always be a list\n if not isinstance(mainInput['Inputs']['pipeline'], list):\n mainInput['Inputs']['pipeline'] = [mainInput['Inputs']['pipeline']]\n # Get the class names (modulename, classname)\n jobnames = [c for c in mainInput['Inputs']['pipeline']]\n\n logger('Running: '+' '.join(mainInput['Inputs']['pipeline']))\n\n\n prejobnames = [c for c in mainInput['Inputs']['preamble']]\n\n\n # Read the class parameter file\n classInput = ParserClass.Parser(mainInput['Inputs']['classParameters'])\n\n # Initalise the classes : classInput are the kwargs to initiate classes\n jobs = []\n for job in jobnames:\n jobs += [getClass(job)(logger=logger,**classInput[job])]\n\n # Initalise the classes : classInput are the kwargs to initiate classes\n prejobs = []\n for prejob in prejobnames:\n prejobs += [getClass(prejob)(logger=logger,**classInput[prejob])]\n\n\n return jobs,prejobs, filelist, mainInput, classInput, logger", "def parse_source_file(self, filepath):\n raise NotImplementedError('Not Implemented')", "def parse_file_name(file_name):\n\n elements = file_name.split(\"_\")\n if file_name.find(\"_VI_\") > 0:\n client = elements[0]\n capture_range = \"R1\"\n condition = elements[2]\n polarization = \"VIS\"\n shot = elements[4]\n modality = \"VIS\"\n else:\n client = elements[0]\n capture_range = elements[1]\n condition = elements[2]\n polarization = elements[3]\n shot = elements[4]\n modality = \"THERMAL\"\n \n return client, capture_range, condition, polarization, shot, modality", "def parse_infile(self, infile):\n\n if type(infile)==str:\n print('Im a string')\n folder, file = os.path.split(infile)\n elif type(infile) in [list, tuple]:\n if not len(infile) == 2:\n raise(Exception('The infile must be a string or a length 2 sequence'))\n else:\n folder, file = infile\n else:\n raise(Exception('The infile must be a string or a length 2 sequence'))\n \n self.folder = folder\n self.file_ = file", "def parser(filename):\n\n regex = re.compile(\n # prolog\n r\"run(?P<run>\\w+)\"\n ##r\"\\-(?P<code_name>((mfdn)|(obscalc-ob))[^\\-]*)\"\n r\"\\-(?P<descriptor>\"\n # descriptor contents\n r\"Z(?P<Z>\\d+)\\-N(?P<N>\\d+)\"\n r\"\\-(?P<interaction>.+)\\-(?P<coulomb>\\d)\"\n r\"\\-(?P<truncation_descriptor>.+)\"\n ## r\"\\-Nmax(?P<Nmax>\\d+)\"\n # epilog\n r\").res\"\n )\n\n conversions = {\n \"Z\" : int,\n \"N\" : int,\n \"interaction\" : str,\n \"coulomb\" : int,\n }\n\n match = regex.match(filename)\n if (match == None):\n raise ValueError(\"bad form for spncci results filename: \" + filename)\n info = match.groupdict()\n\n # convert fields\n for key in conversions:\n conversion = conversions[key]\n info[key] = conversion(info[key]) if (info[key] is not None) else None\n\n return info", "def input_parse(filename,params_flag=False):\r\n\r\n input_parameters ={}\r\n with open(filename, 'r') as f:\r\n count = 0\r\n\r\n for line in f:\r\n line=line.strip()\r\n if line:\r\n if line.find('#') == -1:\r\n if not params_flag:\r\n var_name,var_value = line.split(',')[0],\",\".join(line.split(',')[1:]) # handle lines with more than 1 comma\r\n try:\r\n input_parameters[var_name] = float(var_value)\r\n except ValueError: # This occurs when python cannot 
convert list into a float.\r\n # Evaluate the python expression as a list\r\n input_parameters[var_name] = ast.literal_eval(var_value)\r\n else:\r\n if count==0:\r\n var_name = line.strip('\\n')\r\n input_parameters[var_name] = []\r\n count+=1\r\n else:\r\n try:\r\n input_parameters[var_name].append(float(line.strip('\\n')))\r\n except ValueError: # This occurs when python cannot convert list into a float.\r\n # Evaluate the python expression as a list\r\n input_parameters[var_name].append(ast.literal_eval(line.strip('\\n')))\r\n return input_parameters", "def __init__(self, filename, delimiter):\n self.filename = filename\n self.delimiter = delimiter\n \n # open file and process\n self.headers = []\n self.data = []\n self._process_file()", "def parse(self, filename):\n infile = file(filename)\n for line in infile:\n self.parseLine(line)", "def parseConfig(self, filename):\n parameters = {}\n try:\n f = open(filename)\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)\n print('cannot open', filename)\n raise\n else:\n for line in f:\n # Remove text after comment character.\n if self.comment_char in line:\n line, comment = line.split(self.comment_char,\n 1) # Split on comment character, keep only the text before the character\n\n # Find lines with parameters (param=something)\n if self.param_char in line:\n parameter, value = line.split(self.param_char, 1) # Split on parameter character\n parameter = parameter.strip() # Strip spaces\n value = value.strip()\n parameters[parameter] = value # Store parameters in a dictionary\n\n f.close()\n\n return parameters", "def split_file(document: str):\n class_name, sep, assignment_name = document.partition(\"-\")\n try:\n assignment_name = assignment_name.split('.')[0].split('_')[0]\n except TypeError:\n pass\n return class_name, assignment_name", "def instantiate_for_spirv_args(self, testcase):\n testcase.stdin_shader = self.source\n self.filename = '-'\n return self.filename", "def parse_image_filename(filename):\n\n # regexes\n starts_with_six_digits = re.compile(r'^\\d{6}')\n capital_letter = re.compile(r'([A-Z]{1})')\n plus = re.compile(r'\\+')\n\n # split the filename and extention\n filename, extension = os.path.splitext(filename)\n try:\n style_number, color, description = filename.split('_')\n except Exception as e:\n print(e)\n print(filename, extension)\n\n style_number = int(style_number)\n\n # decode the color\n # intCaps -> int/caps\n color = capital_letter.sub(r'/\\1', color).lower()\n # plus+to+space -> plus to space\n color = plus.sub(r' ', color)\n\n # decode the description\n description = plus.sub(r' ', description)\n\n return style_number, color, description", "def setFile(self, filename):\n self.prepare() #new call on each new file to process\n self.filename = \"%s\" % filename", "def get_params(self, paramFile):\n\n with open(paramFile, 'r') as f:\n titleLine = next(f)\n\n for line in f:\n p, i, v = line.split(\",\")\n\n self.params.update(p, v, i)", "def parse_filename(cls, filename):\n #from nose.tools import set_trace; set_trace()\n m = re.match(cls._pattern, os.path.basename(filename))\n basename = m.group(1)\n bandname = cls._bandmap.get(m.group(2), m.group(2))\n return basename, bandname", "def __init__(self, fileName,\n openingMark = r'--oo<< *(?P<value>[^ \\n]+) *$',\n closingMark = r'--oo>> *$',\n hereMark = r'--oo== *(?P<value>[^ \\n]+) *$'):\n\n super(AnnotatedSourceFile,self).__init__(fileName)\n self.openingMark = openingMark\n self.closingMark = closingMark\n self.hereMark = 
hereMark\n\n fragmenter = modelscripts.base.fragments.RegexpFragmenter(\n self.sourceLines,\n openingMark, closingMark, hereMark,\n mainValue = self, firstPosition = 1)\n\n self.fragment = fragmenter.fragment\n \"\"\" The root fragment according to the given mark \"\"\"", "def __init__(self, filename):\n\n parser = Parser(filename=filename)\n self.uuid = parser.segregated(parser.read(),'UUID')\n self.id = parser.segregated(parser.read(),'ID')\n self.rate = parser.segregated(parser.read(),'RATE')\n self.gpio = parser.segregated(parser.read(),'GPIO')\n self.ddl = parser.segregated(parser.read(),'DATA_DELIVERY_LOCATION')", "def extract_params(self, fname):\n return re.findall(self.regexp_params, os.path.basename(fname))", "def parseFile(self, filename):\n\n f = open(filename, \"r\")\n s = f.read()\n f.close()\n\n logging.log(10, 'parsing filename %s: %d lines' % (filename, len(s)))\n\n self.parseString(s)", "def parse(self, fp):\n\n # create the plex scanner for fp\n self.create_scanner(fp)\n\n # call parsing logic\n self.stmt_list()\n print('Parsing successful!')", "def train_image_parse_function(filename, *argv):\n image = read_image(filename)\n image = tf.image.random_flip_left_right(image)\n\n if FLAGS.augmentation:\n print('data augmentation')\n resized_image = resize_and_random_crop_image(image)\n else:\n resized_image = resize_image(image)\n resized_image = scale_image_value(resized_image)\n\n if len(argv) == 1:\n return resized_image, argv[0]\n elif len(argv) == 2:\n return resized_image, argv[0], argv[1]\n else:\n return resized_image", "def __init__(self, filename):\n self.from_file(filename)\n self.parse_cell()\n self.parse_atom()\n self.apply_symops()", "def _parse_bids_filename(fname, verbose):\n keys = ['sub', 'ses', 'task', 'acq', 'run', 'proc', 'run', 'space',\n 'rec', 'split', 'kind']\n params = {key: None for key in keys}\n idx_key = 0\n for match in re.finditer(param_regex, op.basename(fname)):\n key, value = match.groups()\n if key not in keys:\n raise KeyError('Unexpected entity \"%s\" found in filename \"%s\"'\n % (key, fname))\n if keys.index(key) < idx_key:\n raise ValueError('Entities in filename not ordered correctly.'\n ' \"%s\" should have occurred earlier in the '\n 'filename \"%s\"' % (key, fname))\n idx_key = keys.index(key)\n params[key] = value\n return params", "def insert_file_via_perl(filename, comment=\"Added by Python Job\"):\n for line in lines(['ImportSingleFileIntoPosdaAndReturnId.pl', filename, comment]):\n if line.startswith(\"File id:\"):\n return int(line[8:])\n\n # TODO: pass on the error if there was one\n raise RuntimeError(\"Failed to insert file into posda!\")", "def preprocess(\n self,\n source: str,\n name: t.Optional[str] = None,\n filename: t.Optional[str] = None,\n ) -> str:\n return reduce(\n lambda s, e: e.preprocess(s, name, filename),\n self.iter_extensions(),\n str(source),\n )", "def load_specs(self, filename):\n self.filename = filename\n # Add loading functionality here", "def load_specs(self, filename):\n self.filename = filename\n # Add loading functionality here", "def __init__(self, in_file=None, search_str=\"\", buffer_size=None):\n super(FileParser, self).__init__()\n self.buffer_size = buffer_size or 1\n self.search_path = in_file\n self.update(self._parser(search_str=search_str))", "def parseParams():\n parser = argparse.ArgumentParser(\n prog=__appname__,\n description=__desc__,\n prefix_chars=\"-/\",\n fromfile_prefix_chars='@',\n add_help=False)\n parser.add_argument('source',\n type=str,\n nargs='?',\n help=\"File 
containing valid syntax. (Default Standard Input)\"\n )\n parser.add_argument('output',\n type=str,\n help=\"Output Image. (required)\"\n )\n parser.add_argument('-f', '--fmt', '/f',\n dest='fmt',\n default='SVG',\n choices=['SVG', 'PDF', 'EPS', 'PNG'],\n nargs='?',\n help='Format of the Output file. Formats other than SVG require Inkscape in the system path.')\n parser.add_argument('-s', '--syntax', '/s',\n dest='syntax',\n default='EBNF',\n choices=['EBNF', 'DIAGRAM'],\n nargs='?',\n help='Syntax used for the Input.')\n parser.add_argument('-h', '--help', '/h',\n action='help',\n help=\"show this help message and exit.\")\n parser.add_argument('-v', '--version', '/v',\n action='version',\n version=\"{0:s} version {1:s}\".format(__title__, __version__))\n return parser.parse_args()", "def parseFile(self,filename):\n\n name = '[0-9a-zA-Z_]+'\n string = '\\\\\"(.+)\\\\\"'\n\n testclass = None\n functionName = None\n\n fin = open(filename, 'r')\n for line in fin:\n # testclass starts\n res = re.match('class ('+name+')', line)\n if res != None:\n testclass = res.group(1)\n\n # end of testclass \n if re.match('};', line) != None:\n testclass = None\n\n # function start\n res = re.match('\\\\s+void ('+name+')\\\\(\\\\)', line)\n if res != None:\n functionName = res.group(1)\n\n elif re.match('\\\\s+}', line) != None:\n functionName = None\n\n if functionName == None:\n continue\n\n # check\n res = re.match('\\s+check.*\\('+string, line)\n if res != None:\n code = res.group(1)\n\n # code..\n res = re.match('\\\\s+'+string, line)\n if res != None:\n code = code + res.group(1)\n\n # assert\n res = re.match('\\\\s+ASSERT_EQUALS\\\\(\\\\\"([^\"]*)\\\\\",', line)\n if res != None and len(code) > 10:\n node = { 'testclass':testclass,\n 'functionName':functionName,\n 'code':code,\n 'expected':res.group(1) }\n self.nodes.append(node)\n code = ''\n\n # close test file\n fin.close()", "def read_parse_file(params):\n\tparam_names = []\n\tparam_options = []\n\tif not os.path.isfile(params.parse_file):\n\t\tprint(\"parse file does not exist! 
({})\".format(params.parse_file))\n\t\tsys.exit(NO_PARSE)\n\twith open(params.parse_file, 'r') as pf:\n\t\t# first line should be iteration regex\n\t\tsetattr(params, 'iteration_regex', re.compile(pf.readline().strip()))\n\t\tfor line in pf:\n\t\t\tparam_desc = line.split(';')\n\t\t\tparam_names.append(param_desc[0])\n\t\t\tparam_options.append(param_desc[1])\n\n\treturn param_names,param_options", "def parse_vid_path(vid_path):\n # ensures all path separators are consistent with OS\n vid_path = os.path.normpath(vid_path)\n i_start = vid_path.rfind(os.path.sep)\n vid_file = vid_path[i_start+1:]\n # cuts out extension and splits by underscores\n tokens = vid_file[:-4].split('_')\n prefix = ''\n\n for i, token in enumerate(tokens):\n if token.isnumeric():\n break\n elif i != 0:\n prefix += '_'\n prefix += token\n \n params = {'prefix' : prefix,\n 'fps' : int(tokens[i]),\n 'exp_time' : read_dash_decimal(tokens[i+1]),\n 'Q_i' : read_dash_decimal(tokens[i+2]),\n 'Q_o' : int(tokens[i+3]),\n 'd' : int(tokens[i+4]),\n 'mag' : int(tokens[i+5]),\n 'num' : int(tokens[i+6])}\n\n return params", "def __init__(self, root_directory, filename):\n self.source_name = filename\n extensionless, _ = path.splitext(filename)\n self.binary_name = extensionless + \".sept\"\n self.name = path.relpath(extensionless, root_directory)", "def importParameterBoundaryFile(paramfilename):\n try:\n infile = open(paramfilename, \"r\")\n except IOError:\n\t print \"Unable to open file %s\" % (paramfilename)\n\t raise IOError(\"Unable to open parameter boundary file %s\" % (paramfilename))\n lines = infile.readlines()\n infile.close()\n\n # Parse\n paramdict = {}\n for line in lines:\n line = line.strip()\n if len(line) == 0:\n continue\n elif line[0] == '#':\n continue\n else:\n terms = line.split()\n name = terms[0]\n value = float(terms[1])\n parmin = float(terms[2])\n parmax = float(terms[3])\n stepsize = float(terms[4])\n \n paramdict[name] = [value, parmin, parmax, stepsize]\n # ENDIF\n # ENDFOR\n\n return paramdict", "def parse(filename):\n if filename.endswith('.tsv'):\n # Horrible filename-based hack; in future examine contents of file here\n return parse_separated(filename, '\\t')\n if filename.endswith('.xlsx'):\n return parse_xlsx(filename)\n # fallback to old assume-csv behavior\n return parse_separated(filename, ',')", "def parse_segments(self):\n segs = self.unixtext.split(\"$$\")\n for seg in segs:\n self.segments.append(TextProductSegment(seg, self))", "def add_injection_parameters_from_file(self, injection_file, **kwargs):\n self.injection_parameters = self._grab_injection_parameters_from_file(\n injection_file, **kwargs\n )", "def __init__(self, filename):\r\n self._results = SpecParser(filename).parse()", "def _filename_pre_data(self) -> dict:\n key = []\n remainder = \"\"\n prework = {}\n for i in self.draft_file:\n if i == \"{\":\n remainder = \"\"\n elif i == \"}\":\n key.append(remainder)\n else:\n remainder += i\n list_filename = self.filename.split(\"_\")\n for key, value in zip(key, list_filename):\n prework[key] = value\n self.pre_data = prework", "def parse_ir(self, filename):\n with open(filename, 'r') as f:\n configs_data = f.readlines()\n for line in configs_data:\n # remove the \\n char\n cur_line = line[:-1]\n title = re.findall('.+?:', cur_line)\n # remove the : char\n cur_title = title[0][:-1]\n content = re.findall(':.+', cur_line)\n cur_content = content[0][1:]\n exec('self.config_dict[cur_title]=' + cur_content)", "def register_filename_format(format_name,parser):\n if format_name == 
\"ALL\":\n raise ValueError(\"filename format code ALL is reserved\")\n\n filename_format_parser[format_name] = parser", "def load_str(str, filename, default_namespace=None, verbose=False):\n paramlist = []\n default_namespace = default_namespace or get_ros_namespace()\n for doc in yaml.load_all(str):\n if NS in doc:\n ns = ns_join(default_namespace, doc.get(NS, None))\n if verbose:\n print(\"reading parameters into namespace [%s]\"%ns)\n del doc[NS]\n else:\n ns = default_namespace\n paramlist.append((doc, ns))\n return paramlist", "def process_feature_file(filename: str) -> Dict[str, Any]:\n feature = json.loads(open(filename).read())\n template = feature['query']\n name = feature['name']\n params = feature['params']\n feature_spec = {\n 'name': name,\n 'template': template,\n 'params': params\n }\n return feature_spec", "def load_params_from_file(self, input_file):\n\n ### FILL IN ###", "def parse_filename(filename, full_output=False):\n\n basename = filename.split(\"/\")[-1]\n teff = float(basename.split(\"t\")[1].split(\"g\")[0])\n logg = float(basename.split(\"g\")[1].split(\"k\")[0])/10.\n feh = float(basename[1:4].replace(\"p\", \"\").replace(\"m\", \"-\"))/10.\n alpha = [0, 0.4][basename[4] == \"a\"]\n parameters = [teff, logg, feh, alpha]\n\n if full_output:\n names = (\"effective_temperature\", \"surface_gravity\", \"metallicity\",\n \"alpha_enhancement\")\n return (parameters, names)\n return parameters", "def setup_parse(self, inputstring: str, document: nodes.document) -> None:\n self.inputstring = inputstring\n self.document = document", "def main(filename: str, /) -> None:", "def __init__(self, filename, default_sep=None):\n self.filename = filename\n if default_sep is not None:\n self.separator = default_sep", "def __parseDailyFilename(self, f):\n base = os.path.basename(f)\n\n tokens = base.split('.')\n if len(tokens) < 6:\n # assume it's an old file in the format A2000089etcetc.tif i.e. ?YYYYDDD*\n yr = base[1:5]\n day = base[5:8]\n else:\n # assume it's a file in the newer format ?*.YYYY.DDD.etc format\n varname, yr, day, temporalSummary, res, spatialSummary = tokens[0:6]\n outTemplate = varname + \"{}.{}.{}.\" + \"{}.{}.{}.tif\".format(temporalSummary, res, spatialSummary)\n if self._outTemplate == \"FILLED-OUTPUT{}.{}.{}.TemporalSummary.Res.SpatialSummary.tif\":\n self._outTemplate = outTemplate\n else:\n assert self._outTemplate == outTemplate\n return day, yr", "def read_input_file(input_path, split_char='=', cmnt_char='#'):\n # initializes dictionary to hold parameters\n params = {}\n\n # opens file\n file_obj = open(input_path)\n\n # reads in parameters from the lines of the file\n for line in file_obj:\n # removes comments at the end of the line\n line = line.split(cmnt_char, 1)[0]\n # removes spaces padding the line\n line = line.strip()\n key_value = line.split(split_char)\n # checks that there is a key and value (2 items)\n if len(key_value) == 2:\n # loads parameter\n params[key_value[0].strip()] = key_value[1].strip()\n\n return params", "def parse_files(files):\n ans = []\n if files:\n for f in files:\n split = f.split(\"=\")\n if len(split) != 2:\n raise Exception(\"invalid file specification '%s'\" % f)\n ans.append((split[0], split[1]))\n return ans", "def parseString(self, val):\n \n if not isinstance(val, str):\n raise Exception('Input must be a string!')\n if len(val) < 9:\n raise Exception( 'ESDT Names must be 9 characters!' 
)\n self.setType( val[:2] )\n self.setTime( val[2] )\n self.setFrequency( val[3] )\n self.setHRes( val[4] )\n self.setVRes( val[5] )\n self.setGroup( val[6:9] )\n tmp = val.split('.')\n if len(tmp) == 4:\n self.setVersion( *tmp[1:] )", "def filename_par(filename, searchstr):\n\tstart = filename.find(searchstr) + len(searchstr)\n\tfinish = start + 1\n\twhile unicode(filename[start:].replace(\".\",\"\"))[:finish-start].isnumeric():\n\t\tfinish += 1\n\treturn float(filename[start:finish-1])", "def process_file(_dir, _name, pkg_id):\n file_path = os.path.join(_dir, _name)\n if os.path.islink(file_path):\n return\n try:\n fp = open(file_path, 'r')\n first_line = fp.readline()\n if constants.PYTHON in first_line:\n _func = extract_python_kwds\n elif constants.BASH in first_line or constants.SHELL in first_line:\n _func = extract_shell_kwds\n else:\n # Return if file is not a python or a shell script.\n return\n # Populates the keywords in the file.\n _func(fp, pkg_id)\n if _name not in constants.STOP_WORDS:\n insert_keyword(_name.split('.')[0], pkg_id)\n except Exception as e:\n# @todo(Logging, level info)\n print file_path, e", "def func_PARSE(self):\n self.parsed_url = parse.urlparse(\"http://{0}:{1}{2}\".format(args.HTTP_HOST, args.HTTP_PORT, self.path).lower())\n self.parsed_param = parse.parse_qs(self.parsed_url[4])", "def parse_bids_filename(filename: str) -> dict:\n parsed = {}\n results = list(re.search(bids_pattern, filename).groups())\n parsed[\"ext\"] = results.pop()\n while results:\n item = results.pop()\n if item is not None:\n parsed.update(_parse_segment(item))\n return parsed", "def _ParseExtensionsString(self, extensions_string):\n if not extensions_string:\n return\n\n extensions_string = extensions_string.lower()\n extensions = [\n extension.strip() for extension in extensions_string.split(u',')]\n file_entry_filter = file_entry_filters.ExtensionsFileEntryFilter(extensions)\n self._filter_collection.AddFilter(file_entry_filter)", "def _parse_parameters(self, parameters_text):\n for mo in re.finditer(self._PARAMETERS_RE, parameters_text):\n self._parameters.append(Parameter(mo.group(\"param_name\"), mo.group(\"default_value\")))", "def parse_movie_title(file_name):\n\tmovie_name = os.path.basename(file_name)\n\tmovie_name = parsers.remove_extension(file_name)\n\tmovie_name = movie_name.lower()\n\tmovie_name = parsers.fix_word_seperators(movie_name)\n\tmovie_name = parsers.remove_tags(movie_name)\n\tmovie_name = parsers.remove_resolution(movie_name)\n\tmovie_name = parsers.remove_keywords(movie_name)\n\tmovie_name = parsers.remove_year(movie_name)\n\tmovie_name = parsers.remove_trailing_symbols(movie_name)\n\tmovie_name = parsers.remove_trailing_crap(movie_name)\n\tmovie_name = parsers.fix_the_at_the_end(movie_name)\n\tmovie_name = parsers.fix_a_at_the_end(movie_name)\n\tmovie_name = parsers.remove_double_spaces(movie_name)\n\tmovie_name = movie_name.strip()\n\tmovie_name = parsers.recapitalize(movie_name)\n\treturn movie_name", "def from_text(cls, filename, alpha=None, pat=None, pat_args=None,\n auto_fields=None):\n with open(os.path.expanduser(filename), encoding='utf-8') as f:\n return cls(f, alpha, pat, pat_args)", "def parse_parameters(filePath):\r\n numThreads, queue, affinity = 0,\"\",\"\"\r\n \r\n for line in open(filePath):\r\n if \"spec.omp2001.size:\" in line:\r\n if get_last_column_number(line)==\"test\":\r\n print(\"IS TEST SIZE!!1 : \" + filePath)\r\n \r\n if \"spec.omp2001.sw_threads:\" in line:\r\n numThreads = int(get_last_column_number(line))\r\n \r\n if 
\"spec.omp2001.mach:\" in line:\r\n machine = line.split(\" \")[-1]\r\n columns = machine.split(\".\")\r\n \r\n queue = columns[0]\r\n affinity = columns[1]\r\n \r\n return numThreads, queue, affinity", "def parse_data(fp):\n pass", "def _get_info_from_filename(filename: str) -> dict:\n *parts, suffix = filename.split('.')\n dct = re.match(r'^(?P<name>[A-z0-9.]*)(-(?P<num_rows>[0-9]+))?$', '.'.join(parts)).groupdict()\n return {\n 'name': dct['name'],\n 'num_rows': int(dct['num_rows']) if dct['num_rows'] else None,\n 'format': suffix,\n }", "def import_parameters(self, file_name):\n parameters = []\n\n with open(file_name) as in_file:\n parameters = json.load(in_file)\n\n if parameters:\n self.put_parameters(parameters)", "def name_parser(string):\n return string.replace('\\n', ' ')", "def process(self, filename: str, contents: str) -> None:\n self._current_file_decorators = set()\n self._current_file = filename\n try:\n parsed = ast.parse(contents, filename=filename)\n except Exception as e: # pylint: disable=broad-exception-caught\n # logging errors when parsing file\n logging.exception('Error parsing %s: %s', filename, e)\n else:\n self.visit(parsed)\n finally:\n self._current_file = None\n self._current_file_decorators = set()", "def parse_file(self, file_name):\n\n with open(file_name, \"r\") as input_file:\n file_contents = input_file.read()\n\n \"\"\"\n Regex is done on line by line basis - to ensure that irrespective\n of the formatting all docstrings are identified and all variable\n specifications are found.\n \"\"\"\n file_contents = file_contents.replace(\"\\n\", \"NEWLINE\")\n\n docstrings = self.get_docstrings(file_contents)\n variable_declarations = self.select_variable_declarations(docstrings)\n variable_declarations = [x.replace(\"NEWLINE\", \"\\n\") for x in variable_declarations]\n return variable_declarations", "def parse(self, script_str):\n lines = script_str.split('\\n')\n for line in lines:\n self.parse_line(line.strip())", "def fileparse(filename, node):\n\n fd = open(filename)\n line = fd.readline().strip('\\r\\n')\n\n while line != '':\n node.Add(line, node)\n line = fd.readline().strip('\\r\\n')", "def parse_infile_names(self):\n\n rv, slist = UTIL.list_minus_pref_suf(self.infiles,'out.ss_review.','.txt')\n if rv < 0: return\n if rv > 0:\n if self.verb > 1: print('++ trying to get SID from glob form')\n slist = UTIL.list_minus_glob_form(self.infiles, strip='dir')\n else:\n if self.verb > 1: print(\"++ have SIDs from 'out.ss_reiview' form\")\n\n if len(slist) == 0:\n if self.verb > 1: print(\"-- empty SID list\")\n return\n\n # make sure names are unique and not empty\n if not UTIL.vals_are_unique(slist):\n if self.verb > 1: print('-- SIDs not detected: not unique')\n return\n minlen = min([len(ss) for ss in slist])\n if minlen < 1:\n if self.verb > 1: print('-- SIDs not detected: some would be empty')\n return\n\n # we have a subject list\n self.snames = slist\n\n # now go for GID, start by replacing SIDs in infiles\n newfiles = [fname.replace(slist[ind], 'SUBJ') for ind, fname in\n enumerate(self.infiles)]\n\n if UTIL.vals_are_constant(newfiles):\n print('-- no groups detected from filenames')\n return\n\n # okay, try to make a group list\n glist = UTIL.list_minus_glob_form(newfiles)\n\n # cannot have dirs in result\n for gid in glist:\n if gid.find('/') >= 0:\n if self.verb>1: print('-- no GIDs, dirs vary in multiple places')\n return\n\n minlen = min([len(ss) for ss in glist])\n if minlen < 1:\n if self.verb > 1: print('-- GIDs not detected: some would be 
empty')\n return\n\n if self.verb > 1: print(\"++ have GIDs from infiles\")\n self.gnames = glist", "def parse_file(filename, db_factory, load_job_id, error_handler):\n db = db_factory()\n log = parse_file.get_logger()\n log.info('loading from %s', filename)\n try:\n num_cases = 0\n with codecs.open(filename, 'r', encoding='utf-8') as f:\n parser = vtr.Parser()\n for case in parser.parse(f):\n log.info('New case: %s/%s', case['book'], case['number'])\n num_cases += 1\n\n # Store the book\n try:\n db.books.update({'_id': case['book']},\n {'$set': {'year': int(case['book'].split('/')[0]),\n 'number': case['book'].split('/')[1],\n },\n '$addToSet': {'load_jobs': load_job_id,\n },\n },\n upsert=True,\n )\n except Exception as err:\n log.error('Could not store book %s: %s', case['book'], err)\n error_handler(unicode(err))\n\n # Store the case\n case['_id'] = '%s/%s' % (case['book'], case['number'])\n # associate the case record with the job for auditing\n case['load_job_id'] = load_job_id\n # pick a \"date\" for the case\n case['date'] = case.get('hearing_date') or case.get('arrest_date')\n try:\n db.cases.update({'_id': case['_id']},\n case,\n upsert=True,\n )\n except Exception as err:\n log.error('Could not store case %s: %s', case['_id'], err)\n error_handler(unicode(err))\n\n # Add participant info\n\n # index for upsert\n for p in get_encoded_participants(case, error_handler):\n #log.info('new participant: %r', p)\n p['case_id'] = case['_id']\n p['case_number'] = case['number']\n p['date'] = case['date']\n try:\n db.participants.update(\n {'case': case['_id'],\n 'encoding': p['encoding'],\n 'full_name': p['full_name'],\n 'role': p['role'],\n },\n p,\n upsert=True,\n )\n except Exception as err:\n log.error('Could not store participant %s for case %s: %s',\n p['_id'], case['_id'], err)\n error_handler(unicode(err))\n\n # Handle errors that did not result in new case records.\n errors = ['Parse error at %s:%s \"%s\" (%s)' % \\\n (filename, num, line, err)\n for num, line, err in parser.errors\n ]\n for e in errors:\n error_handler(e)\n except (OSError, IOError) as err:\n msg = unicode(err)\n errors = [msg]\n error_handler(msg)\n return {'errors': errors,\n 'num_cases': num_cases,\n }", "def __init__(self, f):\n self.index = -1\n self.__tokenized = []\n file = self.__readFile(f)\n while self.__commentHandler(file, COMMENT, NEW_LINE):\n file = self.__commentHandler(file, COMMENT, NEW_LINE)\n file = file.replace(NEW_LINE, \" \")\n while self.__commentHandler(file, MULTI_LINE_COMMENT_START, MULTI_LINE_COMMENT_END):\n file = self.__commentHandler(file, MULTI_LINE_COMMENT_START, MULTI_LINE_COMMENT_END)\n self.__regexMaster(file)", "def dispatch(self, filename):\n\n parser = self.find_parser(filename)\n if parser:\n parser.tell({\n 'command': 'parse',\n 'filename': filename\n })\n else:\n log.info('No parser for filename: {}'.format(filename))", "def from_text_file(cls, filename):\n raise NotImplementedError()", "def parse(self, filename):\r\n return self.fromtree( ElementTree.parse(filename) )" ]
[ "0.5459443", "0.5448573", "0.5429191", "0.52798533", "0.5271916", "0.5247543", "0.52416116", "0.5215298", "0.51574737", "0.5120585", "0.5072528", "0.5036985", "0.5022814", "0.4980182", "0.49415386", "0.4940157", "0.49290508", "0.49222475", "0.49006563", "0.48977634", "0.48940113", "0.48884514", "0.48853", "0.48508862", "0.4831189", "0.4828288", "0.48280442", "0.48250598", "0.47992033", "0.47972468", "0.47771767", "0.4773233", "0.47715867", "0.47603905", "0.47384235", "0.47292638", "0.47249687", "0.4703285", "0.46953326", "0.469194", "0.4672991", "0.4664157", "0.4663032", "0.46629053", "0.46596935", "0.465447", "0.4654159", "0.46506816", "0.46425876", "0.46396428", "0.46296486", "0.46296486", "0.46249697", "0.4622102", "0.46015215", "0.45960686", "0.45929492", "0.45894977", "0.45848167", "0.45807895", "0.4567585", "0.4559083", "0.45490068", "0.4545571", "0.45451957", "0.45289043", "0.45219862", "0.45218718", "0.4517237", "0.45171303", "0.45155457", "0.4512571", "0.4508418", "0.44733927", "0.44696665", "0.4468715", "0.44625285", "0.44496748", "0.4448394", "0.4446431", "0.4444349", "0.44441938", "0.44403014", "0.4435095", "0.44340175", "0.4433671", "0.44309166", "0.4430827", "0.4424339", "0.4423634", "0.4415864", "0.44147328", "0.4407466", "0.44026887", "0.44003782", "0.43977904", "0.43969172", "0.43946016", "0.43860486", "0.43853277" ]
0.5895396
0
Print the data in slice iz, row ix of an image to standard out.
def print_row(input, ix=0, iz=0):
	image=get_image(input)
	nx = image.get_xsize()
	ny = image.get_ysize()
	nz = image.get_zsize()
	print "(z = %d slice, x = %d row)" % (iz, ix)
	line = []
	for iy in xrange(ny):
		line.append("%12.5g " % (image.get_value_at(ix,iy,iz)))
		if ((iy + 1) % 5 == 0): line.append("\n ")
	line.append("\n")
	print "".join(line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_image_row(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny-1,-1,-1):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image(data, index):\n image = data[index]\n for line in image:\n print(line)", "def print_image_col(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image(input):\n\timage=get_image(input)\n\tnz = image.get_zsize()\n\tfor iz in xrange(nz): print_slice(input, iz)", "def dump_row(input, fname, ix=0, iz=0):\n\tfout = open(fname, \"w\")\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tfout.write(\"# z = %d slice, x = %d row)\\n\" % (iz, ix))\n\tline = []\n\tfor iy in xrange(ny):\n\t\tfout.write(\"%d\\t%12.5g\\n\" % (iy, image.get_value_at(ix,iy,iz)))\n\tfout.close()", "def print_img_at_idx(self, idx):\n\n img = self.images[idx]\n print_img(img)", "def print_col(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_img_data(img,title='Image'):\n print(f'________ {title} _______')\n if len(image.shape) == 2:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0])}\\n')\n elif len(image.shape) == 3:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0,0])}\\n')", "def print_image_slice_3d(input, num=0,direction=\"z\"):\n\t#print \"print slice at 3 directions\"\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = 
image.get_zsize()\n\tif(direction==\"x\"):\n\t\t#print \"xxxxx\"\n\t\tix=num\n\t\tprint \"(x = %d slice)\" % (ix)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor iy in xrange(ny):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((iy + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(ny%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telif(direction==\"y\"):\n\t\t#print \"yyy\"\n\t\tiy=num\n\t\tprint \"(y = %d slice)\" % (iy)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telse:\n\t\t#print \"zzzz\"\n\t\tiz=num\n\t\tprint \"(z = %d slice)\" % (iz)\n\t\tline = []\n\t\tfor iy in xrange(ny-1,-1,-1):\n\t\t\tline.append(\"Row \")\n\t\t\tline.append(\"%4i \" % iy)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)", "def show(self):\n m = [xo_convert(int(x)) for x in np.nditer(self.arr)]\n print(\"{} | {} | {}\".format(*m[:3]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[3:6]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[6:]))\n print()", "def printMat(image):\n for row in range(image.rows):\n print \"[\",\n for col in range(image.cols):\n print cv.mGet(image, row, col),\n print \"]\"\n print \"\"", "def print_image(img):\n for k in img:\n print(\"\".join([str(x) for x in k] ))", "def print_slice(complex_slice, name = None):\n if name: print(\"***********\" + name + \"**********\")\n slice = complex_slice.clone()\n slice = slice.detach()\n slice = slice.cpu()\n slice = slice.permute(0, 2, 3, 1)\n slice = slice.squeeze()\n slice_image_abs = fastmri.complex_abs(slice)\n plt.imshow(slice_image_abs, cmap = 'gray')\n plt.show()", "def pretty_print(image_example):\n print numpy.array_str(image_example, precision=1, max_line_width=142)", "def print_real_image(r_img, name = None, batch_size = BATCH_SIZE):\n if name: print(\"***********\" + name + \"**********\")\n to_print = []\n c_img = r_img.clone()\n for i in range(batch_size):\n img = c_img[i,:,:]\n img = torch.squeeze(img)\n img = img.detach()\n img = img.cpu()\n plt.imshow(img, cmap = 'gray')\n plt.show()", "def print_image(self, width, data):\n if len(data) % width:\n raise ValueError('Bad image format, length of data must be '\n 'divisible by width.')\n height = len(data) / width\n\n # send line-by-line\n for row in range(height):\n self.send_command('print_bitmap', 1, width)\n self.port.write(data[row*width:(row+1)*width], is_text=False)\n self.port.fed_dots(1)", "def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):\r\n\r\n filenameprefix=self.name_prefix\r\n\r\n def printdatarow(dat, iteration):\r\n \"\"\"print data of iteration i\"\"\"\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % 
(dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))\r\n\r\n dat = CMADataLogger(filenameprefix).load()\r\n ndata = dat.f.shape[0]\r\n\r\n # map index to iteration number, is difficult if not all iteration numbers exist\r\n # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long\r\n # otherwise:\r\n if idx is None:\r\n idx = 100\r\n if np.isscalar(idx):\r\n # idx = np.arange(0, ndata, idx)\r\n if idx:\r\n idx = np.r_[0, 1, idx:ndata-3:idx, -3:0]\r\n else:\r\n idx = np.r_[0, 1, -3:0]\r\n\r\n idx = array(idx)\r\n idx = idx[idx<ndata]\r\n idx = idx[-idx<=ndata]\r\n iters = dat.f[idx, 0]\r\n idxbest = np.argmin(dat.f[:,5])\r\n iterbest = dat.f[idxbest, 0]\r\n\r\n if len(iters) == 1:\r\n printdatarow(dat, iters[0])\r\n else:\r\n self.disp_header()\r\n for i in iters:\r\n printdatarow(dat, i)\r\n self.disp_header()\r\n printdatarow(dat, iterbest)\r\n sys.stdout.flush()", "def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):\r\n\r\n filenameprefix=self.name_prefix\r\n\r\n def printdatarow(dat, iteration):\r\n \"\"\"print data of iteration i\"\"\"\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))\r\n\r\n dat = CMADataLogger(filenameprefix).load()\r\n ndata = dat.f.shape[0]\r\n\r\n # map index to iteration number, is difficult if not all iteration numbers exist\r\n # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long\r\n # otherwise:\r\n if idx is None:\r\n idx = 100\r\n if np.isscalar(idx):\r\n # idx = np.arange(0, ndata, idx)\r\n if idx:\r\n idx = np.r_[0, 1, idx:ndata-3:idx, -3:0]\r\n else:\r\n idx = np.r_[0, 1, -3:0]\r\n\r\n idx = array(idx)\r\n idx = idx[idx<=ndata] # TODO: shouldn't this be \"<\"?\r\n idx = idx[-idx<=ndata]\r\n iters = dat.f[idx, 0]\r\n idxbest = np.argmin(dat.f[:,5])\r\n iterbest = dat.f[idxbest, 0]\r\n if len(iters) == 1:\r\n printdatarow(dat, iters[0])\r\n else:\r\n self.disp_header()\r\n for i in iters:\r\n printdatarow(dat, i)\r\n self.disp_header()\r\n printdatarow(dat, iterbest)\r\n sys.stdout.flush()", "def printImage(imageObject):\n # TODO\n pass", "def imshow(image):\n iio.imshow(dtype.im2uint(image))", "def explore_data(dataset, start, end, rows_and_columns=False):\r\n for i in range(start,end):\r\n print(dataset[i],end=\"\\n\")", "def ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\n \"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, format='%s', itemsize=%s, flags=%s)\"\n % (x, nd.shape, nd.strides, nd.suboffsets, offset, nd.format, nd.\n itemsize, flags))\n sys.stdout.flush()", "def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)", "def show_image(image):\n print('-' * (len(image) + 4))\n for line in image:\n print('| ', end='')\n for ch in line:\n char = '#' if ch is True else '.'\n print(char, end='')\n print(' |')\n print('-' * (len(image) + 4))", "def print_info(self, i):\n\n 
im_size = self.image_size(i)\n print 'The path of the image is: {}'.format(self.image_path_at(i))\n print 'width: {}, height: {}'.format(im_size[0], im_size[1])\n \n attr_i = self.gtdb['attr'][i, :]\n print 'The attributes are: {}'.format(','.join([self._classes[i] for i in np.where(attr_i==1)[0]]))", "def printdatarow(dat, iteration):\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))", "def printdatarow(dat, iteration):\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))", "def print_image(self, pil_image):\n bw_image = pil_image.convert('1', dither=Image.FLOYDSTEINBERG)\n width = bw_image.width\n height = bw_image.height\n img_bytes = list(bw_image.getdata())\n self.print_bitmap(width, height, img_bytes)", "def show_slice(file_path, x, y, z):\n img = nib.load(file_path)\n img = img.get_fdata()\n print(\"The scan has dimensions {}.\".format(img.shape))\n slice_0 = img[x, :, :]\n slice_1 = img[:, y, :]\n slice_2 = img[:, :, z]\n slices = [slice_0, slice_1, slice_2]\n fig, axes = plt.subplots(1, len(slices), figsize=[12, 4])\n for i, sli in enumerate(slices):\n axes[i].imshow(sli.T, cmap=\"gray\", origin=\"lower\")", "def print_array(x, idx=slice(None), message=None, message_prefix=\"SHIM - \",\n file=sys.stdout):\n return set_subtensor(x[idx],\n print(x[idx],\n message=message,\n message_prefix=message_prefix,\n file=file\n )\n )", "def ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, \"\n \"format='%s', itemsize=%s, flags=%s)\" %\n (x, nd.shape, nd.strides, nd.suboffsets, offset,\n nd.format, nd.itemsize, flags))\n sys.stdout.flush()", "def display_data():\n data = read_data_from_file()\n data_size = len(data)\n print('Data size - ' + str(data_size))\n\n\n for i in range(data_size-1, 0, -1):\n image = data[i]\n cv2.imshow('window', image[0])\n print(str(image[1]))\n cv2.waitKey(50)", "def collatz_print(w, i, j, v):\n\tw.write(str(i) + \" \" + str(j) + \" \" + str(v) + \"\\n\")", "def _showdata(self, prec=4):\n print('nh {0:d} nslices {1:d} nbl {2:d} ncp {3:d} nca {4:d} '.format(\n self.nh, self.nslices, self.nbl, self.ncp, self.nca), end=\"\")\n print(\"observables in np arrays with {:d} rows\".format(self.nslices))\n\n if len(self.observables) == 4:\n print('nca', self.nca)\n else:\n print()\n np.set_printoptions(precision=prec)\n\n print(self.fp.shape, \"fp (degrees, but stored internally in radians):\\n\",\n self.fp*self.degree, \"\\n\")\n print(self.fa.shape, \"fa:\\n\", self.fa, \"\\n\")\n\n print(self.cp.shape, \"cp (degrees, but stored internally in radians):\\n\",\n self.cp*self.degree, \"\\n\")\n if len(self.observables) == 4:\n print(self.ca.shape, \"ca:\\n\", self.ca, \"\\n\")\n # print(self.info4oif_dict)\n\n print(\"hole centers array shape:\", self.ctrs.shape)\n\n print(len(self.bholes), \"baseline hole indices\\n\", self.bholes)\n print(self.bls.shape, 
\"baselines:\\n\", self.bls)\n\n print(self.tholes.shape, \"triple hole indices:\\n\", self.tholes)\n print(self.tuv.shape, \"triple uv vectors:\\n\", self.tuv)\n\n print(self.qholes.shape, \"quad hole indices:\\n\", self.qholes)\n print(self.quvw.shape, \"quad uvw vectors:\\n\", self.quvw)", "def _print_image(self, line, size):\n i = 0\n cont = 0\n\n self._write(self.__class__.__imageSize['1x1'])\n buffer = bytearray([int((size[0] / size[1]) / 8), 0, size[1], 0])\n\n self._write(buffer)\n buffer = bytearray()\n\n while i < len(line):\n hex_string = int(line[i:i + 8], 2)\n buffer.append(hex_string)\n i += 8\n cont += 1\n if cont % 4 == 0:\n self._write(buffer)\n buffer = bytearray()\n cont = 0", "def print_row(row,writer,x):\n sys.stdout.write(unichr(0x2503))\n for n in xrange(row.shape[0]-1):\n writer(row[n],Width,(x,n))\n sys.stdout.write(unichr(0x2502))\n if row.shape[0] > 0:\n writer(row[-1],Width,(x,row.shape[0]-1))\n sys.stdout.write(unichr(0x2503) + '\\n')", "def print(self):\n self.__print_local(self.dataset, 0)", "def print_wrapped(data, ncols=3):\r\n nrows = len(data)\r\n labels = data.index\r\n n_split_rows = int(np.ceil(nrows / ncols))\r\n for r in range(0, nrows, ncols):\r\n for c in range(ncols):\r\n try:\r\n numstr = '{}'.format(data[r + c])\r\n tabs = [' '] * (20 - len(labels[r + c]) - len(numstr))\r\n print(labels[r + c] + \"\".join(tabs) + numstr, end='\\t')\r\n except:\r\n pass\r\n print()", "def display(self):\n for row in self.tile_rows:\n print(row)", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n page = 0xB0\n pix = list(image.getdata())\n step = self.width * 8\n for y in xrange(0, self.pages * step, step):\n\n # move to given page, then reset the column address\n self.command(page, 0x02, 0x10)\n page += 1\n\n buf = []\n for x in xrange(self.width):\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[x + y + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n\n self.data(buf)", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n page = 0xB0\n pix = list(image.getdata())\n step = self.width * 8\n for y in xrange(0, self.pages * step, step):\n\n # move to given page, then reset the column address\n self.command(page, 0x02, 0x10)\n page += 1\n\n buf = []\n for x in xrange(self.width):\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[x + y + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n\n self.data(buf)", "def print_image(img):\r\n # On affiche l'image\r\n plt.figure(figsize=(20, 5))\r\n plt.subplot(1, 2, 1)\r\n plt.imshow(img)\r\n # On affiche l'histogramme\r\n plt.subplot(1, 2, 2)\r\n plt.hist(img.flatten(), bins=range(256))\r\n plt.show()", "def display_napari(pos_img):\n global data\n global img_queue\n if pos_img is None:\n return\n # read image and z position\n image = np.reshape(pos_img[2:],(clip[0], clip[1]))\n z_pos = pos_img[1]\n color = pos_img[0]\n\n # write image into correct slice of data and update display\n data[z_pos] = np.squeeze(image)\n layer = viewer.layers[color]\n layer.data = data\n #print(\"updating \", z_pos, color)\n\n img_queue.task_done()", "def pprint(self, data):\n self._assert(data)\n data = self._render(data) # make elements ascii\n fmats = self._fmats(data) # get array of padding formats)\n for row in data:\n print(fmats.format(*row))", "def info_np(img):\n import numpy as np\n\n print ('Dimensions: ' + str(np.shape(img)))\n print 
('Min value: ' + str(np.min(img)))\n print ('Avg value: ' + str(np.average(img)))\n print ('Med value: ' + str(np.median(img)))\n print ('Max value: ' + str(np.max(img)))\n print ('Std dev: ' + str(np.std(img)))\n print ('Sum: ' + str(np.sum(img)))", "def display(self):\n for row in range(self.height):\n for col in range(self.width):\n char = '#' if self.pixels[row * self.width + col] else '.'\n print(char, end='')\n print()\n print()", "def print_seg_row_col(sp) : \n s, r, c = src_from_rc8x8(sp.peak_y_raw, sp.peak_x_raw)\n print('seg: %d, row: %.1f, col: %.1f' % (s, r, c))", "def collatz_print (w, i, j, v) :\n w.write(str(i) + \" \" + str(j) + \" \" + str(v) + \"\\n\")", "def show_ipv(data: np.ndarray):\n import ipyvolume as ipv\n return ipv.quickvolshow(data)", "def show_np(mat):\n for x in range(15):\n for y in range(15):\n if (x == 7) and (y == 7):\n print(\"\\033[%d;%d;%dm**\\033[0m\" % (0, 33, 41), end='')\n elif mat[x, y, 0] > 0:\n print(\"\\033[%d;%d;%dm \\033[0m\" % (0, 31, 41), end='')\n elif mat[x, y, 1] > 0:\n print(\"\\033[%d;%d;%dm \\033[0m\" % (0, 32, 42), end='')\n else:\n print(\" \", end='')\n print(\"\")", "def pixdump( source, start=None, end=None, length=None, width=64, height=None, palette=None ):\n\n for line in pixdump_iter( source, start, end, length, width, height, palette ):\n print( line )", "def show(self):\r\n \r\n clear() \r\n print \" \" + \"-\" * self.__width + \" \"\r\n \r\n for row in self.__buffer:\r\n rowData = \"\".join(str(i) for i in row)\r\n print \"|\" + rowData + \"|\"\r\n\r\n print \" \" + \"-\" * self.__width + \" \"\r\n self.clearBuffer()", "def _convert_and_print_image(self, im):\n pixLine = \"\"\n imLeft = \"\"\n imRight = \"\"\n switch = 0\n imgSize = [0, 0]\n\n if im.size[0] > 512:\n print (\"WARNING: Image is wider than 512 and could be truncated at print time \")\n if im.size[1] > 255:\n raise ValueError(\"Image Height larger than 255\")\n\n imBorder = self._check_image_size(im.size[0])\n for i in range(imBorder[0]):\n imLeft += \"0\"\n for i in range(imBorder[1]):\n imRight += \"0\"\n\n for y in range(im.size[1]):\n imgSize[1] += 1\n pixLine += imLeft\n imgSize[0] += imBorder[0]\n for x in range(im.size[0]):\n imgSize[0] += 1\n RGB = im.getpixel((x, y))\n imColor = (RGB[0] + RGB[1] + RGB[2])\n imPattern = \"1X0\"\n patternLen = len(imPattern)\n switch = (switch - 1) * (-1)\n for x in range(patternLen):\n if imColor <= (255 * 3 / patternLen * (x + 1)):\n if imPattern[x] == \"X\":\n pixLine += \"%d\" % switch\n else:\n pixLine += imPattern[x]\n break\n elif imColor > (255 * 3 / patternLen * patternLen) and imColor <= (255 * 3):\n pixLine += imPattern[-1]\n break\n pixLine += imRight\n imgSize[0] += imBorder[1]\n\n self._print_image(pixLine, imgSize)", "def show(self, data):\n if isinstance(data, (numpy.ndarray, h5py.Dataset)):\n isAtomic = len(data.shape) == 0\n isCurve = len(data.shape) == 1 and numpy.issubdtype(data.dtype, numpy.number)\n isImage = len(data.shape) == 2 and numpy.issubdtype(data.dtype, numpy.number)\n if isAtomic:\n self.showAsString(data)\n elif isCurve:\n self.show1d(data)\n elif isImage:\n self.show2d(data)\n else:\n self.showAsString(data)\n else:\n self.showAsString(data)", "def display(self):\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(\" \", end=\"\")\n for row in range(self.width):\n print(\"#\", end=\"\")\n print()", "def info(self):\n\n print(\"pixellisation:\", 
self.pixel)\n print(\"number of components:\", self.ncomp)\n print(\"number of pixels:\", self.data.shape[:] if self.ncomp == 1 else self.data.shape[1:])\n print(\"nside:\", self.nside)\n print(\"geometry:\", self.geometry)\n print(\"coordinates:\", self.coordinate)", "def print_azeltables(inviews, ic):\n for i in range(0, len(inviews)):\n print \" \"\n print \"Az/El for inview %s to %s\" % (inviews[i][0], inviews[i][1])\n azels = ic.compute_azels(inviews[i][0], inviews[i][1], 15)\n for j in range(0, len(azels)):\n print \"At %s, azimuth=%8.2f, elevation=%8.2f\" % \\\n (azels[j][0], azels[j][1], azels[j][2])", "def printImage(currentImage):\n\tprint currentImage + ' is set to be printed...'", "def __printRow(self, i):\n if i < len(self.__data) and i >= self.__firstShownLine and \\\n i < self.__firstShownLine + self.height - 2:\n text = self.__formatString % self.__data[i]\n self._window.addnstr(i - self.__firstShownLine + 1, 1, text,\n self.width - 2)", "def print_bitmap(self, w, h, image):\n\n bitmap = self._pack_bitmap(w, h, image)\n\n row_bytes = (w + 7) // 8 # Round up to next byte boundary\n\n if row_bytes >= 48:\n row_bytes_clipped = 48\n else:\n row_bytes_clipped = row_bytes # 384 pixels max width\n\n # Est. max rows to write at once, assuming 256 byte printer buffer.\n if self._dtr_enabled:\n chunk_height_limit = 255 # Buffer doesn't matter, handshake!\n else:\n chunk_height_limit = 256 // row_bytes_clipped\n if chunk_height_limit > self._max_chunk_height:\n chunk_height_limit = self._max_chunk_height\n elif chunk_height_limit < 1:\n chunk_height_limit = 1\n\n row_start = 0\n i = 0\n while row_start < h:\n # Issue up to chunkHeightLimit rows at a time:\n chunk_height = h - row_start\n if chunk_height > chunk_height_limit:\n chunk_height = chunk_height_limit\n\n self.write(self.ASCII_DC2, '*', chunk_height, row_bytes_clipped)\n\n y = 0\n while y < chunk_height:\n x = 0\n while x < row_bytes_clipped:\n self.timeout_wait()\n self._send_to_printer(int(bitmap[i]))\n x += 1\n i += 1\n\n y += 1\n\n i += row_bytes - row_bytes_clipped\n\n self.timeout_set(chunk_height * self._dot_print_time)\n\n row_start += chunk_height_limit\n\n self._prev_byte = '\\n'", "def myprint(dataset, indent=0):\n dont_print = ['Pixel Data', 'File Meta Information Version']\n\n indent_string = \" \" * indent\n next_indent_string = \" \" * (indent + 1)\n\n for data_element in dataset:\n if data_element.VR == \"SQ\": # a sequence\n print(indent_string, data_element.name)\n for sequence_item in data_element.value:\n myprint(sequence_item, indent + 1)\n print(next_indent_string + \"---------\")\n else:\n if data_element.name in dont_print:\n print(\"\"\"<item not printed -- in the \"don't print\" list>\"\"\")\n else:\n repr_value = repr(data_element.value)\n if len(repr_value) > 50:\n repr_value = repr_value[:50] + \"...\"\n print(\"{0:s} {1:s} = {2:s}\".format(indent_string,\n data_element.name,\n repr_value))", "def display(self):\n for row0 in range(self.y):\n print()\n for row in range(self.height):\n for column0 in range(self.x):\n print(\" \", end=\"\")\n for column in range(self.width):\n print(\"#\", end=\"\")\n print()", "def display(array):\n plt.figure()\n plt.imshow(array)\n plt.show()", "def display_cropped_img(i):\n image = PIL.Image.open(testing_img_paths[i])\n image = image.crop(box=(313,99,825,611))\n image = image.resize((256,256))\n display(image)", "def view_image(train_dataloader):\n for (x, target) in train_dataloader:\n np.save(\"img.npy\", x)\n print(x.shape)\n exit(0)", "def 
printdata(self,whichstream_,firsti_,lasti_,firstj_,lastj_,firstk_,lastk_,c_,qo_,a_,qc_,bc_,bx_,vartype_,cones_): # 3\n if not isinstance(whichstream_,streamtype): raise TypeError(\"Argument whichstream has wrong type\")\n res = self.__obj.printdata(whichstream_,firsti_,lasti_,firstj_,lastj_,firstk_,lastk_,c_,qo_,a_,qc_,bc_,bx_,vartype_,cones_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n self.command(\n const.COLUMNADDR, 0x00, self.width-1, # Column start/end address\n const.PAGEADDR, 0x00, self.pages-1) # Page start/end address\n\n pix = list(image.getdata())\n step = self.width * 8\n buf = []\n for y in xrange(0, self.pages * step, step):\n i = y + self.width-1\n while i >= y:\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[i + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n i -= 1\n\n self.data(buf)", "def show(self):\n \n \n \n \n \n \n r = 4\n f, axarr = plt.subplots(r, r, figsize=(8,8))\n counter = 0\n for i in range(r):\n for j in range(r):\n temp = self.x[counter,:]\n counter += 1\n img = self.x[counter,:]\n axarr[i][j].imshow(img)\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################", "def displayData(indices_to_display = None):\n width, height = 20, 20\n nrows, ncols = 10, 10\n if not indices_to_display:\n indices_to_display = random.sample(range(X.shape[0]), nrows*ncols)\n \n big_picture = np.zeros((height*nrows,width*ncols))\n \n irow, icol = 0, 0\n for idx in indices_to_display:\n if icol == ncols:\n irow += 1\n icol = 0\n iimg = getDatumImg(X[idx])\n big_picture[irow*height:irow*height+iimg.shape[0], icol*width:icol*width+iimg.shape[1]] = iimg\n icol += 1\n fig = plt.figure(figsize=(6,6))\n\n big_picture = (big_picture * 255).astype(np.int8)\n img = Image.fromarray(big_picture, mode='L')\n plt.imshow(img, cmap = cm.Greys)", "def display_image(X):\n\n\tim = X.reshape(28, 28)\n\ttemp = plt.imshow(im)\n\tplt.show()", "def display(self):\n for x, p in zip(self.xs, self.ps):\n print(x, p)", "def my_print(self):\n if self.__size > 0:\n for k in range(self.__position[1]):\n print()\n for i in range(self.__size):\n for j in range(self.__position[0]):\n print(\" \", end='')\n print(\"#\" * self.__size)\n else:\n print()", "def print_images(i, df):\n \n images_folder_path = \"dataset/petfinder-adoption-prediction/train_images/\"\n plt.imshow(cv2.cvtColor(cv2.imread(images_folder_path+df.filename[i]), cv2.COLOR_BGR2RGB),);\n plt.axis(\"off\");\n plt.show()", "def display(self):\n for b in range(self.y):\n print()\n for i in range(self.height):\n print(\" \" * self.x + \"#\" * self.width)", "def imshow(self, depth):\n layer = self.cube[depth]\n img = []\n for i in range(self.height):\n img.append([layer[i][j].value for j in range(self.width)])\n plt.imshow(img, cmap='gray')\n plt.show()", "def show_input_to_output(img_ns):\n figure()\n \n sp = subplot(1, 2, 1).imshow(img_ns.img)\n sp.axes.grid(False)\n sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Input Image\", fontsize=10);\n outimg = tiles_to_images(img_ns, img_ns.tile_grid, img_ns.tile_catalog, img_ns.tile_size)\n sp = subplot(1, 2, 2).imshow(outimg.astype(np.uint8));\n sp.axes.tick_params(bottom=False, left=False, 
which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Output Image From Tiles\", fontsize=10);\n sp.axes.grid(False)\n #print(outimg.astype(np.uint8))\n #print(img_ns)\n plt.savefig(img_ns.output_filename + \"_input_to_output.pdf\", bbox_inches=\"tight\")\n plt.close()", "def display(self):\n width = 1 + max(len(self.values[s]) for s in self.boxes)\n line = 'x'.join(['-'*(width*3)]*3)\n for r in self.rows:\n print(''.join(self.values[r+c].center(width)+('|' if c in '36' else '')\n for c in self.cols))\n if r in 'CF': print(line)\n print", "def display(self):\n result = []\n horizontal_bounds, vertical_bounds = self.get_bounds()\n min_x, max_x = horizontal_bounds\n min_y, max_y = vertical_bounds\n\n # xrange is inclusive in start and exclusive in end, IE [s..e)\n # so we add +1 offset to be safe\n for y in xrange(max_y, min_y - 1, -1):\n # since we have three rows and we're still relying on print,\n # displaying gets a bit dirty\n # will get cleaner once we move to something like HTML\n row_tiles = [self.tile(x, y) for x in xrange(min_x, max_x + 1)]\n\n # now we have to print each of the three rows together.\n # zip to aggregate each of the top, middle, bottom rows\n row_lines = zip(*[str(tile).split(\"\\n\") for tile in row_tiles])\n for line in row_lines:\n result.append(\"\".join(line))\n\n return \"\\n\".join(result)", "def print_stats(cars, notcars):\n print(\"Number of car samples: {0}\".format(len(cars)))\n print(\"Number of non car samples: {0}\".format(len(notcars)))\n img = cv2.imread(cars[0])\n print(\"Image shape: {0}x{1}\".format(img.shape[0], img.shape[1]))\n print(\"Image datatype: {}\".format(img.dtype))", "def display(self):\n for i in range(self.y):\n print()\n for i in range(self.height):\n for k in range(self.x):\n print(' ', end='')\n for j in range(self.width):\n print('#', end='')\n print()", "def print_image_info(input_image):\n print()\n print(\"Basic Information on image: {}\".format(input_image.filename))\n print(\"Format: {}\".format(input_image.format))\n print(\"Mode: {}\".format(input_image.mode))\n print(\"Size: {}\".format(input_image.size))\n print(\"Width: {}\".format(input_image.width))\n print(\"Height: {}\".format(input_image.height))\n print(\"Palette: {}\".format(input_image.palette))\n print()", "def show_image(self, pic, prediction=None):\n digitmap = {\n 0: [(0,0), (1,0), (2,0), (3,0), (4,0), (0,1), (4,1), (0,2), (1,2), (2,2), (3,2), (4,2)],\n 1: [(0,2), (1,2), (2,2), (3,2), (4,2)],\n 2: [(0,0), (0,1), (0,2), (1,2), (2,0), (2,1), (2,2), (3,0), (4,0), (4,1), (4,2)],\n 3: [(0,0), (0,1), (0,2), (1,2), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)],\n 4: [(0,0), (0,2), (1,0), (1,2), (2,0), (2,1), (2,2), (3,2), (4,2)],\n 5: [(0,0), (0,1), (0,2), (1,0), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)],\n 6: [(0,0), (0,1), (0,2), (1,0), (2,0), (2,1), (2,2), (3,0), (3,2), (4,0), (4,1), (4,2)],\n 7: [(0,0), (0,1), (0,2), (1,2), (2,2), (3,2), (4,2)],\n 8: [(0,0), (1,0), (2,0), (3,0), (4,0), (0,1), (4,1), (0,2), (1,2), (2,2), (3,2), (4,2), (2,1)],\n 9: [(0,0), (0,1), (0,2), (1,0), (1,2), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)]\n }\n\n pic = pic.reshape((28,28)).copy()\n if prediction is not None:\n for pos in digitmap[prediction]:\n pic[pos]=255\n plt.imshow(pic, cmap='gray_r')", "def print_vector(self):\n print self.x, self.y, self.z", "def display_mask(i):\n ### To display binary masks, comment the folowing line\n # mask = np.argmax(testing_preds[i], axis=-1)\n ### To display probability maps, comment the folowing line\n mask = 
testing_preds[i,:,:,-1]\n mask = np.expand_dims(mask, axis=-1)\n img = PIL.ImageOps.autocontrast(keras.preprocessing.image.array_to_img(mask))\n display(img)", "def printData (data):\n print(str(len(data)) + '\\t' + str(data))", "def print_data(place):\n raise NotImplementedError", "def print(self):\n print(\" 0 1 2 3 4 5 6 7 8 \")\n print(\" -------------------------\")\n for x in range(0, 9):\n print(f\"{x} | \", end=\"\")\n for y in range(0, 9):\n if self.field[x][y] == -1:\n print(\"* \", end=\"\")\n else:\n print(f\"{self.field[x][y]} \", end=\"\")\n if y % 3 == 2:\n print(\"| \", end=\"\")\n print(\"\")\n if x % 3 == 2:\n print(\" -------------------------\")", "def show_all(img, overlay=None, axis='z'):\n xlen, ylen, zlen = img.GetSize()\n all_images = []\n all_overlays = []\n if axis == 'z':\n all_images = [img[:, :, z] for z in xrange(zlen)]\n if overlay:\n all_overlays = [overlay[:, :, z] for z in xrange(zlen)]\n elif axis == 'y':\n all_images = [img[:, y, :] for y in xrange(ylen)]\n if overlay:\n all_overlays = [overlay[:, y, :] for y in xrange(ylen)]\n elif axis == 'x':\n all_images = [img[x, :, :] for x in xrange(xlen)]\n if overlay:\n all_overlays = [overlay[x, :, :] for x in xrange(xlen)]\n else:\n raise Exception('invalid axis')\n\n for i, image in enumerate(all_images):\n if overlay:\n show_one(sitk.LabelOverlay(image, all_overlays[i]))\n else:\n show_one(image)\n plt.show()", "def print(self):\n # IMPLEMENT ME\n for i in range(self.height):\n for j in range(self.width):\n print(self.board[i][j], end=\" \")\n print()\n print()", "def my_print(self):\n if self.__size is not 0:\n for ite in range(self.__position[1]):\n print()\n for ite in range(self.__size):\n print(\" \" * self.__position[0], end=\"\")\n print(\"#\" * self.size)\n else:\n print()", "def printDataRange(matrix):\n print(\"Samples\\tMin\\tMax\\tMedian\\t10th\\t90th\")\n for i, sample in enumerate(matrix.matrix.sample_labels):\n start = matrix.matrix.sample_boundaries[i]\n end = matrix.matrix.sample_boundaries[i + 1]\n sample_matrix = matrix.matrix.matrix[..., start:end]\n print(\"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\".format(sample, np.amin(sample_matrix),\n np.amax(sample_matrix),\n np.ma.median(sample_matrix),\n np.percentile(sample_matrix, 10),\n np.percentile(sample_matrix, 90)))", "def print_cell_information(obj_ase_cell):\n # print the lattice vectors\n print('a1=',obj_ase_cell.cell[0,:])\n print('a2=',obj_ase_cell.cell[1,:])\n print('a3=',obj_ase_cell.cell[2,:])\n for i,a in enumerate(obj_ase_cell):\n print(i,a.symbol,a.position)", "def print(self):\n for row in self.board:\n print(row)", "def dumpResults(x,y,lon,lat):\n for i in range(0,len(x)):\n print(x[i],y[i],\"lonlat\",lon[i],lat[i])\n return", "def display(self):\n for i in range(self.__y):\n print()\n for i in range(self.__height):\n print(\" \" * self.__x + \"#\" * self.__width)", "def display(self):\n width = self.width\n height = self.height\n x = self.x\n y = self.y\n for d_y in range(y):\n print()\n for h in range(height):\n if x != 0:\n print(\" \" * x, end=\"\")\n print(\"#\" * width)", "def display(self):\n for i in range(0, len(self.top_row)):\n self.top_row[i].display()\n for i in range(0, len(self.bottom_row)):\n self.bottom_row[i].display()\n for i in range(0, len(self.left_col)):\n self.left_col[i].display()\n for i in range(0, len(self.right_col)):\n self.right_col[i].display()", "def showTensorImg(ts, title):\n img = np.transpose(ts, (1, 2, 0))\n showImg(img, title)\n return" ]
[ "0.75990057", "0.7544907", "0.74450433", "0.73937047", "0.7349031", "0.7347187", "0.7033798", "0.68403655", "0.6811467", "0.66571254", "0.6630757", "0.6279358", "0.6275334", "0.61232245", "0.60401046", "0.5973948", "0.5973784", "0.5965042", "0.59583265", "0.59162855", "0.5894761", "0.5819436", "0.5811356", "0.58048147", "0.57793635", "0.57645047", "0.5754731", "0.57335156", "0.57335156", "0.5726376", "0.5715056", "0.57111317", "0.57064384", "0.56567574", "0.5577068", "0.55756503", "0.55694", "0.55646145", "0.55592704", "0.55557805", "0.5544266", "0.5533631", "0.5533631", "0.55232507", "0.55178654", "0.5515087", "0.5503338", "0.5490018", "0.5488094", "0.5487511", "0.5482174", "0.54544526", "0.5454091", "0.54539174", "0.5419365", "0.54145783", "0.53863937", "0.53683513", "0.5363214", "0.5358269", "0.5357344", "0.5318814", "0.53133845", "0.53115493", "0.53113484", "0.5308694", "0.52823716", "0.5281389", "0.52733016", "0.5255574", "0.5242868", "0.52380145", "0.52352977", "0.5234517", "0.5230952", "0.52300847", "0.5228469", "0.5226477", "0.52176017", "0.52133596", "0.51984113", "0.51942176", "0.51879305", "0.5177211", "0.51671976", "0.51668286", "0.51616746", "0.5159799", "0.515941", "0.51542795", "0.51455265", "0.51432824", "0.51390004", "0.5137368", "0.51322716", "0.5130539", "0.51247895", "0.5123635", "0.51206356", "0.51191825" ]
0.76172984
0
Print the data in slice iz, column iy of an image to standard out.
def print_col(input, iy=0, iz=0): image=get_image(input) nx = image.get_xsize() ny = image.get_ysize() nz = image.get_zsize() print "(z = %d slice, y = %d col)" % (iz, iy) line = [] for ix in xrange(nx): line.append("%12.5g " % (image.get_value_at(ix,iy,iz))) if ((ix + 1) % 5 == 0): line.append("\n ") line.append("\n") print "".join(line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_image_col(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny-1,-1,-1):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image_row(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_row(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image(input):\n\timage=get_image(input)\n\tnz = image.get_zsize()\n\tfor iz in xrange(nz): print_slice(input, iz)", "def print_image(data, index):\n image = data[index]\n for line in image:\n print(line)", "def dump_row(input, fname, ix=0, iz=0):\n\tfout = open(fname, \"w\")\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tfout.write(\"# z = %d slice, x = %d row)\\n\" % (iz, ix))\n\tline = []\n\tfor iy in xrange(ny):\n\t\tfout.write(\"%d\\t%12.5g\\n\" % (iy, image.get_value_at(ix,iy,iz)))\n\tfout.close()", "def print_image_slice_3d(input, num=0,direction=\"z\"):\n\t#print \"print slice at 3 directions\"\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tif(direction==\"x\"):\n\t\t#print \"xxxxx\"\n\t\tix=num\n\t\tprint \"(x = %d slice)\" % (ix)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor iy in xrange(ny):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((iy + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(ny%5 != 0): 
line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telif(direction==\"y\"):\n\t\t#print \"yyy\"\n\t\tiy=num\n\t\tprint \"(y = %d slice)\" % (iy)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telse:\n\t\t#print \"zzzz\"\n\t\tiz=num\n\t\tprint \"(z = %d slice)\" % (iz)\n\t\tline = []\n\t\tfor iy in xrange(ny-1,-1,-1):\n\t\t\tline.append(\"Row \")\n\t\t\tline.append(\"%4i \" % iy)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)", "def print_img_data(img,title='Image'):\n print(f'________ {title} _______')\n if len(image.shape) == 2:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0])}\\n')\n elif len(image.shape) == 3:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0,0])}\\n')", "def print_img_at_idx(self, idx):\n\n img = self.images[idx]\n print_img(img)", "def printMat(image):\n for row in range(image.rows):\n print \"[\",\n for col in range(image.cols):\n print cv.mGet(image, row, col),\n print \"]\"\n print \"\"", "def show(self):\n m = [xo_convert(int(x)) for x in np.nditer(self.arr)]\n print(\"{} | {} | {}\".format(*m[:3]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[3:6]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[6:]))\n print()", "def print_image(img):\n for k in img:\n print(\"\".join([str(x) for x in k] ))", "def pretty_print(image_example):\n print numpy.array_str(image_example, precision=1, max_line_width=142)", "def print_slice(complex_slice, name = None):\n if name: print(\"***********\" + name + \"**********\")\n slice = complex_slice.clone()\n slice = slice.detach()\n slice = slice.cpu()\n slice = slice.permute(0, 2, 3, 1)\n slice = slice.squeeze()\n slice_image_abs = fastmri.complex_abs(slice)\n plt.imshow(slice_image_abs, cmap = 'gray')\n plt.show()", "def print_seg_row_col(sp) : \n s, r, c = src_from_rc8x8(sp.peak_y_raw, sp.peak_x_raw)\n print('seg: %d, row: %.1f, col: %.1f' % (s, r, c))", "def explore_data(dataset, start, end, rows_and_columns=False):\r\n for i in range(start,end):\r\n print(dataset[i],end=\"\\n\")", "def show_slice(file_path, x, y, z):\n img = nib.load(file_path)\n img = img.get_fdata()\n print(\"The scan has dimensions {}.\".format(img.shape))\n slice_0 = img[x, :, :]\n slice_1 = img[:, y, :]\n slice_2 = img[:, :, z]\n slices = [slice_0, slice_1, slice_2]\n fig, axes = plt.subplots(1, len(slices), figsize=[12, 4])\n for i, sli in enumerate(slices):\n axes[i].imshow(sli.T, cmap=\"gray\", origin=\"lower\")", "def imshow(image):\n iio.imshow(dtype.im2uint(image))", "def show_image(image):\n print('-' * (len(image) + 4))\n for line in image:\n print('| ', end='')\n for ch in line:\n char = '#' if ch is True else '.'\n print(char, end='')\n print(' |')\n print('-' * (len(image) + 4))", "def print_info(self, i):\n\n im_size = self.image_size(i)\n print 'The path of the image is: 
{}'.format(self.image_path_at(i))\n print 'width: {}, height: {}'.format(im_size[0], im_size[1])\n \n attr_i = self.gtdb['attr'][i, :]\n print 'The attributes are: {}'.format(','.join([self._classes[i] for i in np.where(attr_i==1)[0]]))", "def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)", "def print_image(self, pil_image):\n bw_image = pil_image.convert('1', dither=Image.FLOYDSTEINBERG)\n width = bw_image.width\n height = bw_image.height\n img_bytes = list(bw_image.getdata())\n self.print_bitmap(width, height, img_bytes)", "def collatz_print(w, i, j, v):\n\tw.write(str(i) + \" \" + str(j) + \" \" + str(v) + \"\\n\")", "def print_image(self, width, data):\n if len(data) % width:\n raise ValueError('Bad image format, length of data must be '\n 'divisible by width.')\n height = len(data) / width\n\n # send line-by-line\n for row in range(height):\n self.send_command('print_bitmap', 1, width)\n self.port.write(data[row*width:(row+1)*width], is_text=False)\n self.port.fed_dots(1)", "def printdatarow(dat, iteration):\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))", "def printdatarow(dat, iteration):\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))", "def collatz_print (w, i, j, v) :\n w.write(str(i) + \" \" + str(j) + \" \" + str(v) + \"\\n\")", "def display_data():\n data = read_data_from_file()\n data_size = len(data)\n print('Data size - ' + str(data_size))\n\n\n for i in range(data_size-1, 0, -1):\n image = data[i]\n cv2.imshow('window', image[0])\n print(str(image[1]))\n cv2.waitKey(50)", "def printImage(imageObject):\n # TODO\n pass", "def _convert_and_print_image(self, im):\n pixLine = \"\"\n imLeft = \"\"\n imRight = \"\"\n switch = 0\n imgSize = [0, 0]\n\n if im.size[0] > 512:\n print (\"WARNING: Image is wider than 512 and could be truncated at print time \")\n if im.size[1] > 255:\n raise ValueError(\"Image Height larger than 255\")\n\n imBorder = self._check_image_size(im.size[0])\n for i in range(imBorder[0]):\n imLeft += \"0\"\n for i in range(imBorder[1]):\n imRight += \"0\"\n\n for y in range(im.size[1]):\n imgSize[1] += 1\n pixLine += imLeft\n imgSize[0] += imBorder[0]\n for x in range(im.size[0]):\n imgSize[0] += 1\n RGB = im.getpixel((x, y))\n imColor = (RGB[0] + RGB[1] + RGB[2])\n imPattern = \"1X0\"\n patternLen = len(imPattern)\n switch = (switch - 1) * (-1)\n for x in range(patternLen):\n if imColor <= (255 * 3 / patternLen * (x + 1)):\n if imPattern[x] == \"X\":\n pixLine += \"%d\" % switch\n else:\n pixLine += imPattern[x]\n break\n elif imColor > (255 * 3 / patternLen * patternLen) and imColor <= (255 * 3):\n pixLine += imPattern[-1]\n break\n pixLine += imRight\n imgSize[0] += imBorder[1]\n\n self._print_image(pixLine, imgSize)", "def print_real_image(r_img, name = None, batch_size = BATCH_SIZE):\n if name: print(\"***********\" + name 
+ \"**********\")\n to_print = []\n c_img = r_img.clone()\n for i in range(batch_size):\n img = c_img[i,:,:]\n img = torch.squeeze(img)\n img = img.detach()\n img = img.cpu()\n plt.imshow(img, cmap = 'gray')\n plt.show()", "def display(self):\n for row in range(self.height):\n for col in range(self.width):\n char = '#' if self.pixels[row * self.width + col] else '.'\n print(char, end='')\n print()\n print()", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n page = 0xB0\n pix = list(image.getdata())\n step = self.width * 8\n for y in xrange(0, self.pages * step, step):\n\n # move to given page, then reset the column address\n self.command(page, 0x02, 0x10)\n page += 1\n\n buf = []\n for x in xrange(self.width):\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[x + y + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n\n self.data(buf)", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n page = 0xB0\n pix = list(image.getdata())\n step = self.width * 8\n for y in xrange(0, self.pages * step, step):\n\n # move to given page, then reset the column address\n self.command(page, 0x02, 0x10)\n page += 1\n\n buf = []\n for x in xrange(self.width):\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[x + y + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n\n self.data(buf)", "def info_np(img):\n import numpy as np\n\n print ('Dimensions: ' + str(np.shape(img)))\n print ('Min value: ' + str(np.min(img)))\n print ('Avg value: ' + str(np.average(img)))\n print ('Med value: ' + str(np.median(img)))\n print ('Max value: ' + str(np.max(img)))\n print ('Std dev: ' + str(np.std(img)))\n print ('Sum: ' + str(np.sum(img)))", "def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):\r\n\r\n filenameprefix=self.name_prefix\r\n\r\n def printdatarow(dat, iteration):\r\n \"\"\"print data of iteration i\"\"\"\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))\r\n\r\n dat = CMADataLogger(filenameprefix).load()\r\n ndata = dat.f.shape[0]\r\n\r\n # map index to iteration number, is difficult if not all iteration numbers exist\r\n # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long\r\n # otherwise:\r\n if idx is None:\r\n idx = 100\r\n if np.isscalar(idx):\r\n # idx = np.arange(0, ndata, idx)\r\n if idx:\r\n idx = np.r_[0, 1, idx:ndata-3:idx, -3:0]\r\n else:\r\n idx = np.r_[0, 1, -3:0]\r\n\r\n idx = array(idx)\r\n idx = idx[idx<ndata]\r\n idx = idx[-idx<=ndata]\r\n iters = dat.f[idx, 0]\r\n idxbest = np.argmin(dat.f[:,5])\r\n iterbest = dat.f[idxbest, 0]\r\n\r\n if len(iters) == 1:\r\n printdatarow(dat, iters[0])\r\n else:\r\n self.disp_header()\r\n for i in iters:\r\n printdatarow(dat, i)\r\n self.disp_header()\r\n printdatarow(dat, iterbest)\r\n sys.stdout.flush()", "def show_np(mat):\n for x in range(15):\n for y in range(15):\n if (x == 7) and (y == 7):\n print(\"\\033[%d;%d;%dm**\\033[0m\" % (0, 33, 41), end='')\n elif mat[x, y, 0] > 0:\n print(\"\\033[%d;%d;%dm \\033[0m\" % (0, 31, 41), end='')\n elif mat[x, y, 1] > 0:\n print(\"\\033[%d;%d;%dm \\033[0m\" % (0, 32, 42), end='')\n else:\n print(\" \", end='')\n print(\"\")", "def 
_showdata(self, prec=4):\n print('nh {0:d} nslices {1:d} nbl {2:d} ncp {3:d} nca {4:d} '.format(\n self.nh, self.nslices, self.nbl, self.ncp, self.nca), end=\"\")\n print(\"observables in np arrays with {:d} rows\".format(self.nslices))\n\n if len(self.observables) == 4:\n print('nca', self.nca)\n else:\n print()\n np.set_printoptions(precision=prec)\n\n print(self.fp.shape, \"fp (degrees, but stored internally in radians):\\n\",\n self.fp*self.degree, \"\\n\")\n print(self.fa.shape, \"fa:\\n\", self.fa, \"\\n\")\n\n print(self.cp.shape, \"cp (degrees, but stored internally in radians):\\n\",\n self.cp*self.degree, \"\\n\")\n if len(self.observables) == 4:\n print(self.ca.shape, \"ca:\\n\", self.ca, \"\\n\")\n # print(self.info4oif_dict)\n\n print(\"hole centers array shape:\", self.ctrs.shape)\n\n print(len(self.bholes), \"baseline hole indices\\n\", self.bholes)\n print(self.bls.shape, \"baselines:\\n\", self.bls)\n\n print(self.tholes.shape, \"triple hole indices:\\n\", self.tholes)\n print(self.tuv.shape, \"triple uv vectors:\\n\", self.tuv)\n\n print(self.qholes.shape, \"quad hole indices:\\n\", self.qholes)\n print(self.quvw.shape, \"quad uvw vectors:\\n\", self.quvw)", "def info(self):\n\n print(\"pixellisation:\", self.pixel)\n print(\"number of components:\", self.ncomp)\n print(\"number of pixels:\", self.data.shape[:] if self.ncomp == 1 else self.data.shape[1:])\n print(\"nside:\", self.nside)\n print(\"geometry:\", self.geometry)\n print(\"coordinates:\", self.coordinate)", "def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):\r\n\r\n filenameprefix=self.name_prefix\r\n\r\n def printdatarow(dat, iteration):\r\n \"\"\"print data of iteration i\"\"\"\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))\r\n\r\n dat = CMADataLogger(filenameprefix).load()\r\n ndata = dat.f.shape[0]\r\n\r\n # map index to iteration number, is difficult if not all iteration numbers exist\r\n # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long\r\n # otherwise:\r\n if idx is None:\r\n idx = 100\r\n if np.isscalar(idx):\r\n # idx = np.arange(0, ndata, idx)\r\n if idx:\r\n idx = np.r_[0, 1, idx:ndata-3:idx, -3:0]\r\n else:\r\n idx = np.r_[0, 1, -3:0]\r\n\r\n idx = array(idx)\r\n idx = idx[idx<=ndata] # TODO: shouldn't this be \"<\"?\r\n idx = idx[-idx<=ndata]\r\n iters = dat.f[idx, 0]\r\n idxbest = np.argmin(dat.f[:,5])\r\n iterbest = dat.f[idxbest, 0]\r\n if len(iters) == 1:\r\n printdatarow(dat, iters[0])\r\n else:\r\n self.disp_header()\r\n for i in iters:\r\n printdatarow(dat, i)\r\n self.disp_header()\r\n printdatarow(dat, iterbest)\r\n sys.stdout.flush()", "def print_image(img):\r\n # On affiche l'image\r\n plt.figure(figsize=(20, 5))\r\n plt.subplot(1, 2, 1)\r\n plt.imshow(img)\r\n # On affiche l'histogramme\r\n plt.subplot(1, 2, 2)\r\n plt.hist(img.flatten(), bins=range(256))\r\n plt.show()", "def display(self):\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(\" \", end=\"\")\n for row in range(self.width):\n print(\"#\", end=\"\")\n print()", "def ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n 
offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\n \"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, format='%s', itemsize=%s, flags=%s)\"\n % (x, nd.shape, nd.strides, nd.suboffsets, offset, nd.format, nd.\n itemsize, flags))\n sys.stdout.flush()", "def print_azeltables(inviews, ic):\n for i in range(0, len(inviews)):\n print \" \"\n print \"Az/El for inview %s to %s\" % (inviews[i][0], inviews[i][1])\n azels = ic.compute_azels(inviews[i][0], inviews[i][1], 15)\n for j in range(0, len(azels)):\n print \"At %s, azimuth=%8.2f, elevation=%8.2f\" % \\\n (azels[j][0], azels[j][1], azels[j][2])", "def _print_image(self, line, size):\n i = 0\n cont = 0\n\n self._write(self.__class__.__imageSize['1x1'])\n buffer = bytearray([int((size[0] / size[1]) / 8), 0, size[1], 0])\n\n self._write(buffer)\n buffer = bytearray()\n\n while i < len(line):\n hex_string = int(line[i:i + 8], 2)\n buffer.append(hex_string)\n i += 8\n cont += 1\n if cont % 4 == 0:\n self._write(buffer)\n buffer = bytearray()\n cont = 0", "def show(self, data):\n if isinstance(data, (numpy.ndarray, h5py.Dataset)):\n isAtomic = len(data.shape) == 0\n isCurve = len(data.shape) == 1 and numpy.issubdtype(data.dtype, numpy.number)\n isImage = len(data.shape) == 2 and numpy.issubdtype(data.dtype, numpy.number)\n if isAtomic:\n self.showAsString(data)\n elif isCurve:\n self.show1d(data)\n elif isImage:\n self.show2d(data)\n else:\n self.showAsString(data)\n else:\n self.showAsString(data)", "def print_bitmap(self, w, h, image):\n\n bitmap = self._pack_bitmap(w, h, image)\n\n row_bytes = (w + 7) // 8 # Round up to next byte boundary\n\n if row_bytes >= 48:\n row_bytes_clipped = 48\n else:\n row_bytes_clipped = row_bytes # 384 pixels max width\n\n # Est. 
max rows to write at once, assuming 256 byte printer buffer.\n if self._dtr_enabled:\n chunk_height_limit = 255 # Buffer doesn't matter, handshake!\n else:\n chunk_height_limit = 256 // row_bytes_clipped\n if chunk_height_limit > self._max_chunk_height:\n chunk_height_limit = self._max_chunk_height\n elif chunk_height_limit < 1:\n chunk_height_limit = 1\n\n row_start = 0\n i = 0\n while row_start < h:\n # Issue up to chunkHeightLimit rows at a time:\n chunk_height = h - row_start\n if chunk_height > chunk_height_limit:\n chunk_height = chunk_height_limit\n\n self.write(self.ASCII_DC2, '*', chunk_height, row_bytes_clipped)\n\n y = 0\n while y < chunk_height:\n x = 0\n while x < row_bytes_clipped:\n self.timeout_wait()\n self._send_to_printer(int(bitmap[i]))\n x += 1\n i += 1\n\n y += 1\n\n i += row_bytes - row_bytes_clipped\n\n self.timeout_set(chunk_height * self._dot_print_time)\n\n row_start += chunk_height_limit\n\n self._prev_byte = '\\n'", "def display(self):\n for row0 in range(self.y):\n print()\n for row in range(self.height):\n for column0 in range(self.x):\n print(\" \", end=\"\")\n for column in range(self.width):\n print(\"#\", end=\"\")\n print()", "def print_wrapped(data, ncols=3):\r\n nrows = len(data)\r\n labels = data.index\r\n n_split_rows = int(np.ceil(nrows / ncols))\r\n for r in range(0, nrows, ncols):\r\n for c in range(ncols):\r\n try:\r\n numstr = '{}'.format(data[r + c])\r\n tabs = [' '] * (20 - len(labels[r + c]) - len(numstr))\r\n print(labels[r + c] + \"\".join(tabs) + numstr, end='\\t')\r\n except:\r\n pass\r\n print()", "def ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, \"\n \"format='%s', itemsize=%s, flags=%s)\" %\n (x, nd.shape, nd.strides, nd.suboffsets, offset,\n nd.format, nd.itemsize, flags))\n sys.stdout.flush()", "def display_cropped_img(i):\n image = PIL.Image.open(testing_img_paths[i])\n image = image.crop(box=(313,99,825,611))\n image = image.resize((256,256))\n display(image)", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n self.command(\n const.COLUMNADDR, 0x00, self.width-1, # Column start/end address\n const.PAGEADDR, 0x00, self.pages-1) # Page start/end address\n\n pix = list(image.getdata())\n step = self.width * 8\n buf = []\n for y in xrange(0, self.pages * step, step):\n i = y + self.width-1\n while i >= y:\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[i + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n i -= 1\n\n self.data(buf)", "def display(self):\n for row in self.tile_rows:\n print(row)", "def display(self):\n for i in range(self.y):\n print()\n for i in range(self.height):\n for k in range(self.x):\n print(' ', end='')\n for j in range(self.width):\n print('#', end='')\n print()", "def show_ipv(data: np.ndarray):\n import ipyvolume as ipv\n return ipv.quickvolshow(data)", "def imshow(self, depth):\n layer = self.cube[depth]\n img = []\n for i in range(self.height):\n img.append([layer[i][j].value for j in range(self.width)])\n plt.imshow(img, cmap='gray')\n plt.show()", "def display(self):\n for b in range(self.y):\n print()\n for i in range(self.height):\n print(\" \" * self.x + \"#\" * self.width)", "def display(self):\n width = 1 + max(len(self.values[s]) for s in 
self.boxes)\n line = 'x'.join(['-'*(width*3)]*3)\n for r in self.rows:\n print(''.join(self.values[r+c].center(width)+('|' if c in '36' else '')\n for c in self.cols))\n if r in 'CF': print(line)\n print", "def display_mask(i):\n ### To display binary masks, comment the folowing line\n # mask = np.argmax(testing_preds[i], axis=-1)\n ### To display probability maps, comment the folowing line\n mask = testing_preds[i,:,:,-1]\n mask = np.expand_dims(mask, axis=-1)\n img = PIL.ImageOps.autocontrast(keras.preprocessing.image.array_to_img(mask))\n display(img)", "def show_input_to_output(img_ns):\n figure()\n \n sp = subplot(1, 2, 1).imshow(img_ns.img)\n sp.axes.grid(False)\n sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Input Image\", fontsize=10);\n outimg = tiles_to_images(img_ns, img_ns.tile_grid, img_ns.tile_catalog, img_ns.tile_size)\n sp = subplot(1, 2, 2).imshow(outimg.astype(np.uint8));\n sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Output Image From Tiles\", fontsize=10);\n sp.axes.grid(False)\n #print(outimg.astype(np.uint8))\n #print(img_ns)\n plt.savefig(img_ns.output_filename + \"_input_to_output.pdf\", bbox_inches=\"tight\")\n plt.close()", "def display(array):\n plt.figure()\n plt.imshow(array)\n plt.show()", "def header(self):\n print 'dimensions',self.data.shape\n print 'llcorner', self.xllcorner, self.yllcorner\n print 'cell size', self.cellsize", "def show_image(self, pic, prediction=None):\n digitmap = {\n 0: [(0,0), (1,0), (2,0), (3,0), (4,0), (0,1), (4,1), (0,2), (1,2), (2,2), (3,2), (4,2)],\n 1: [(0,2), (1,2), (2,2), (3,2), (4,2)],\n 2: [(0,0), (0,1), (0,2), (1,2), (2,0), (2,1), (2,2), (3,0), (4,0), (4,1), (4,2)],\n 3: [(0,0), (0,1), (0,2), (1,2), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)],\n 4: [(0,0), (0,2), (1,0), (1,2), (2,0), (2,1), (2,2), (3,2), (4,2)],\n 5: [(0,0), (0,1), (0,2), (1,0), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)],\n 6: [(0,0), (0,1), (0,2), (1,0), (2,0), (2,1), (2,2), (3,0), (3,2), (4,0), (4,1), (4,2)],\n 7: [(0,0), (0,1), (0,2), (1,2), (2,2), (3,2), (4,2)],\n 8: [(0,0), (1,0), (2,0), (3,0), (4,0), (0,1), (4,1), (0,2), (1,2), (2,2), (3,2), (4,2), (2,1)],\n 9: [(0,0), (0,1), (0,2), (1,0), (1,2), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)]\n }\n\n pic = pic.reshape((28,28)).copy()\n if prediction is not None:\n for pos in digitmap[prediction]:\n pic[pos]=255\n plt.imshow(pic, cmap='gray_r')", "def display(self):\n width = self.width\n height = self.height\n x = self.x\n y = self.y\n for d_y in range(y):\n print()\n for h in range(height):\n if x != 0:\n print(\" \" * x, end=\"\")\n print(\"#\" * width)", "def print(self):\n self.__print_local(self.dataset, 0)", "def display_napari(pos_img):\n global data\n global img_queue\n if pos_img is None:\n return\n # read image and z position\n image = np.reshape(pos_img[2:],(clip[0], clip[1]))\n z_pos = pos_img[1]\n color = pos_img[0]\n\n # write image into correct slice of data and update display\n data[z_pos] = np.squeeze(image)\n layer = viewer.layers[color]\n layer.data = data\n #print(\"updating \", z_pos, color)\n\n img_queue.task_done()", "def output(self):\n width = \" \" # 6 spaces formatting\n print(\"\\n\\n\")\n for row in range(self._length, -1, -1):\n if row != 0:\n print(row, end = width)\n for col in range(0, self._length):\n #print(self.board[col][row - 1], end = width)\n self.board[col][row-1].output(width)\n print(\"\\n\\n\")\n else:\n 
print(width, end=\" \")\n for col in self.columns:\n print(col, end = width)\n print(\"\\n\\n\")", "def pprint(self, data):\n self._assert(data)\n data = self._render(data) # make elements ascii\n fmats = self._fmats(data) # get array of padding formats)\n for row in data:\n print(fmats.format(*row))", "def info(self):\n\n\t\tprint(\"Pixels on a side: {0}\".format(self.data.shape[0]))\n\t\tprint(\"Pixel size: {0}\".format(self.resolution))\n\t\tprint(\"Total angular size: {0}\".format(self.side_angle))\n\t\tprint(\"lmin={0:.1e} ; lmax={1:.1e}\".format(self.lmin,self.lmax))", "def displayData(indices_to_display = None):\n width, height = 20, 20\n nrows, ncols = 10, 10\n if not indices_to_display:\n indices_to_display = random.sample(range(X.shape[0]), nrows*ncols)\n \n big_picture = np.zeros((height*nrows,width*ncols))\n \n irow, icol = 0, 0\n for idx in indices_to_display:\n if icol == ncols:\n irow += 1\n icol = 0\n iimg = getDatumImg(X[idx])\n big_picture[irow*height:irow*height+iimg.shape[0], icol*width:icol*width+iimg.shape[1]] = iimg\n icol += 1\n fig = plt.figure(figsize=(6,6))\n\n big_picture = (big_picture * 255).astype(np.int8)\n img = Image.fromarray(big_picture, mode='L')\n plt.imshow(img, cmap = cm.Greys)", "def render(filename,i):\n print('running render')\n A = np.genfromtxt(filename,skip_header=1,dtype=float,delimiter=',')\n img = np.array(A[i,:],copy=True)\n print(img.shape)\n img = img.reshape(28,28)\n img = 255 - img\n print(img.shape)\n plt.imshow(img, cmap=\"gray\", vmin=0, vmax=255)\n plt.savefig(\"img\" + str(i)+\"render\"+ \".png\")", "def display(self):\n for i in range(self.__y):\n print()\n for i in range(self.__height):\n print(\" \" * self.__x + \"#\" * self.__width)", "def write_window(img, ds, window):\n new_img = np.array([img[:, :, i] for i in range(img.shape[2])])\n ds.write(new_img, window=window)", "def print_images(i, df):\n \n images_folder_path = \"dataset/petfinder-adoption-prediction/train_images/\"\n plt.imshow(cv2.cvtColor(cv2.imread(images_folder_path+df.filename[i]), cv2.COLOR_BGR2RGB),);\n plt.axis(\"off\");\n plt.show()", "def print_images_out_statistics(self):\n self._print_images_statistics(self._images_out_folder, self._pose_class_names)", "def view_image(train_dataloader):\n for (x, target) in train_dataloader:\n np.save(\"img.npy\", x)\n print(x.shape)\n exit(0)", "def print(self):\n print(\" 0 1 2 3 4 5 6 7 8 \")\n print(\" -------------------------\")\n for x in range(0, 9):\n print(f\"{x} | \", end=\"\")\n for y in range(0, 9):\n if self.field[x][y] == -1:\n print(\"* \", end=\"\")\n else:\n print(f\"{self.field[x][y]} \", end=\"\")\n if y % 3 == 2:\n print(\"| \", end=\"\")\n print(\"\")\n if x % 3 == 2:\n print(\" -------------------------\")", "def print_data(place):\n raise NotImplementedError", "def display(self):\n [print() for i in range(self.__y)]\n for i in range(self.__height):\n [print(\" \", end=\"\") for i in range(self.__x)]\n for j in range(self.__width):\n print(\"#\", end=\"\")\n print()", "def print_image_info(input_image):\n print()\n print(\"Basic Information on image: {}\".format(input_image.filename))\n print(\"Format: {}\".format(input_image.format))\n print(\"Mode: {}\".format(input_image.mode))\n print(\"Size: {}\".format(input_image.size))\n print(\"Width: {}\".format(input_image.width))\n print(\"Height: {}\".format(input_image.height))\n print(\"Palette: {}\".format(input_image.palette))\n print()", "def __str__(self):\n result = ''\n for row in range(self.getHeight()):\n for col in range(self.getWidth()):\n 
result += str(self.data[row][col]) + ' '\n result += '\\n'\n return result", "def print(self):\n # IMPLEMENT ME\n for i in range(self.height):\n for j in range(self.width):\n print(self.board[i][j], end=\" \")\n print()\n print()", "def pixdump( source, start=None, end=None, length=None, width=64, height=None, palette=None ):\n\n for line in pixdump_iter( source, start, end, length, width, height, palette ):\n print( line )", "def print(self):\r\n base = 8 * self.width\r\n print(base * \"-\")\r\n for x in range(self.height):\r\n output = \"\"\r\n for y in range(self.width):\r\n output = output + self.board[x][y] + \"|\"\r\n print(\"|\" + output)\r\n print(base * \"-\")", "def show_slices(slices):\n fig, axes = plt.subplots(1, len(slices))\n for i, slice in enumerate(slices):\n axes[i].imshow(slice.T, cmap=\"gray\", origin=\"lower\")", "def display(self):\n result = []\n horizontal_bounds, vertical_bounds = self.get_bounds()\n min_x, max_x = horizontal_bounds\n min_y, max_y = vertical_bounds\n\n # xrange is inclusive in start and exclusive in end, IE [s..e)\n # so we add +1 offset to be safe\n for y in xrange(max_y, min_y - 1, -1):\n # since we have three rows and we're still relying on print,\n # displaying gets a bit dirty\n # will get cleaner once we move to something like HTML\n row_tiles = [self.tile(x, y) for x in xrange(min_x, max_x + 1)]\n\n # now we have to print each of the three rows together.\n # zip to aggregate each of the top, middle, bottom rows\n row_lines = zip(*[str(tile).split(\"\\n\") for tile in row_tiles])\n for line in row_lines:\n result.append(\"\".join(line))\n\n return \"\\n\".join(result)", "def imdisplay(filename, representation):\n\n image = read_image(filename, representation)\n plt.imshow(image, cmap=\"gray\")\n plt.show()", "def disImg(data=None,colorbar=False):\n size = np.sqrt(len(data[4:]))\n xmm = data[0]\n ymm = data[1]\n pl.matshow(data[4:].reshape(size,size),fignum=False)\n if colorbar == True:\n pl.colorbar()\n pl.xlim(0,size-1)\n pl.ylim(0,size-1)\n pl.xlabel('Pixels')\n pl.ylabel('Pixels')\n pl.grid(color='yellow')", "def visualize_output(\n self,\n img: np.ndarray,\n output_data: Any):\n raise NotImplementedError", "def imdisplay(filename, representation):\n image = read_image(filename, representation)\n\n if representation == GRAY_OUT:\n plt.imshow(image, cmap='gray')\n else:\n plt.imshow(image)\n\n plt.show()", "def show(self):\r\n \r\n clear() \r\n print \" \" + \"-\" * self.__width + \" \"\r\n \r\n for row in self.__buffer:\r\n rowData = \"\".join(str(i) for i in row)\r\n print \"|\" + rowData + \"|\"\r\n\r\n print \" \" + \"-\" * self.__width + \" \"\r\n self.clearBuffer()", "def display_image(X):\n\n\tim = X.reshape(28, 28)\n\ttemp = plt.imshow(im)\n\tplt.show()", "def print_vector(self):\n print self.x, self.y, self.z", "def print_array(x, idx=slice(None), message=None, message_prefix=\"SHIM - \",\n file=sys.stdout):\n return set_subtensor(x[idx],\n print(x[idx],\n message=message,\n message_prefix=message_prefix,\n file=file\n )\n )", "def show(self):\n \n \n \n \n \n \n r = 4\n f, axarr = plt.subplots(r, r, figsize=(8,8))\n counter = 0\n for i in range(r):\n for j in range(r):\n temp = self.x[counter,:]\n counter += 1\n img = self.x[counter,:]\n axarr[i][j].imshow(img)\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################", "def print_cell_information(obj_ase_cell):\n # print the lattice 
vectors\n print('a1=',obj_ase_cell.cell[0,:])\n print('a2=',obj_ase_cell.cell[1,:])\n print('a3=',obj_ase_cell.cell[2,:])\n for i,a in enumerate(obj_ase_cell):\n print(i,a.symbol,a.position)", "def print_row(row,writer,x):\n sys.stdout.write(unichr(0x2503))\n for n in xrange(row.shape[0]-1):\n writer(row[n],Width,(x,n))\n sys.stdout.write(unichr(0x2502))\n if row.shape[0] > 0:\n writer(row[-1],Width,(x,row.shape[0]-1))\n sys.stdout.write(unichr(0x2503) + '\\n')", "def show_slices(self, slices):\n fig, axes = plt.subplots(1, len(slices))\n for i, slice in enumerate(slices):\n axes[i].imshow(slice.T, cmap=\"gray\", origin=\"lower\")" ]
[ "0.77487314", "0.750461", "0.7496404", "0.73919225", "0.7150529", "0.7141496", "0.7027633", "0.6667874", "0.66633767", "0.6654244", "0.6348636", "0.621092", "0.6179944", "0.60063565", "0.59464717", "0.59386533", "0.5911687", "0.58938134", "0.5851181", "0.58338124", "0.58030814", "0.580005", "0.5770217", "0.57680136", "0.57322097", "0.5687016", "0.56508046", "0.56508046", "0.56486964", "0.5631367", "0.56241935", "0.56231993", "0.56194925", "0.5618533", "0.5608584", "0.5608584", "0.56033283", "0.55967665", "0.5579285", "0.55709326", "0.55670226", "0.5565117", "0.55197614", "0.5506052", "0.54795337", "0.5455547", "0.54323965", "0.5431751", "0.5415518", "0.54134214", "0.54053307", "0.5384232", "0.53834504", "0.53793746", "0.53791755", "0.53523403", "0.5348986", "0.53487146", "0.5321047", "0.5319799", "0.53061527", "0.52997816", "0.52917933", "0.5286587", "0.5281699", "0.5278258", "0.52758783", "0.5273376", "0.52664584", "0.52657247", "0.525592", "0.5242683", "0.5238663", "0.5227683", "0.5221043", "0.5217919", "0.5201372", "0.5196144", "0.51943666", "0.5190706", "0.5189621", "0.51893604", "0.5183344", "0.51832896", "0.5175574", "0.51731855", "0.5168432", "0.5162843", "0.5161328", "0.51551276", "0.5155126", "0.514921", "0.5142674", "0.5141334", "0.5140754", "0.5138634", "0.51380163", "0.51371795", "0.5120729", "0.5120167" ]
0.75519806
1
Print the data in slice iz of an image to standard out.
def print_slice(input, iz=0):
	image=get_image(input)
	nx = image.get_xsize()
	ny = image.get_ysize()
	nz = image.get_zsize()
	print "(z = %d slice)" % (iz)
	line = []
	for iy in xrange(ny):
		line.append("Row ")
		line.append("%4i " % iy)
		for ix in xrange(nx):
			line.append("%12.5g " % (image.get_value_at(ix,iy,iz)))
			if ((ix + 1) % 5 == 0):
				line.append("\n ")
				line.append(" ")
		line.append("\n")
		if(nx%5 != 0): line.append("\n")
	print "".join(line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_image_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny-1,-1,-1):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image(input):\n\timage=get_image(input)\n\tnz = image.get_zsize()\n\tfor iz in xrange(nz): print_slice(input, iz)", "def print_image_row(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image(data, index):\n image = data[index]\n for line in image:\n print(line)", "def print_image_col(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_row(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_col(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image_slice_3d(input, num=0,direction=\"z\"):\n\t#print \"print slice at 3 directions\"\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tif(direction==\"x\"):\n\t\t#print \"xxxxx\"\n\t\tix=num\n\t\tprint \"(x = %d slice)\" % (ix)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor iy in xrange(ny):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((iy + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(ny%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)\n\telif(direction==\"y\"):\n\t\t#print \"yyy\"\n\t\tiy=num\n\t\tprint \"(y = %d slice)\" % (iy)\n\t\tline = []\n\t\tfor iz in xrange(nz-1,-1,-1):\n\t\t\tline.append(\"Z \")\n\t\t\tline.append(\"%4i \" % iz)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint 
\"\".join(line)\n\telse:\n\t\t#print \"zzzz\"\n\t\tiz=num\n\t\tprint \"(z = %d slice)\" % (iz)\n\t\tline = []\n\t\tfor iy in xrange(ny-1,-1,-1):\n\t\t\tline.append(\"Row \")\n\t\t\tline.append(\"%4i \" % iy)\n\t\t\tfor ix in xrange(nx):\n\t\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\t\tline.append(\"\\n \")\n\t\t\t\t\tline.append(\" \")\n\t \t\tline.append(\"\\n\")\n\t \t\tif(nx%5 != 0): line.append(\"\\n\")\n\t\tprint \"\".join(line)", "def dump_row(input, fname, ix=0, iz=0):\n\tfout = open(fname, \"w\")\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tfout.write(\"# z = %d slice, x = %d row)\\n\" % (iz, ix))\n\tline = []\n\tfor iy in xrange(ny):\n\t\tfout.write(\"%d\\t%12.5g\\n\" % (iy, image.get_value_at(ix,iy,iz)))\n\tfout.close()", "def print_img_data(img,title='Image'):\n print(f'________ {title} _______')\n if len(image.shape) == 2:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0])}\\n')\n elif len(image.shape) == 3:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0,0])}\\n')", "def print_img_at_idx(self, idx):\n\n img = self.images[idx]\n print_img(img)", "def print_slice(complex_slice, name = None):\n if name: print(\"***********\" + name + \"**********\")\n slice = complex_slice.clone()\n slice = slice.detach()\n slice = slice.cpu()\n slice = slice.permute(0, 2, 3, 1)\n slice = slice.squeeze()\n slice_image_abs = fastmri.complex_abs(slice)\n plt.imshow(slice_image_abs, cmap = 'gray')\n plt.show()", "def show(self):\n m = [xo_convert(int(x)) for x in np.nditer(self.arr)]\n print(\"{} | {} | {}\".format(*m[:3]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[3:6]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[6:]))\n print()", "def print_real_image(r_img, name = None, batch_size = BATCH_SIZE):\n if name: print(\"***********\" + name + \"**********\")\n to_print = []\n c_img = r_img.clone()\n for i in range(batch_size):\n img = c_img[i,:,:]\n img = torch.squeeze(img)\n img = img.detach()\n img = img.cpu()\n plt.imshow(img, cmap = 'gray')\n plt.show()", "def print_image(img):\n for k in img:\n print(\"\".join([str(x) for x in k] ))", "def print_image(self, width, data):\n if len(data) % width:\n raise ValueError('Bad image format, length of data must be '\n 'divisible by width.')\n height = len(data) / width\n\n # send line-by-line\n for row in range(height):\n self.send_command('print_bitmap', 1, width)\n self.port.write(data[row*width:(row+1)*width], is_text=False)\n self.port.fed_dots(1)", "def print_image(self, pil_image):\n bw_image = pil_image.convert('1', dither=Image.FLOYDSTEINBERG)\n width = bw_image.width\n height = bw_image.height\n img_bytes = list(bw_image.getdata())\n self.print_bitmap(width, height, img_bytes)", "def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)", "def show_image(image):\n print('-' * (len(image) + 4))\n for line in image:\n print('| ', end='')\n for ch in line:\n char = '#' if ch is True else '.'\n print(char, end='')\n print(' |')\n print('-' * (len(image) + 4))", "def printImage(imageObject):\n # TODO\n pass", "def 
pretty_print(image_example):\n print numpy.array_str(image_example, precision=1, max_line_width=142)", "def printMat(image):\n for row in range(image.rows):\n print \"[\",\n for col in range(image.cols):\n print cv.mGet(image, row, col),\n print \"]\"\n print \"\"", "def imshow(image):\n iio.imshow(dtype.im2uint(image))", "def ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\n \"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, format='%s', itemsize=%s, flags=%s)\"\n % (x, nd.shape, nd.strides, nd.suboffsets, offset, nd.format, nd.\n itemsize, flags))\n sys.stdout.flush()", "def print_image(img):\r\n # On affiche l'image\r\n plt.figure(figsize=(20, 5))\r\n plt.subplot(1, 2, 1)\r\n plt.imshow(img)\r\n # On affiche l'histogramme\r\n plt.subplot(1, 2, 2)\r\n plt.hist(img.flatten(), bins=range(256))\r\n plt.show()", "def print_info(self, i):\n\n im_size = self.image_size(i)\n print 'The path of the image is: {}'.format(self.image_path_at(i))\n print 'width: {}, height: {}'.format(im_size[0], im_size[1])\n \n attr_i = self.gtdb['attr'][i, :]\n print 'The attributes are: {}'.format(','.join([self._classes[i] for i in np.where(attr_i==1)[0]]))", "def ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, \"\n \"format='%s', itemsize=%s, flags=%s)\" %\n (x, nd.shape, nd.strides, nd.suboffsets, offset,\n nd.format, nd.itemsize, flags))\n sys.stdout.flush()", "def display_data():\n data = read_data_from_file()\n data_size = len(data)\n print('Data size - ' + str(data_size))\n\n\n for i in range(data_size-1, 0, -1):\n image = data[i]\n cv2.imshow('window', image[0])\n print(str(image[1]))\n cv2.waitKey(50)", "def _print_image(self, line, size):\n i = 0\n cont = 0\n\n self._write(self.__class__.__imageSize['1x1'])\n buffer = bytearray([int((size[0] / size[1]) / 8), 0, size[1], 0])\n\n self._write(buffer)\n buffer = bytearray()\n\n while i < len(line):\n hex_string = int(line[i:i + 8], 2)\n buffer.append(hex_string)\n i += 8\n cont += 1\n if cont % 4 == 0:\n self._write(buffer)\n buffer = bytearray()\n cont = 0", "def show_slice(file_path, x, y, z):\n img = nib.load(file_path)\n img = img.get_fdata()\n print(\"The scan has dimensions {}.\".format(img.shape))\n slice_0 = img[x, :, :]\n slice_1 = img[:, y, :]\n slice_2 = img[:, :, z]\n slices = [slice_0, slice_1, slice_2]\n fig, axes = plt.subplots(1, len(slices), figsize=[12, 4])\n for i, sli in enumerate(slices):\n axes[i].imshow(sli.T, cmap=\"gray\", origin=\"lower\")", "def show(self, data):\n if isinstance(data, (numpy.ndarray, h5py.Dataset)):\n isAtomic = len(data.shape) == 0\n isCurve = len(data.shape) == 1 and numpy.issubdtype(data.dtype, numpy.number)\n isImage = len(data.shape) == 2 and numpy.issubdtype(data.dtype, numpy.number)\n if isAtomic:\n self.showAsString(data)\n elif isCurve:\n self.show1d(data)\n elif isImage:\n self.show2d(data)\n else:\n self.showAsString(data)\n else:\n self.showAsString(data)", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n page = 0xB0\n pix = list(image.getdata())\n step = self.width * 8\n for y in 
xrange(0, self.pages * step, step):\n\n # move to given page, then reset the column address\n self.command(page, 0x02, 0x10)\n page += 1\n\n buf = []\n for x in xrange(self.width):\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[x + y + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n\n self.data(buf)", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n page = 0xB0\n pix = list(image.getdata())\n step = self.width * 8\n for y in xrange(0, self.pages * step, step):\n\n # move to given page, then reset the column address\n self.command(page, 0x02, 0x10)\n page += 1\n\n buf = []\n for x in xrange(self.width):\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[x + y + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n\n self.data(buf)", "def show_ipv(data: np.ndarray):\n import ipyvolume as ipv\n return ipv.quickvolshow(data)", "def explore_data(dataset, start, end, rows_and_columns=False):\r\n for i in range(start,end):\r\n print(dataset[i],end=\"\\n\")", "def show_input_to_output(img_ns):\n figure()\n \n sp = subplot(1, 2, 1).imshow(img_ns.img)\n sp.axes.grid(False)\n sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Input Image\", fontsize=10);\n outimg = tiles_to_images(img_ns, img_ns.tile_grid, img_ns.tile_catalog, img_ns.tile_size)\n sp = subplot(1, 2, 2).imshow(outimg.astype(np.uint8));\n sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Output Image From Tiles\", fontsize=10);\n sp.axes.grid(False)\n #print(outimg.astype(np.uint8))\n #print(img_ns)\n plt.savefig(img_ns.output_filename + \"_input_to_output.pdf\", bbox_inches=\"tight\")\n plt.close()", "def _showdata(self, prec=4):\n print('nh {0:d} nslices {1:d} nbl {2:d} ncp {3:d} nca {4:d} '.format(\n self.nh, self.nslices, self.nbl, self.ncp, self.nca), end=\"\")\n print(\"observables in np arrays with {:d} rows\".format(self.nslices))\n\n if len(self.observables) == 4:\n print('nca', self.nca)\n else:\n print()\n np.set_printoptions(precision=prec)\n\n print(self.fp.shape, \"fp (degrees, but stored internally in radians):\\n\",\n self.fp*self.degree, \"\\n\")\n print(self.fa.shape, \"fa:\\n\", self.fa, \"\\n\")\n\n print(self.cp.shape, \"cp (degrees, but stored internally in radians):\\n\",\n self.cp*self.degree, \"\\n\")\n if len(self.observables) == 4:\n print(self.ca.shape, \"ca:\\n\", self.ca, \"\\n\")\n # print(self.info4oif_dict)\n\n print(\"hole centers array shape:\", self.ctrs.shape)\n\n print(len(self.bholes), \"baseline hole indices\\n\", self.bholes)\n print(self.bls.shape, \"baselines:\\n\", self.bls)\n\n print(self.tholes.shape, \"triple hole indices:\\n\", self.tholes)\n print(self.tuv.shape, \"triple uv vectors:\\n\", self.tuv)\n\n print(self.qholes.shape, \"quad hole indices:\\n\", self.qholes)\n print(self.quvw.shape, \"quad uvw vectors:\\n\", self.quvw)", "def view_image(train_dataloader):\n for (x, target) in train_dataloader:\n np.save(\"img.npy\", x)\n print(x.shape)\n exit(0)", "def _convert_and_print_image(self, im):\n pixLine = \"\"\n imLeft = \"\"\n imRight = \"\"\n switch = 0\n imgSize = [0, 0]\n\n if im.size[0] > 512:\n print (\"WARNING: Image is wider than 512 and could be truncated at print time \")\n if im.size[1] > 255:\n raise ValueError(\"Image Height larger than 255\")\n\n imBorder = self._check_image_size(im.size[0])\n for i in range(imBorder[0]):\n 
imLeft += \"0\"\n for i in range(imBorder[1]):\n imRight += \"0\"\n\n for y in range(im.size[1]):\n imgSize[1] += 1\n pixLine += imLeft\n imgSize[0] += imBorder[0]\n for x in range(im.size[0]):\n imgSize[0] += 1\n RGB = im.getpixel((x, y))\n imColor = (RGB[0] + RGB[1] + RGB[2])\n imPattern = \"1X0\"\n patternLen = len(imPattern)\n switch = (switch - 1) * (-1)\n for x in range(patternLen):\n if imColor <= (255 * 3 / patternLen * (x + 1)):\n if imPattern[x] == \"X\":\n pixLine += \"%d\" % switch\n else:\n pixLine += imPattern[x]\n break\n elif imColor > (255 * 3 / patternLen * patternLen) and imColor <= (255 * 3):\n pixLine += imPattern[-1]\n break\n pixLine += imRight\n imgSize[0] += imBorder[1]\n\n self._print_image(pixLine, imgSize)", "def print(self):\n self.__print_local(self.dataset, 0)", "def display_napari(pos_img):\n global data\n global img_queue\n if pos_img is None:\n return\n # read image and z position\n image = np.reshape(pos_img[2:],(clip[0], clip[1]))\n z_pos = pos_img[1]\n color = pos_img[0]\n\n # write image into correct slice of data and update display\n data[z_pos] = np.squeeze(image)\n layer = viewer.layers[color]\n layer.data = data\n #print(\"updating \", z_pos, color)\n\n img_queue.task_done()", "def pixdump( source, start=None, end=None, length=None, width=64, height=None, palette=None ):\n\n for line in pixdump_iter( source, start, end, length, width, height, palette ):\n print( line )", "def info_np(img):\n import numpy as np\n\n print ('Dimensions: ' + str(np.shape(img)))\n print ('Min value: ' + str(np.min(img)))\n print ('Avg value: ' + str(np.average(img)))\n print ('Med value: ' + str(np.median(img)))\n print ('Max value: ' + str(np.max(img)))\n print ('Std dev: ' + str(np.std(img)))\n print ('Sum: ' + str(np.sum(img)))", "def print_images_out_statistics(self):\n self._print_images_statistics(self._images_out_folder, self._pose_class_names)", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n self.command(\n const.COLUMNADDR, 0x00, self.width-1, # Column start/end address\n const.PAGEADDR, 0x00, self.pages-1) # Page start/end address\n\n pix = list(image.getdata())\n step = self.width * 8\n buf = []\n for y in xrange(0, self.pages * step, step):\n i = y + self.width-1\n while i >= y:\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[i + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n i -= 1\n\n self.data(buf)", "def render(filename,i):\n print('running render')\n A = np.genfromtxt(filename,skip_header=1,dtype=float,delimiter=',')\n img = np.array(A[i,:],copy=True)\n print(img.shape)\n img = img.reshape(28,28)\n img = 255 - img\n print(img.shape)\n plt.imshow(img, cmap=\"gray\", vmin=0, vmax=255)\n plt.savefig(\"img\" + str(i)+\"render\"+ \".png\")", "def visualize_output(\n self,\n img: np.ndarray,\n output_data: Any):\n raise NotImplementedError", "def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):\r\n\r\n filenameprefix=self.name_prefix\r\n\r\n def printdatarow(dat, iteration):\r\n \"\"\"print data of iteration i\"\"\"\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))\r\n\r\n dat = CMADataLogger(filenameprefix).load()\r\n ndata = dat.f.shape[0]\r\n\r\n # map index to iteration number, is difficult if not all 
iteration numbers exist\r\n # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long\r\n # otherwise:\r\n if idx is None:\r\n idx = 100\r\n if np.isscalar(idx):\r\n # idx = np.arange(0, ndata, idx)\r\n if idx:\r\n idx = np.r_[0, 1, idx:ndata-3:idx, -3:0]\r\n else:\r\n idx = np.r_[0, 1, -3:0]\r\n\r\n idx = array(idx)\r\n idx = idx[idx<ndata]\r\n idx = idx[-idx<=ndata]\r\n iters = dat.f[idx, 0]\r\n idxbest = np.argmin(dat.f[:,5])\r\n iterbest = dat.f[idxbest, 0]\r\n\r\n if len(iters) == 1:\r\n printdatarow(dat, iters[0])\r\n else:\r\n self.disp_header()\r\n for i in iters:\r\n printdatarow(dat, i)\r\n self.disp_header()\r\n printdatarow(dat, iterbest)\r\n sys.stdout.flush()", "def printdatarow(dat, iteration):\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))", "def printdatarow(dat, iteration):\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))", "def display(array):\n plt.figure()\n plt.imshow(array)\n plt.show()", "def print_azeltables(inviews, ic):\n for i in range(0, len(inviews)):\n print \" \"\n print \"Az/El for inview %s to %s\" % (inviews[i][0], inviews[i][1])\n azels = ic.compute_azels(inviews[i][0], inviews[i][1], 15)\n for j in range(0, len(azels)):\n print \"At %s, azimuth=%8.2f, elevation=%8.2f\" % \\\n (azels[j][0], azels[j][1], azels[j][2])", "def pprint(self, data):\n self._assert(data)\n data = self._render(data) # make elements ascii\n fmats = self._fmats(data) # get array of padding formats)\n for row in data:\n print(fmats.format(*row))", "def disp(self, idx=100): # r_[0:5,1e2:1e9:1e2,-10:0]):\r\n\r\n filenameprefix=self.name_prefix\r\n\r\n def printdatarow(dat, iteration):\r\n \"\"\"print data of iteration i\"\"\"\r\n i = np.where(dat.f[:, 0] == iteration)[0][0]\r\n j = np.where(dat.std[:, 0] == iteration)[0][0]\r\n print('%5d' % (int(dat.f[i,0])) + ' %6d' % (int(dat.f[i,1])) + ' %.14e' % (dat.f[i,5]) +\r\n ' %5.1e' % (dat.f[i,3]) +\r\n ' %6.2e' % (max(dat.std[j,5:])) + ' %6.2e' % min(dat.std[j,5:]))\r\n\r\n dat = CMADataLogger(filenameprefix).load()\r\n ndata = dat.f.shape[0]\r\n\r\n # map index to iteration number, is difficult if not all iteration numbers exist\r\n # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long\r\n # otherwise:\r\n if idx is None:\r\n idx = 100\r\n if np.isscalar(idx):\r\n # idx = np.arange(0, ndata, idx)\r\n if idx:\r\n idx = np.r_[0, 1, idx:ndata-3:idx, -3:0]\r\n else:\r\n idx = np.r_[0, 1, -3:0]\r\n\r\n idx = array(idx)\r\n idx = idx[idx<=ndata] # TODO: shouldn't this be \"<\"?\r\n idx = idx[-idx<=ndata]\r\n iters = dat.f[idx, 0]\r\n idxbest = np.argmin(dat.f[:,5])\r\n iterbest = dat.f[idxbest, 0]\r\n if len(iters) == 1:\r\n printdatarow(dat, iters[0])\r\n else:\r\n self.disp_header()\r\n for i in iters:\r\n printdatarow(dat, i)\r\n self.disp_header()\r\n printdatarow(dat, iterbest)\r\n sys.stdout.flush()", "def printdata(self,whichstream_,firsti_,lasti_,firstj_,lastj_,firstk_,lastk_,c_,qo_,a_,qc_,bc_,bx_,vartype_,cones_): # 3\n if not isinstance(whichstream_,streamtype): raise TypeError(\"Argument 
whichstream has wrong type\")\n res = self.__obj.printdata(whichstream_,firsti_,lasti_,firstj_,lastj_,firstk_,lastk_,c_,qo_,a_,qc_,bc_,bx_,vartype_,cones_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def write_window(img, ds, window):\n new_img = np.array([img[:, :, i] for i in range(img.shape[2])])\n ds.write(new_img, window=window)", "def print_image_info(input_image):\n print()\n print(\"Basic Information on image: {}\".format(input_image.filename))\n print(\"Format: {}\".format(input_image.format))\n print(\"Mode: {}\".format(input_image.mode))\n print(\"Size: {}\".format(input_image.size))\n print(\"Width: {}\".format(input_image.width))\n print(\"Height: {}\".format(input_image.height))\n print(\"Palette: {}\".format(input_image.palette))\n print()", "def print_data(self, data):\n self.print_indicator = True\n self.imagedata = data\n self.setImage(self.imagedata)\n\n self.indicator_min = -200\n self.indicator_max = 200\n\n if self.video_model != None:\n pos = int(self.video_model.get_pos(datatype = \"motion\"))\n self.indicator = self.view.plot([pos,pos],[self.indicator_min,self.indicator_max],pen=pyqtgraph.mkPen(color=pyqtgraph.hsvColor(2),width=1))", "def print_bitmap(self, w, h, image):\n\n bitmap = self._pack_bitmap(w, h, image)\n\n row_bytes = (w + 7) // 8 # Round up to next byte boundary\n\n if row_bytes >= 48:\n row_bytes_clipped = 48\n else:\n row_bytes_clipped = row_bytes # 384 pixels max width\n\n # Est. max rows to write at once, assuming 256 byte printer buffer.\n if self._dtr_enabled:\n chunk_height_limit = 255 # Buffer doesn't matter, handshake!\n else:\n chunk_height_limit = 256 // row_bytes_clipped\n if chunk_height_limit > self._max_chunk_height:\n chunk_height_limit = self._max_chunk_height\n elif chunk_height_limit < 1:\n chunk_height_limit = 1\n\n row_start = 0\n i = 0\n while row_start < h:\n # Issue up to chunkHeightLimit rows at a time:\n chunk_height = h - row_start\n if chunk_height > chunk_height_limit:\n chunk_height = chunk_height_limit\n\n self.write(self.ASCII_DC2, '*', chunk_height, row_bytes_clipped)\n\n y = 0\n while y < chunk_height:\n x = 0\n while x < row_bytes_clipped:\n self.timeout_wait()\n self._send_to_printer(int(bitmap[i]))\n x += 1\n i += 1\n\n y += 1\n\n i += row_bytes - row_bytes_clipped\n\n self.timeout_set(chunk_height * self._dot_print_time)\n\n row_start += chunk_height_limit\n\n self._prev_byte = '\\n'", "def printImage(currentImage):\n\tprint currentImage + ' is set to be printed...'", "def write_file(self, i, path, fout):\n\n test_file = path + '/' + self.output[i]\n # Write file name\n print(test_file, file=fout, end='\\n\\n')\n\n extension = os.path.splitext(test_file)[1]\n if extension == '.fits' or extension == 'FITS':\n import subprocess\n prog = self.bindir + '/fits2ascii.py -i ' + test_file\n output = subprocess.check_output(prog.split(), shell=False)\n data = output.decode()\n else:\n fin = open(test_file, 'r')\n data = fin.read()\n fin.close()\n #fout.write(data)\n print(data, file=fout)\n print(file=fout, end='\\n')", "def print_images(i, df):\n \n images_folder_path = \"dataset/petfinder-adoption-prediction/train_images/\"\n plt.imshow(cv2.cvtColor(cv2.imread(images_folder_path+df.filename[i]), cv2.COLOR_BGR2RGB),);\n plt.axis(\"off\");\n plt.show()", "def show_slices(slices):\n fig, axes = plt.subplots(1, len(slices))\n for i, slice in enumerate(slices):\n axes[i].imshow(slice.T, cmap=\"gray\", origin=\"lower\")", "def show_slices(self, slices):\n fig, axes = plt.subplots(1, 
len(slices))\n for i, slice in enumerate(slices):\n axes[i].imshow(slice.T, cmap=\"gray\", origin=\"lower\")", "def show_all(img, overlay=None, axis='z'):\n xlen, ylen, zlen = img.GetSize()\n all_images = []\n all_overlays = []\n if axis == 'z':\n all_images = [img[:, :, z] for z in xrange(zlen)]\n if overlay:\n all_overlays = [overlay[:, :, z] for z in xrange(zlen)]\n elif axis == 'y':\n all_images = [img[:, y, :] for y in xrange(ylen)]\n if overlay:\n all_overlays = [overlay[:, y, :] for y in xrange(ylen)]\n elif axis == 'x':\n all_images = [img[x, :, :] for x in xrange(xlen)]\n if overlay:\n all_overlays = [overlay[x, :, :] for x in xrange(xlen)]\n else:\n raise Exception('invalid axis')\n\n for i, image in enumerate(all_images):\n if overlay:\n show_one(sitk.LabelOverlay(image, all_overlays[i]))\n else:\n show_one(image)\n plt.show()", "def print_array(x, idx=slice(None), message=None, message_prefix=\"SHIM - \",\n file=sys.stdout):\n return set_subtensor(x[idx],\n print(x[idx],\n message=message,\n message_prefix=message_prefix,\n file=file\n )\n )", "def print_images(images,output_dir,image_num=0,pair=False,synth_images=None):\n for i in xrange(images.shape[0]):\n to_print = fix_image(images[i])\n\n if pair and synth_images is not None:\n synth_to_print = fix_image(synth_images[i])\n to_print = np.hstack((to_print,synth_to_print))\n\n #What is the name of the image?\n imsave(os.path.join(output_dir,str(image_num + i) + \".png\"), to_print)", "def print_data(place):\n raise NotImplementedError", "def imshow(self, depth):\n layer = self.cube[depth]\n img = []\n for i in range(self.height):\n img.append([layer[i][j].value for j in range(self.width)])\n plt.imshow(img, cmap='gray')\n plt.show()", "def print_wrapped(data, ncols=3):\r\n nrows = len(data)\r\n labels = data.index\r\n n_split_rows = int(np.ceil(nrows / ncols))\r\n for r in range(0, nrows, ncols):\r\n for c in range(ncols):\r\n try:\r\n numstr = '{}'.format(data[r + c])\r\n tabs = [' '] * (20 - len(labels[r + c]) - len(numstr))\r\n print(labels[r + c] + \"\".join(tabs) + numstr, end='\\t')\r\n except:\r\n pass\r\n print()", "def display_mask(i):\n ### To display binary masks, comment the folowing line\n # mask = np.argmax(testing_preds[i], axis=-1)\n ### To display probability maps, comment the folowing line\n mask = testing_preds[i,:,:,-1]\n mask = np.expand_dims(mask, axis=-1)\n img = PIL.ImageOps.autocontrast(keras.preprocessing.image.array_to_img(mask))\n display(img)", "def show_slices(slices):\n fig, axes = plt.subplots(1, len(slices))\n for i, slice in enumerate(slices):\n axes[i].imshow(slice.T, cmap=\"gray\", origin=\"lower\")", "def info(self):\n\n print(\"pixellisation:\", self.pixel)\n print(\"number of components:\", self.ncomp)\n print(\"number of pixels:\", self.data.shape[:] if self.ncomp == 1 else self.data.shape[1:])\n print(\"nside:\", self.nside)\n print(\"geometry:\", self.geometry)\n print(\"coordinates:\", self.coordinate)", "def output(*args):\n print(*args, end='', file=file)", "def show(self):\r\n \r\n clear() \r\n print \" \" + \"-\" * self.__width + \" \"\r\n \r\n for row in self.__buffer:\r\n rowData = \"\".join(str(i) for i in row)\r\n print \"|\" + rowData + \"|\"\r\n\r\n print \" \" + \"-\" * self.__width + \" \"\r\n self.clearBuffer()", "def display(self, image):\n raise NotImplementedError()", "def display_cropped_img(i):\n image = PIL.Image.open(testing_img_paths[i])\n image = image.crop(box=(313,99,825,611))\n image = image.resize((256,256))\n display(image)", "def printwf(data):\n print 
data #replace for Py3\n sys.stdout.flush()\n sys.stderr.flush()", "def dumpData(self,out,index):\n #--SCVR\n out.pack('4siBB2sB',\n 'SCVR', 5+len(self.text), index+48, self.type, self.func, self.oper)\n if self.text: out.write(self.text)\n #--Value\n if isinstance(self.value,int):\n out.packSub('INTV','i', self.value)\n else:\n out.packSub('FLTV','f', self.value)", "def printData (data):\n print(str(len(data)) + '\\t' + str(data))", "def show_one(img):\n dpi = 40\n margin = 0.05\n nda = sitk.GetArrayFromImage(img)\n spacing = img.GetSpacing()\n extent = (0, nda.shape[1] * spacing[1], nda.shape[0] * spacing[0], 0)\n figsize = (5, 5)\n fig = plt.figure(figsize=figsize, dpi=dpi)\n ax = fig.add_axes([margin, margin, 1 - 2 * margin, 1 - 2 * margin])\n\n plt.set_cmap(\"gray\")\n ax.imshow(nda, extent=extent, interpolation=None)", "def display(self):\n for row in range(self.height):\n for col in range(self.width):\n char = '#' if self.pixels[row * self.width + col] else '.'\n print(char, end='')\n print()\n print()", "def printInterfaces(self, idx = None, flag = None, anchor = \"\"):\n\n if idx is None:\n idx = range(self.atoms.shape[0])\n elif isinstance(idx, (int, np.integer)):\n idx = [idx]\n\n header1 = \"%-2s%6s | %3s %-9s | %5s | %5s | %-5s | %-5s | %4s %-25s | %3s %-11s | %3s %-11s \"\\\n % (\"\", \"Index\", \"\", \"Length\", \"Angle\", \"Angle\", \"Area\", \"Atoms\", \"\", \"Epsilon (*100)\",\\\n \"\", \"Lattice 1\", \"\", \"Lattice 2\")\n header2 = \"%-2s%6s | %6s, %5s | %5s | %5s | %6s | %5s | %7s,%7s,%7s,%6s | \"\\\n \"%3s,%3s,%3s,%3s | %3s,%3s,%3s,%3s\"\\\n % (\"\", \"i\", \"a1\", \"a2\", \"b1/b2\", \"a1/a2\", \"Ang^2\", \"Nr\", \"11\", \"22\", \"12\", \"mas\",\\\n \"a1x\", \"a1y\", \"a2x\", \"a2y\", \"b1x\", \"b1y\", \"b2x\", \"b2y\")\n\n div = \"=\" * len(header1)\n print(\"\\n\" + header1 + \"\\n\" + header2 + \"\\n\" + div)\n\n for i in idx:\n\n la = np.linalg.norm(self.cell_1[i, :, :], axis = 0)\n lb = np.linalg.norm(self.cell_2[i, :, :], axis = 0)\n\n aa = np.dot(self.cell_1[i, :, 0], self.cell_1[i, :, 1]) / (la[0] * la[1])\n aa = np.rad2deg(np.arccos(aa))\n\n ba = np.dot(self.cell_2[i, :, 0], self.cell_2[i, :, 1]) / (lb[0] * lb[1])\n ba = np.rad2deg(np.arccos(ba))\n\n ar = np.abs(np.sin(np.deg2rad(aa))) * la[0] * la[1]\n\n s1 = self.eps_11[i] * 100\n s2 = self.eps_22[i] * 100\n s3 = self.eps_12[i] * 100\n s4 = self.eps_mas[i] * 100\n\n ra = self.rep_1[i, :, :].flatten()\n rb = self.rep_2[i, :, :].flatten()\n\n at = self.atoms[i]\n\n if np.isin(i, flag):\n string = \"%-2s%6.0f * %6.1f,%6.1f * %5.1f * %5.1f * %6.1f * %5.0f * \"\\\n \"%7.2f,%7.2f,%7.2f,%6.2f * %3i,%3i,%3i,%3i * %3i,%3i,%3i,%3i\"\\\n % (anchor, i, la[0], la[1], ba, aa, ar, at, s1, s2, s3, s4,\\\n ra[0], ra[2], ra[1], ra[3], rb[0], rb[2], rb[1], rb[3])\n else:\n string = \"%-2s%6.0f | %6.1f,%6.1f | %5.1f | %5.1f | %6.1f | %5.0f | \"\\\n \"%7.2f,%7.2f,%7.2f,%6.2f | %3i,%3i,%3i,%3i | %3i,%3i,%3i,%3i\"\\\n % (anchor, i, la[0], la[1], ba, aa, ar, at, s1, s2, s3, s4,\\\n ra[0], ra[2], ra[1], ra[3], rb[0], rb[2], rb[1], rb[3])\n\n print(string)\n\n print(div + \"\\n\")", "def show_image(self, pic, prediction=None):\n digitmap = {\n 0: [(0,0), (1,0), (2,0), (3,0), (4,0), (0,1), (4,1), (0,2), (1,2), (2,2), (3,2), (4,2)],\n 1: [(0,2), (1,2), (2,2), (3,2), (4,2)],\n 2: [(0,0), (0,1), (0,2), (1,2), (2,0), (2,1), (2,2), (3,0), (4,0), (4,1), (4,2)],\n 3: [(0,0), (0,1), (0,2), (1,2), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)],\n 4: [(0,0), (0,2), (1,0), (1,2), (2,0), (2,1), (2,2), (3,2), (4,2)],\n 5: [(0,0), (0,1), (0,2), 
(1,0), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)],\n 6: [(0,0), (0,1), (0,2), (1,0), (2,0), (2,1), (2,2), (3,0), (3,2), (4,0), (4,1), (4,2)],\n 7: [(0,0), (0,1), (0,2), (1,2), (2,2), (3,2), (4,2)],\n 8: [(0,0), (1,0), (2,0), (3,0), (4,0), (0,1), (4,1), (0,2), (1,2), (2,2), (3,2), (4,2), (2,1)],\n 9: [(0,0), (0,1), (0,2), (1,0), (1,2), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)]\n }\n\n pic = pic.reshape((28,28)).copy()\n if prediction is not None:\n for pos in digitmap[prediction]:\n pic[pos]=255\n plt.imshow(pic, cmap='gray_r')", "def print_out():\n pass", "def __writeImageBytes(self, image):\n\n if not image:\n raise Exception(\"image not found\")\n result = []\n for i, b in enumerate(image):\n if i % 39 == 0:\n result.append(\"\\n\")\n result.append(f\"{b:02X}\")\n return \"\".join(result)", "def print_stats(cars, notcars):\n print(\"Number of car samples: {0}\".format(len(cars)))\n print(\"Number of non car samples: {0}\".format(len(notcars)))\n img = cv2.imread(cars[0])\n print(\"Image shape: {0}x{1}\".format(img.shape[0], img.shape[1]))\n print(\"Image datatype: {}\".format(img.dtype))", "def show_points_on_img(mask,img):\n labeled, num_objects = ndi.label(mask)\n slices = ndi.find_objects(labeled)\n x, y = [], []\n for dy,dx in slices:\n x_center = (dx.start + dx.stop - 1)/2\n x.append(x_center)\n y_center = (dy.start + dy.stop - 1)/2 \n y.append(y_center)\n plt.figure()\n plt.imshow(img)\n plt.autoscale(False)\n plt.plot(x,y, \"o\")", "def collatz_print(w, i, j, v):\n\tw.write(str(i) + \" \" + str(j) + \" \" + str(v) + \"\\n\")", "def display(self, image=None):\n # By default write the internal buffer to the display.\n if image is None:\n image = self.buffer\n # Set address bounds to entire display.\n self.setAddress()\n # Convert image to array of 16bit 565 RGB data bytes.\n # Unfortunate that this copy has to occur, but the SPI byte writing\n # function needs to take an array of bytes and PIL doesn't natively\n # store images in 16-bit 565 RGB format.\n pixelbytes = list(image_to_data(image))\n # Write data to hardware.\n self.writeData(pixelbytes)", "def genout(self):\n ch = self.buffer_output()\n while ch:\n print(ch, end='')\n ch = self.buffer_output()", "def display(self):\n for row in self.tile_rows:\n print(row)", "def showTensorImg(ts, title):\n img = np.transpose(ts, (1, 2, 0))\n showImg(img, title)\n return", "def plot_slice(path: str, image: sitk.Image, slice_no: int) -> None:\n\n slice_ = sitk.GetArrayFromImage(image[:, :, slice_no])\n\n fig = plt.figure()\n # configure axes such that no boarder is plotted\n # refer to https://github.com/matplotlib/matplotlib/issues/7940/ about how to remove axis from plot\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n ax.set_axis_off()\n ax.margins(0)\n ax.tick_params(which='both', direction='in')\n\n # plot image\n ax.imshow(slice_, 'gray', interpolation='none')\n\n fig.add_axes(ax)\n\n extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())\n plt.savefig(path, bbox_inches=extent)\n plt.close()", "def dump(self, data_points):\n print(data_points)", "def my_print(self):\n if self.__size is not 0:\n for ite in range(self.__position[1]):\n print()\n for ite in range(self.__size):\n print(\" \" * self.__position[0], end=\"\")\n print(\"#\" * self.size)\n else:\n print()", "def print_adni_qc(outputdir, data, title):\n # extract filename for title\n title = os.path.basename(title)\n\n maximum = np.max(data)\n plt.imshow(data, cmap=plt.cm.jet, interpolation='nearest', vmin=0.15*maximum, vmax=0.75*maximum)\n 
plt.colorbar()\n plt.title(os.path.basename(title), fontsize=8)\n plt.xticks([])\n plt.yticks([])\n plt.tight_layout()\n plt.savefig(os.path.join(outputdir, '{}.jpg'.format(title)))\n plt.close()", "def print_list(data):\n for i, line in enumerate(data):\n print(\"Linha {}: {}\".format(i, line))", "def log(self, step, data=''):\n if self.debug:\n print '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++'\n for k in range(0, len(step), 68):\n print '+{:^68.68}+'.format(step[k:k + 68])\n for k in range(0, len(data), 68):\n print '+{:^68.68}+'.format(data[k:k + 68])\n print '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++'\n print", "def dump(self) -> NoReturn:\n index = self._head\n while index:\n print(index.data, end=\" \")\n index = index.next" ]
[ "0.7832976", "0.7438628", "0.72238773", "0.70643157", "0.70205015", "0.6971101", "0.659192", "0.65685576", "0.6525086", "0.64007616", "0.6349104", "0.6105353", "0.6058925", "0.6016299", "0.60077834", "0.59170806", "0.5827092", "0.5820637", "0.5805464", "0.58032835", "0.57411206", "0.5733659", "0.57312804", "0.56618387", "0.56552017", "0.55842036", "0.5539581", "0.5538914", "0.55242884", "0.5521108", "0.548123", "0.5418916", "0.5418916", "0.5415355", "0.54050523", "0.53752536", "0.53599876", "0.53542846", "0.5341357", "0.5333343", "0.531496", "0.53043544", "0.5295613", "0.5268808", "0.5260194", "0.52577215", "0.52549523", "0.52437985", "0.5228753", "0.5228753", "0.5216811", "0.52108616", "0.5204943", "0.51949215", "0.51809996", "0.5180164", "0.51296836", "0.51235956", "0.5120301", "0.50993127", "0.509749", "0.5094299", "0.5094232", "0.5091468", "0.50899243", "0.50882506", "0.5086855", "0.5067222", "0.5065791", "0.5056878", "0.5055202", "0.504992", "0.503669", "0.5036671", "0.503556", "0.5034959", "0.50338626", "0.5002598", "0.500236", "0.49977767", "0.49975577", "0.49908924", "0.49865043", "0.49824145", "0.4981608", "0.4970869", "0.4960709", "0.49506357", "0.49324876", "0.49282718", "0.49280524", "0.49279028", "0.49270505", "0.49262777", "0.49204618", "0.4919497", "0.49174082", "0.49165586", "0.49149203", "0.491347" ]
0.7674774
1
Print the data in an image to standard out.
def print_image(input):
	image=get_image(input)
	nz = image.get_zsize()
	for iz in xrange(nz): print_slice(input, iz)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def printImage(imageObject):\n # TODO\n pass", "def print_img_data(img,title='Image'):\n print(f'________ {title} _______')\n if len(image.shape) == 2:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0])}\\n')\n elif len(image.shape) == 3:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0,0])}\\n')", "def print_image(data, index):\n image = data[index]\n for line in image:\n print(line)", "def print_image(self, width, data):\n if len(data) % width:\n raise ValueError('Bad image format, length of data must be '\n 'divisible by width.')\n height = len(data) / width\n\n # send line-by-line\n for row in range(height):\n self.send_command('print_bitmap', 1, width)\n self.port.write(data[row*width:(row+1)*width], is_text=False)\n self.port.fed_dots(1)", "def print_image(self, pil_image):\n bw_image = pil_image.convert('1', dither=Image.FLOYDSTEINBERG)\n width = bw_image.width\n height = bw_image.height\n img_bytes = list(bw_image.getdata())\n self.print_bitmap(width, height, img_bytes)", "def print_image(img):\n for k in img:\n print(\"\".join([str(x) for x in k] ))", "def print_image(img):\r\n # On affiche l'image\r\n plt.figure(figsize=(20, 5))\r\n plt.subplot(1, 2, 1)\r\n plt.imshow(img)\r\n # On affiche l'histogramme\r\n plt.subplot(1, 2, 2)\r\n plt.hist(img.flatten(), bins=range(256))\r\n plt.show()", "def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)", "def pretty_print(image_example):\n print numpy.array_str(image_example, precision=1, max_line_width=142)", "def print_real_image(r_img, name = None, batch_size = BATCH_SIZE):\n if name: print(\"***********\" + name + \"**********\")\n to_print = []\n c_img = r_img.clone()\n for i in range(batch_size):\n img = c_img[i,:,:]\n img = torch.squeeze(img)\n img = img.detach()\n img = img.cpu()\n plt.imshow(img, cmap = 'gray')\n plt.show()", "def print_image_info(input_image):\n print()\n print(\"Basic Information on image: {}\".format(input_image.filename))\n print(\"Format: {}\".format(input_image.format))\n print(\"Mode: {}\".format(input_image.mode))\n print(\"Size: {}\".format(input_image.size))\n print(\"Width: {}\".format(input_image.width))\n print(\"Height: {}\".format(input_image.height))\n print(\"Palette: {}\".format(input_image.palette))\n print()", "def printImage(currentImage):\n\tprint currentImage + ' is set to be printed...'", "def print_images_out_statistics(self):\n self._print_images_statistics(self._images_out_folder, self._pose_class_names)", "def print_img_at_idx(self, idx):\n\n img = self.images[idx]\n print_img(img)", "def printMat(image):\n for row in range(image.rows):\n print \"[\",\n for col in range(image.cols):\n print cv.mGet(image, row, col),\n print \"]\"\n print \"\"", "def show_image(image):\n print('-' * (len(image) + 4))\n for line in image:\n print('| ', end='')\n for ch in line:\n char = '#' if ch is True else '.'\n print(char, end='')\n print(' |')\n print('-' * (len(image) + 4))", "def display(self):\n display(self.image)", "def show_input_to_output(img_ns):\n figure()\n \n sp = subplot(1, 2, 1).imshow(img_ns.img)\n sp.axes.grid(False)\n sp.axes.tick_params(bottom=False, left=False, 
which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Input Image\", fontsize=10);\n outimg = tiles_to_images(img_ns, img_ns.tile_grid, img_ns.tile_catalog, img_ns.tile_size)\n sp = subplot(1, 2, 2).imshow(outimg.astype(np.uint8));\n sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0)\n title(\"Output Image From Tiles\", fontsize=10);\n sp.axes.grid(False)\n #print(outimg.astype(np.uint8))\n #print(img_ns)\n plt.savefig(img_ns.output_filename + \"_input_to_output.pdf\", bbox_inches=\"tight\")\n plt.close()", "def visualize_output(\n self,\n img: np.ndarray,\n output_data: Any):\n raise NotImplementedError", "def print_images_in_statistics(self):\n self._print_images_statistics(self._images_in_folder, self._pose_class_names)", "def print_stats(cars, notcars):\n print(\"Number of car samples: {0}\".format(len(cars)))\n print(\"Number of non car samples: {0}\".format(len(notcars)))\n img = cv2.imread(cars[0])\n print(\"Image shape: {0}x{1}\".format(img.shape[0], img.shape[1]))\n print(\"Image datatype: {}\".format(img.dtype))", "def imshow(image):\n iio.imshow(dtype.im2uint(image))", "def _convert_and_print_image(self, im):\n pixLine = \"\"\n imLeft = \"\"\n imRight = \"\"\n switch = 0\n imgSize = [0, 0]\n\n if im.size[0] > 512:\n print (\"WARNING: Image is wider than 512 and could be truncated at print time \")\n if im.size[1] > 255:\n raise ValueError(\"Image Height larger than 255\")\n\n imBorder = self._check_image_size(im.size[0])\n for i in range(imBorder[0]):\n imLeft += \"0\"\n for i in range(imBorder[1]):\n imRight += \"0\"\n\n for y in range(im.size[1]):\n imgSize[1] += 1\n pixLine += imLeft\n imgSize[0] += imBorder[0]\n for x in range(im.size[0]):\n imgSize[0] += 1\n RGB = im.getpixel((x, y))\n imColor = (RGB[0] + RGB[1] + RGB[2])\n imPattern = \"1X0\"\n patternLen = len(imPattern)\n switch = (switch - 1) * (-1)\n for x in range(patternLen):\n if imColor <= (255 * 3 / patternLen * (x + 1)):\n if imPattern[x] == \"X\":\n pixLine += \"%d\" % switch\n else:\n pixLine += imPattern[x]\n break\n elif imColor > (255 * 3 / patternLen * patternLen) and imColor <= (255 * 3):\n pixLine += imPattern[-1]\n break\n pixLine += imRight\n imgSize[0] += imBorder[1]\n\n self._print_image(pixLine, imgSize)", "def display(self, image):\n raise NotImplementedError()", "def _print_image(self, line, size):\n i = 0\n cont = 0\n\n self._write(self.__class__.__imageSize['1x1'])\n buffer = bytearray([int((size[0] / size[1]) / 8), 0, size[1], 0])\n\n self._write(buffer)\n buffer = bytearray()\n\n while i < len(line):\n hex_string = int(line[i:i + 8], 2)\n buffer.append(hex_string)\n i += 8\n cont += 1\n if cont % 4 == 0:\n self._write(buffer)\n buffer = bytearray()\n cont = 0", "def display_image(mat):\n\timg = Image.fromarray(mat)\n\timg.show()", "def print_info(self, i):\n\n im_size = self.image_size(i)\n print 'The path of the image is: {}'.format(self.image_path_at(i))\n print 'width: {}, height: {}'.format(im_size[0], im_size[1])\n \n attr_i = self.gtdb['attr'][i, :]\n print 'The attributes are: {}'.format(','.join([self._classes[i] for i in np.where(attr_i==1)[0]]))", "def show(self, data):\n if isinstance(data, (numpy.ndarray, h5py.Dataset)):\n isAtomic = len(data.shape) == 0\n isCurve = len(data.shape) == 1 and numpy.issubdtype(data.dtype, numpy.number)\n isImage = len(data.shape) == 2 and numpy.issubdtype(data.dtype, numpy.number)\n if isAtomic:\n self.showAsString(data)\n elif isCurve:\n self.show1d(data)\n elif isImage:\n 
self.show2d(data)\n else:\n self.showAsString(data)\n else:\n self.showAsString(data)", "def display(array):\n plt.figure()\n plt.imshow(array)\n plt.show()", "def info_np(img):\n import numpy as np\n\n print ('Dimensions: ' + str(np.shape(img)))\n print ('Min value: ' + str(np.min(img)))\n print ('Avg value: ' + str(np.average(img)))\n print ('Med value: ' + str(np.median(img)))\n print ('Max value: ' + str(np.max(img)))\n print ('Std dev: ' + str(np.std(img)))\n print ('Sum: ' + str(np.sum(img)))", "def show_image(image):\r\n plt.imshow(image, cmap='gray')\r\n plt.show()", "def showImg(img, binary=True, fName=''):\n img = img[0, 0, :, :]\n\n if binary:\n img = img > 0.5\n\n img = Image.fromarray(np.uint8(img * 255), mode='L')\n\n if fName:\n img.save('assets/' + fName + '.png')\n else:\n img.show()", "def display_image(X):\n\n\tim = X.reshape(28, 28)\n\ttemp = plt.imshow(im)\n\tplt.show()", "def print_adni_qc(outputdir, data, title):\n # extract filename for title\n title = os.path.basename(title)\n\n maximum = np.max(data)\n plt.imshow(data, cmap=plt.cm.jet, interpolation='nearest', vmin=0.15*maximum, vmax=0.75*maximum)\n plt.colorbar()\n plt.title(os.path.basename(title), fontsize=8)\n plt.xticks([])\n plt.yticks([])\n plt.tight_layout()\n plt.savefig(os.path.join(outputdir, '{}.jpg'.format(title)))\n plt.close()", "def show(self) -> None:\n cv.imshow(str(self.__class__), self.output_image)", "def display_data():\n data = read_data_from_file()\n data_size = len(data)\n print('Data size - ' + str(data_size))\n\n\n for i in range(data_size-1, 0, -1):\n image = data[i]\n cv2.imshow('window', image[0])\n print(str(image[1]))\n cv2.waitKey(50)", "def print_bitmap(self, w, h, image):\n\n bitmap = self._pack_bitmap(w, h, image)\n\n row_bytes = (w + 7) // 8 # Round up to next byte boundary\n\n if row_bytes >= 48:\n row_bytes_clipped = 48\n else:\n row_bytes_clipped = row_bytes # 384 pixels max width\n\n # Est. 
max rows to write at once, assuming 256 byte printer buffer.\n if self._dtr_enabled:\n chunk_height_limit = 255 # Buffer doesn't matter, handshake!\n else:\n chunk_height_limit = 256 // row_bytes_clipped\n if chunk_height_limit > self._max_chunk_height:\n chunk_height_limit = self._max_chunk_height\n elif chunk_height_limit < 1:\n chunk_height_limit = 1\n\n row_start = 0\n i = 0\n while row_start < h:\n # Issue up to chunkHeightLimit rows at a time:\n chunk_height = h - row_start\n if chunk_height > chunk_height_limit:\n chunk_height = chunk_height_limit\n\n self.write(self.ASCII_DC2, '*', chunk_height, row_bytes_clipped)\n\n y = 0\n while y < chunk_height:\n x = 0\n while x < row_bytes_clipped:\n self.timeout_wait()\n self._send_to_printer(int(bitmap[i]))\n x += 1\n i += 1\n\n y += 1\n\n i += row_bytes - row_bytes_clipped\n\n self.timeout_set(chunk_height * self._dot_print_time)\n\n row_start += chunk_height_limit\n\n self._prev_byte = '\\n'", "def show_env(self, img):\n plt.figure(1)\n plt.subplot(111)\n plt.imshow(img, interpolation=\"nearest\")\n plt.show()", "def img_disp(name,img):\n cv2.imshow(name,img.astype(int)/255.0)\n cv2.waitKey()", "def display(self, image=None):\n # By default write the internal buffer to the display.\n if image is None:\n image = self.buffer\n # Set address bounds to entire display.\n self.setAddress()\n # Convert image to array of 16bit 565 RGB data bytes.\n # Unfortunate that this copy has to occur, but the SPI byte writing\n # function needs to take an array of bytes and PIL doesn't natively\n # store images in 16-bit 565 RGB format.\n pixelbytes = list(image_to_data(image))\n # Write data to hardware.\n self.writeData(pixelbytes)", "def show(type,img):\n # print(img)\n cv2.imshow(type, img)\n cv2.waitKey()", "def imdisplay(filename, representation):\n image = read_image(filename, representation)\n\n if representation == GRAY_OUT:\n plt.imshow(image, cmap='gray')\n else:\n plt.imshow(image)\n\n plt.show()", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n self.command(\n const.COLUMNADDR, 0x00, self.width-1, # Column start/end address\n const.PAGEADDR, 0x00, self.pages-1) # Page start/end address\n\n pix = list(image.getdata())\n step = self.width * 8\n buf = []\n for y in xrange(0, self.pages * step, step):\n i = y + self.width-1\n while i >= y:\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[i + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n i -= 1\n\n self.data(buf)", "def print_image_row(input, iy=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, y = %d col)\" % (iz, iy)\n\tline = []\n\tfor ix in xrange(nx):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((ix + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image_info(self):\r\n\r\n maxt = np.max(self.times)\r\n\r\n print (\" Duration of Image Stack: %9.3f s (%8.3f min) period = %8.3f s\" % (maxt, maxt/60.0, self.period))\r\n\r\n print (' Image shape: ', self.imageData.shape)\r\n\r\n print (' nFrames: %d framerate: %9.3f\\n' % (self.nFrames, self.framerate))", "def print_data(self, data):\n self.print_indicator = True\n self.imagedata = data\n self.setImage(self.imagedata)\n\n self.indicator_min = -200\n self.indicator_max = 200\n\n if self.video_model != None:\n pos = int(self.video_model.get_pos(datatype = \"motion\"))\n 
self.indicator = self.view.plot([pos,pos],[self.indicator_min,self.indicator_max],pen=pyqtgraph.mkPen(color=pyqtgraph.hsvColor(2),width=1))", "def dump_image(image, path_image):\n cv2.imwrite(path_image, image)\n return", "def _repr_png_(self):\n mol = self.owner.mol\n keku = IPythonConsole.kekulizeStructures\n size = IPythonConsole.molSize\n opts = IPythonConsole.drawOptions\n return Draw._moltoimg(\n mol, size, self.aix, \"\", returnPNG=True, drawOptions=opts,\n kekulize=keku, highlightBonds=self.bix\n )", "def _repr_png_(self):\n mol = self.owner.mol\n keku = IPythonConsole.kekulizeStructures\n size = IPythonConsole.molSize\n opts = IPythonConsole.drawOptions\n return Draw._moltoimg(\n mol, size, self.aix, \"\", returnPNG=True, drawOptions=opts,\n kekulize=keku, highlightBonds=self.bix\n )", "def print(self):\n self.__print_local(self.dataset, 0)", "def main():\n labels, data = load_image_data()\n print(labels.shape, data.shape)", "def print_image_info(image, resize=rsz_default, kernel=kernel_size):\n\tprint \"Image Size: {0}\".format(image.shape)\n\tprint \"Image Max: {0}\".format(image.max())\n\tprint \"Image Min: {0}\".format(image.min())\n\tprint \"Image Mean: {0}\".format(image.mean())\n\tprint \"Image dtype: {0}\\n\".format(image.dtype)\n\timage = to_uint8(image)\n\timage_prep = preprocess(image, resize=resize, kernel=kernel)\n\tcontour = get_contour(image_prep)\n\tM = get_image_moments(contour=contour)\n\tsecond_m = ['m20', 'm11', 'm02', 'm30', 'm21', 'm12', 'm03']\n\tprint \"Zero Order Moment: {0}\".format(M['m00'])\n\tprint \"First Order Moments: {0}, {1}\".format(M['m10'], M['m01'])\n\tprint \"Second Order Moments:\"\n\tsecond_m_str = ''\n\tfor m2 in second_m:\n\t\tsecond_m_str += \"{0},\".format(M[m2])\n\tprint second_m_str[:-1]", "def showimage(image):\n mplt.figure()\n mplt.imshow(image)\n mplt.show()", "def _print_img_size(self, img):\n width, height = img.size\n print('{}, {}'.format(width, height))", "def display(array):\n if isinstance(array, np.ndarray):\n plt.imshow(array)\n plt.show()\n else:\n raise TypeError(\"display() needs a numpy ndarray as parameter, \"\n f\"got {type(array)}\")", "def display(image, name=\"Image\"):\n cv2.imshow(name, image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n cv2.imwrite(\"{}.png\".format(name), image)", "def show(self, exec_rasterize = False):\n\n if (exec_rasterize):\n self.rasterize()\n\n Image.fromarray(self._image).show()", "def show_image(self, pic, prediction=None):\n digitmap = {\n 0: [(0,0), (1,0), (2,0), (3,0), (4,0), (0,1), (4,1), (0,2), (1,2), (2,2), (3,2), (4,2)],\n 1: [(0,2), (1,2), (2,2), (3,2), (4,2)],\n 2: [(0,0), (0,1), (0,2), (1,2), (2,0), (2,1), (2,2), (3,0), (4,0), (4,1), (4,2)],\n 3: [(0,0), (0,1), (0,2), (1,2), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)],\n 4: [(0,0), (0,2), (1,0), (1,2), (2,0), (2,1), (2,2), (3,2), (4,2)],\n 5: [(0,0), (0,1), (0,2), (1,0), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)],\n 6: [(0,0), (0,1), (0,2), (1,0), (2,0), (2,1), (2,2), (3,0), (3,2), (4,0), (4,1), (4,2)],\n 7: [(0,0), (0,1), (0,2), (1,2), (2,2), (3,2), (4,2)],\n 8: [(0,0), (1,0), (2,0), (3,0), (4,0), (0,1), (4,1), (0,2), (1,2), (2,2), (3,2), (4,2), (2,1)],\n 9: [(0,0), (0,1), (0,2), (1,0), (1,2), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)]\n }\n\n pic = pic.reshape((28,28)).copy()\n if prediction is not None:\n for pos in digitmap[prediction]:\n pic[pos]=255\n plt.imshow(pic, cmap='gray_r')", "def write(self, image):\n raise NotImplementedError()", "def visualize_output(\n self,\n img: np.ndarray,\n 
output_data: List[DetectObject]) -> np.ndarray:\n draw_layer_tag = f'draw_layer_{self.id}_{self.draw_layer_index^1}'\n\n self.layer = dpg.add_draw_layer(\n parent=f'main_window_{self.id}',\n tag=draw_layer_tag,\n show=False\n )\n for out in output_data:\n description = f'{out.clsname} [{out.score*100:.2f}%]'\n # add alpha and convert to RGB\n color = np.array(self.class_colors[out.clsname])\n color = tuple((255*color).astype(np.uint8))\n dpg.draw_rectangle(\n parent=draw_layer_tag,\n pmin=(out.xmin*self.width,\n out.ymin*self.height),\n pmax=(out.xmax*self.width,\n out.ymax*self.height),\n color=color,\n thickness=2\n )\n dpg.draw_text(\n parent=draw_layer_tag,\n text=description,\n pos=(out.xmin*self.width + _PADDING,\n out.ymin*self.height + _PADDING),\n color=color,\n size=_FONT_SIZE\n )\n\n return img", "def imdisplay(filename, representation):\n\n image = read_image(filename, representation)\n plt.imshow(image, cmap=\"gray\")\n plt.show()", "def print_image_col(input, ix=0, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice, x = %d row)\" % (iz, ix)\n\tline = []\n\tfor iy in xrange(ny):\n\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\tif ((iy + 1) % 5 == 0): line.append(\"\\n \")\n\tline.append(\"\\n\")\n\tprint \"\".join(line)", "def print_image_slice(input, iz=0):\n\timage=get_image(input)\n\tnx = image.get_xsize()\n\tny = image.get_ysize()\n\tnz = image.get_zsize()\n\tprint \"(z = %d slice)\" % (iz)\n\tline = []\n\tfor iy in xrange(ny-1,-1,-1):\n\t\tline.append(\"Row \")\n\t\tline.append(\"%4i \" % iy)\n\t\tfor ix in xrange(nx):\n\t\t\tline.append(\"%12.5g \" % (image.get_value_at(ix,iy,iz)))\n\t\t\tif ((ix + 1) % 5 == 0): \n\t\t\t\tline.append(\"\\n \")\n\t\t\t\tline.append(\" \")\n\t \tline.append(\"\\n\")\n\t \tif(nx%5 != 0): line.append(\"\\n\")\n\tprint \"\".join(line)", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n page = 0xB0\n pix = list(image.getdata())\n step = self.width * 8\n for y in xrange(0, self.pages * step, step):\n\n # move to given page, then reset the column address\n self.command(page, 0x02, 0x10)\n page += 1\n\n buf = []\n for x in xrange(self.width):\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[x + y + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n\n self.data(buf)", "def display(self, image):\n assert(image.mode == '1')\n assert(image.size[0] == self.width)\n assert(image.size[1] == self.height)\n\n page = 0xB0\n pix = list(image.getdata())\n step = self.width * 8\n for y in xrange(0, self.pages * step, step):\n\n # move to given page, then reset the column address\n self.command(page, 0x02, 0x10)\n page += 1\n\n buf = []\n for x in xrange(self.width):\n byte = 0\n for n in xrange(0, step, self.width):\n byte |= (pix[x + y + n] & 0x01) << 8\n byte >>= 1\n\n buf.append(byte)\n\n self.data(buf)", "def print_images(images,output_dir,image_num=0,pair=False,synth_images=None):\n for i in xrange(images.shape[0]):\n to_print = fix_image(images[i])\n\n if pair and synth_images is not None:\n synth_to_print = fix_image(synth_images[i])\n to_print = np.hstack((to_print,synth_to_print))\n\n #What is the name of the image?\n imsave(os.path.join(output_dir,str(image_num + i) + \".png\"), to_print)", "def show_picture(self, data):\n raise NotImplementedError", "def show_image(dataset, domain, image_class, image_name):\n\timage_file = 
io.imread(os.path.join(\"data\", dataset, domain, \"images\", image_class, image_name))\n\tplt.imshow(image_file)\n\tplt.pause(0.001)\n\tplt.figure()", "def pixdump( source, start=None, end=None, length=None, width=64, height=None, palette=None ):\n\n for line in pixdump_iter( source, start, end, length, width, height, palette ):\n print( line )", "def display(self):\n for row in range(self.height):\n for col in range(self.width):\n char = '#' if self.pixels[row * self.width + col] else '.'\n print(char, end='')\n print()\n print()", "def drawData(data, writeLocation = \"../output/data.png\", gamma = 0.25):\n\n im = Image.new(\"RGB\", (data.shape[1], data.shape[0]))\n d = ImageDraw.Draw(im)\n \n _drawData(d, data, gamma)\n \n im.save(writeLocation, \"PNG\")", "def print(self):\n print('Name:', self.name)\n print('Camera:', self.camera)\n print('Memory:', self.memory)\n print('Ram:', self.ram)\n print('Price:', self.price)\n print('Image:', self.image)", "def showTensorImg(ts, title):\n img = np.transpose(ts, (1, 2, 0))\n showImg(img, title)\n return", "def imdisplay(filename, representation):\n img = read_image(filename, representation)\n if representation == GS_REP:\n plt.imshow(img, cmap=plt.cm.gray)\n else:\n plt.imshow(img)", "def disImg(data=None,colorbar=False):\n size = np.sqrt(len(data[4:]))\n xmm = data[0]\n ymm = data[1]\n pl.matshow(data[4:].reshape(size,size),fignum=False)\n if colorbar == True:\n pl.colorbar()\n pl.xlim(0,size-1)\n pl.ylim(0,size-1)\n pl.xlabel('Pixels')\n pl.ylabel('Pixels')\n pl.grid(color='yellow')", "def info(self):\n\n print(\"pixellisation:\", self.pixel)\n print(\"number of components:\", self.ncomp)\n print(\"number of pixels:\", self.data.shape[:] if self.ncomp == 1 else self.data.shape[1:])\n print(\"nside:\", self.nside)\n print(\"geometry:\", self.geometry)\n print(\"coordinates:\", self.coordinate)", "def view_image(train_dataloader):\n for (x, target) in train_dataloader:\n np.save(\"img.npy\", x)\n print(x.shape)\n exit(0)", "def show_image(self):\n cv2.imshow(self.config.DISPLAY_NAME, self.image)", "def show_digit(self):\n x_train, _, _, _ = self._load_data()\n plt.imshow(x_train[0], cmap=plt.cm.binary)\n plt.show()", "def matplotlibDisplay(img, title=\"Image\", colorFlag = 'gray'):\n plt.imshow(img, colorFlag)\n plt.title(title)\n plt.xticks([])\n plt.yticks([])\n plt.show()", "def visualize_output(\n self,\n img: np.ndarray,\n output_data: np.ndarray) -> np.ndarray:\n draw_layer_tag = f'draw_layer_{self.id}_{self.draw_layer_index^1}'\n\n self.layer = dpg.add_draw_layer(\n parent=f'main_window_{self.id}',\n tag=draw_layer_tag,\n show=False\n )\n\n best_k = np.argsort(output_data)[-self.top_n:][::-1]\n class_names = np.array(self.class_names)[best_k]\n percentages = softmax(output_data[best_k])\n\n for i, (name, perc) in enumerate(zip(class_names, percentages)):\n label_pos = dpg.get_item_pos(f'cell_bar_{i}_{self.id}')\n dpg.draw_rectangle(\n parent=draw_layer_tag,\n pmin=label_pos,\n pmax=(\n label_pos[0] + _SCORE_COLUMN_WIDTH*perc,\n label_pos[1] + _FONT_SIZE),\n color=(0, 255, 0),\n fill=(0, 255, 0)\n )\n dpg.set_value(f'cell_perc_{i}_{self.id}', f'{perc*100:.2f}%')\n dpg.set_value(f'cell_name_{i}_{self.id}', name)\n\n return img", "def show(raster):\n gk.read(raster).show()", "def info(self):\n\n\t\tprint(\"Pixels on a side: {0}\".format(self.data.shape[0]))\n\t\tprint(\"Pixel size: {0}\".format(self.resolution))\n\t\tprint(\"Total angular size: {0}\".format(self.side_angle))\n\t\tprint(\"lmin={0:.1e} ; 
lmax={1:.1e}\".format(self.lmin,self.lmax))", "def convert_and_print(img_file, table_path, op_size): \n \n # load up the table and the image\n table_obj = {}\n with open(table_path,\"r\") as f:\n try:\n table_obj = json.load(f)\n except ValueError:\n print(\"problem parsing table JSON, aborting...\")\n return\n image = Image.open(img_file)\n w,h = image.size\n pixels = image.load()\n\n # Calculate character widths after scaling\n chr_width, chr_height = table_obj[Constants.META_KEY][Constants.IMAGE_WIDTH_KEY], table_obj[Constants.META_KEY][Constants.IMAGE_HEIGHT_KEY]\n chr_width = ceil(chr_width / op_size)\n chr_height = ceil(chr_height / op_size)\n # do the conversion and print it\n result = img2Ascii(pixels, table_obj[Constants.TABLE_KEY], w, h, chr_width, chr_height)\n for i in result:\n print(\"\".join(i))", "def print_metadata(self, camera, pixel_size, fileout=sys.stdout):\n print(self, end='\\n', file=fileout)\n print(cam_to_string(camera), end='\\n', file=fileout)\n print(\"pixels;{}\".format(pixel_size), file=fileout)", "def print_tile(tile: Image.Image):\n width, height = tile.size\n\n pixels = tile.getcolors(width * height)\n\n most_frequent_pixel = pixels[0]\n\n for count, color in pixels:\n if count > most_frequent_pixel[0]:\n most_frequent_pixel = (count, color)\n\n r, g, b = most_frequent_pixel[1]\n\n light = r * 299/1000 + g * 587/1000 + b * 114/1000\n\n char = get_char_from_light(light)\n\n color = get_xterm_color(r, g, b)\n\n print(\"\\u001b[38;5;\" + str(color) + \"m\" + char, end=\"\\033[0m\")", "def imshow(img):\n imadd(img)\n plt.ion()\n plt.show()", "def testImage():\n width = 200\n height = 200\n image = BitMap( width, height )\n \n # create a loop in order to draw some pixels\n \n for col in range(width):\n if col % 10 == 0: print 'col is', col\n for row in range(height):\n if col % 10 == 0 or row % 10 == 0:\n image.plotPoint( col, row ) \n \n # we have now looped through every image pixel\n # next, we write it out to a file\n \n image.saveFile( \"test.bmp\" )\n #changing the col and row number determines how big the grid is for the picture or how zoomed in it is. 
Changing the and to or just makes the grid go from dotted grid to lines.", "def show_one(img):\n dpi = 40\n margin = 0.05\n nda = sitk.GetArrayFromImage(img)\n spacing = img.GetSpacing()\n extent = (0, nda.shape[1] * spacing[1], nda.shape[0] * spacing[0], 0)\n figsize = (5, 5)\n fig = plt.figure(figsize=figsize, dpi=dpi)\n ax = fig.add_axes([margin, margin, 1 - 2 * margin, 1 - 2 * margin])\n\n plt.set_cmap(\"gray\")\n ax.imshow(nda, extent=extent, interpolation=None)", "def image_show(inp, title=None):\n inp = inp.numpy().transpose((1, 2, 0))\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n plt.imshow(inp)\n\n if title is not None:\n plt.title(title)\n plt.pause(0.001)", "def imdisplay(filename, representation):\n im = read_image(filename, representation)\n if representation == 1:\n plt.imshow(im, cmap='gray')\n plt.show()\n if representation == 2:\n plt.imshow(im)\n plt.show()", "def display_napari(pos_img):\n global data\n global img_queue\n if pos_img is None:\n return\n # read image and z position\n image = np.reshape(pos_img[2:],(clip[0], clip[1]))\n z_pos = pos_img[1]\n color = pos_img[0]\n\n # write image into correct slice of data and update display\n data[z_pos] = np.squeeze(image)\n layer = viewer.layers[color]\n layer.data = data\n #print(\"updating \", z_pos, color)\n\n img_queue.task_done()", "def display_image(image):\n image = tf.constant(image)\n image = tf.image.convert_image_dtype(image, tf.uint8)\n return PIL.Image.fromarray(image.numpy())", "def print_out():\n pass", "def image(self, path):\n im = Image.open(path).convert(\"RGB\")\n # Convert the RGB image in printable image\n self._convert_and_print_image(im)", "def im_show(img):\n img = img / 2 + 0.5 # unnormalize\n npimg = img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n plt.show()", "def print_images(i, df):\n \n images_folder_path = \"dataset/petfinder-adoption-prediction/train_images/\"\n plt.imshow(cv2.cvtColor(cv2.imread(images_folder_path+df.filename[i]), cv2.COLOR_BGR2RGB),);\n plt.axis(\"off\");\n plt.show()", "def do_info (self, line) :\n\t\tprint\n\t\tprint get_info_string( self.__image )\n\t\tprint", "def image_info(img):\n\tprint(img.format)\n\tprint(img.size)\n\tprint(img.mode)", "def pprint(self, data):\n self._assert(data)\n data = self._render(data) # make elements ascii\n fmats = self._fmats(data) # get array of padding formats)\n for row in data:\n print(fmats.format(*row))" ]
[ "0.75263417", "0.74676603", "0.74178773", "0.71454406", "0.70787203", "0.70770824", "0.6925922", "0.68538165", "0.6828833", "0.680339", "0.678513", "0.6639985", "0.6579925", "0.65075237", "0.64939463", "0.6484506", "0.6398455", "0.6392404", "0.63622975", "0.6340637", "0.63262016", "0.6307193", "0.6285712", "0.6285369", "0.62540925", "0.6243022", "0.6193243", "0.618649", "0.61821467", "0.60993254", "0.6072938", "0.6036931", "0.60203135", "0.600127", "0.59803253", "0.59702146", "0.59661436", "0.59528494", "0.59480464", "0.5936401", "0.5906899", "0.5900356", "0.58976775", "0.5876411", "0.5873731", "0.5862324", "0.5857106", "0.58434385", "0.58434385", "0.5839406", "0.58364713", "0.58272517", "0.57928085", "0.57885146", "0.57884765", "0.57857466", "0.5780859", "0.5778936", "0.5768562", "0.5755103", "0.5739007", "0.57383186", "0.57355034", "0.5731207", "0.5731207", "0.57270575", "0.5723809", "0.5722958", "0.57200104", "0.5708253", "0.5691348", "0.5683647", "0.5682703", "0.56772393", "0.5674491", "0.5666773", "0.56597376", "0.5652539", "0.56487155", "0.5647298", "0.5637696", "0.5628275", "0.56169486", "0.56141984", "0.5610702", "0.5606966", "0.56037694", "0.55859864", "0.5578834", "0.5575727", "0.5563797", "0.55576724", "0.55573845", "0.5550739", "0.55428267", "0.55365", "0.5532086", "0.55081344", "0.5500756", "0.5498963" ]
0.7227022
3