diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 0000000000000000000000000000000000000000..8a3ebb8cbec632cc3afd1aa28172ea6aa1001c65
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,28 @@
+// For format details, see https://aka.ms/devcontainer.json. For config options, see the
+// README at: https://github.com/devcontainers/templates/tree/main/src/docker-existing-dockerfile
+{
+    "name": "ten_agent_dev",
+    "image": "ghcr.io/ten-framework/ten_agent_build:0.4.17",
+    "customizations": {
+        "vscode": {
+            "extensions": [
+                "golang.go",
+                "ms-vscode.cpptools"
+            ]
+        }
+    },
+    "workspaceMount": "source=${localWorkspaceFolder},target=/app,type=bind",
+    "workspaceFolder": "/app",
+    // Use 'forwardPorts' to make a list of ports inside the container available locally.
+    "forwardPorts": [
+        3000,
+        8080,
+        49483
+    ],
+    // Features to add to the dev container. More info: https://containers.dev/features.
+    "features": {
+        "ghcr.io/devcontainers/features/git:1": {},
+        "ghcr.io/devcontainers/features/python:1": {},
+        "ghcr.io/devcontainers/features/node:1": {}
+    }
+}
\ No newline at end of file
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..d769af29edecb4f22442692d1a7a961bf5b4c96c
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,2 @@
+.git
+playground/
\ No newline at end of file
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..1d4bfda5e6acdc2fcff917c4fbb16275afe84dad
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,5 @@
+# Shell scripts use LF as the line separator, even when checked out on a Windows (NTFS) file system
+*.sh text eol=lf
+agents/bin/* text eol=lf
+agents/scripts/* text eol=lf
+*.lockb filter=lfs diff=lfs merge=lfs -text
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d60c84af8498c2d547c281682cbc30ea6da5dc63
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,67 @@
+name: Bug Report
+description: Report a bug or issue with the project
+title: "[BUG] "
+labels: [bug]
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Thanks for taking the time to report a bug! Please fill in the following details.
+
+  - type: textarea
+    id: description
+    attributes:
+      label: Description
+      description: A clear and detailed description of the bug.
+      placeholder: "Enter a clear and concise description of what the bug is."
+    validations:
+      required: true
+
+  - type: input
+    id: environment
+    attributes:
+      label: Environment
+      description: The environment where this bug occurred (e.g., operating system, CPU arch, etc.).
+      placeholder: "Enter details about the environment."
+    validations:
+      required: true
+
+  - type: textarea
+    id: steps
+    attributes:
+      label: Steps to reproduce
+      description: What are the steps to reproduce this issue?
+      placeholder: |
+        1. ...
+    validations:
+      required: true
+
+  - type: textarea
+    id: expected
+    attributes:
+      label: Expected behavior
+      description: What should have happened instead?
+      placeholder: "Describe what you expected to happen."
+    validations:
+      required: true
+
+  - type: dropdown
+    id: severity
+    attributes:
+      label: Severity
+      description: How severe is the bug?
+      options:
+        - Critical
+        - Major
+        - Minor
+    validations:
+      required: true
+
+  - type: textarea
+    id: additional_info
+    attributes:
+      label: Additional Information
+      description: Any other context or screenshots related to the bug.
+      placeholder: "Enter additional context or information."
+    validations:
+      required: false
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4711b753afb578ade6725aaee255753cf9fe9b82
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -0,0 +1,39 @@
+name: Feature Request
+description: Request a feature
+title: "[FEATURE] "
+labels: [feature]
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Thanks for taking the time to request a feature! Please fill in the following details.
+
+  - type: textarea
+    id: description
+    attributes:
+      label: Description
+      description: A clear and detailed description of the feature request.
+      placeholder: "Enter a clear and concise description of what the feature request is."
+    validations:
+      required: true
+
+  - type: dropdown
+    id: severity
+    attributes:
+      label: Severity
+      description: How important is this feature?
+      options:
+        - Critical
+        - Major
+        - Minor
+    validations:
+      required: true
+
+  - type: textarea
+    id: additional_info
+    attributes:
+      label: Additional Information
+      description: Any other context or screenshots related to the feature request.
+      placeholder: "Enter additional context or information."
+    validations:
+      required: false
diff --git a/.github/workflows/build-docker.yaml b/.github/workflows/build-docker.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..83226368d8394b71ade8773066d423d558823f48
--- /dev/null
+++ b/.github/workflows/build-docker.yaml
@@ -0,0 +1,87 @@
+name: Build Docker
+
+on:
+  push:
+    branches: [ "**" ]
+    tags: [ "**" ]
+    paths-ignore:
+      - ".devcontainer/**"
+      - ".github/**"
+      - "!.github/workflows/build-docker.yaml"
+      - ".vscode/**"
+      - "docs/**"
+      - "**.md"
+  pull_request:
+    branches: [ "main" ]
+    paths-ignore:
+      - ".devcontainer/**"
+      - ".github/**"
+      - "!.github/workflows/build-docker.yaml"
+      - ".vscode/**"
+      - "docs/**"
+      - "**.md"
+  workflow_dispatch:
+
+env:
+  SERVER_IMAGE_NAME: ten_agent_server
+  PLAYGROUND_IMAGE_NAME: ten_agent_playground
+  NON_EDIT_PLAYGROUND_IMAGE_NAME: ten_agent_non_edit_playground
+  DEMO_IMAGE_NAME: ten_agent_demo
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          fetch-tags: true
+          fetch-depth: "0"
+      - id: pre-step
+        shell: bash
+        run: echo "image-tag=$(git describe --tags --always)" >> $GITHUB_OUTPUT
+      - name: Build & Publish Docker Image for Agents Server
+        uses: elgohr/Publish-Docker-Github-Action@v5
+        with:
+          name: ${{ github.repository_owner }}/${{ env.SERVER_IMAGE_NAME }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+          registry: ghcr.io
+          tags: "${{ github.ref == 'refs/heads/main' && 'latest,' || '' }}${{ steps.pre-step.outputs.image-tag }}"
+          no_push: ${{ github.event_name == 'pull_request' }}
+      - name: Build & Publish Docker Image for Playground
+        uses: elgohr/Publish-Docker-Github-Action@v5
+        env:
+          EDIT_GRAPH_MODE: true
+        with:
+          name: ${{ github.repository_owner }}/${{ env.PLAYGROUND_IMAGE_NAME }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+          registry: ghcr.io
+          workdir: playground
+          tags: "${{ github.ref == 'refs/heads/main' && 'latest,' || '' }}${{ steps.pre-step.outputs.image-tag }}"
+          no_push: ${{
github.event_name == 'pull_request' }} + buildargs: EDIT_GRAPH_MODE + - name: Build & Publish Docker Image for Non-Editable Playground + uses: elgohr/Publish-Docker-Github-Action@v5 + env: + EDIT_GRAPH_MODE: false + with: + name: ${{ github.repository_owner }}/${{ env.NON_EDIT_PLAYGROUND_IMAGE_NAME }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + registry: ghcr.io + workdir: playground + tags: "${{ github.ref == 'refs/heads/main' && 'latest,' || '' }}${{ steps.pre-step.outputs.image-tag }}" + no_push: ${{ github.event_name == 'pull_request' }} + buildargs: EDIT_GRAPH_MODE + - name: Build & Publish Docker Image for demo + uses: elgohr/Publish-Docker-Github-Action@v5 + with: + name: ${{ github.repository_owner }}/${{ env.DEMO_IMAGE_NAME }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + registry: ghcr.io + workdir: demo + tags: "${{ github.ref == 'refs/heads/main' && 'latest,' || '' }}${{ steps.pre-step.outputs.image-tag }}" + no_push: ${{ github.event_name == 'pull_request' }} diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml new file mode 100644 index 0000000000000000000000000000000000000000..61cdaa239a01eab9d74fdf8d29f876d6c769fa3b --- /dev/null +++ b/.github/workflows/ci.yaml @@ -0,0 +1,45 @@ +name: CI + +on: + pull_request: + branches: [ "main" ] + paths-ignore: + - ".devcontainer/**" + - ".github/**" + - "!.github/workflows/ci.yaml" + - ".vscode/**" + - "docs/**" + - "esp32-client/**" + - "**.md" + - "Dockerfile" + - "docker-compose.yml" + - "demo/**" + - "playground/**" + workflow_dispatch: + +jobs: + ci: + runs-on: ubuntu-latest + container: + image: ghcr.io/ten-framework/ten_agent_build:0.4.17 + strategy: + matrix: + agent: [agents/examples/default, agents/examples/demo, agents/examples/experimental] + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: "0" + submodules: "true" + + - name: Use agent + run: | + git config --global --add safe.directory $(pwd) + task use AGENT=${{ matrix.agent }} + + - name: Run tests + run: | + task test -- -s -v + + # - name: Run lint + # run: | + # task lint diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 0000000000000000000000000000000000000000..86b53a612c19abc37095644ad92f690fc54a87ce --- /dev/null +++ b/.pylintrc @@ -0,0 +1,652 @@ +[MAIN] + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Clear in-memory caches upon conclusion of linting. Useful if running pylint +# in a server-like mode. +clear-cache-post-run=no + +# Load and enable all available extensions. Use --list-extensions to see a list +# all available extensions. +#enable-all-extensions= + +# In error mode, messages with a category besides ERROR or FATAL are +# suppressed, and no reports are done by default. Error mode is compatible with +# disabling specific errors. +#errors-only= + +# Always return a 0 (non-error) status code, even if lint errors are found. +# This is primarily useful in continuous integration scripts. +#exit-zero= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-allow-list= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. 
Extensions are loading into the active Python interpreter and may +# run arbitrary code. (This is an alternative name to extension-pkg-allow-list +# for backward compatibility.) +extension-pkg-whitelist= + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Specify a score threshold under which the program will exit with error. +fail-under=10 + +# Interpret the stdin as a python script, whose filename needs to be passed as +# the module_or_package argument. +#from-stdin= + +# Files or directories to be skipped. They should be base names, not paths. +ignore=CVS,examples,tests,out + +# Add files or directories matching the regular expressions patterns to the +# ignore-list. The regex matches against paths and can be in Posix or Windows +# format. Because '\\' represents the directory delimiter on Windows systems, +# it can't be used as an escape character. +ignore-paths= + +# Files or directories matching the regular expression patterns are skipped. +# The regex matches against base names, not paths. The default value ignores +# Emacs file locks +ignore-patterns= + +# List of module names for which member attributes should not be checked and +# will not be imported (useful for modules/projects where namespaces are +# manipulated during runtime and thus existing member attributes cannot be +# deduced by static analysis). It supports qualified module names, as well as +# Unix pattern matching. +ignored-modules= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Resolve imports to .pyi stubs if available. May reduce no-member messages and +# increase not-an-iterable messages. +prefer-stubs=no + +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.10 + +# Discover python modules and packages in the file system subtree. +recursive=no + +# Add paths to the list of the source roots. Supports globbing patterns. The +# source root is an absolute path or a path relative to the current working +# directory used to determine a package namespace for modules located under the +# source root. +source-roots= + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + +# In verbose mode, extra non-checker-related info will be displayed. +#verbose= + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. 
Overrides argument- +# naming-style. If left empty, argument names will be checked with the set +# naming style. +#argument-rgx= + +# Naming style matching correct attribute names. +attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. If left empty, attribute names will be checked with the set naming +# style. +#attr-rgx= + +# Bad variable names which should always be refused, separated by a comma. +bad-names=foo, + bar, + baz, + toto, + tutu, + tata + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. If left empty, class attribute names will be checked +# with the set naming style. +#class-attribute-rgx= + +# Naming style matching correct class constant names. +class-const-naming-style=UPPER_CASE + +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. If left empty, class constant names will be checked with +# the set naming style. +#class-const-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. If left empty, class names will be checked with the set naming style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. If left empty, constant names will be checked with the set naming +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. If left empty, function names will be checked with the set +# naming style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _ + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. If left empty, inline iteration names will be checked +# with the set naming style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. If left empty, method names will be checked with the set naming style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. If left empty, module names will be checked with the set naming style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. 
+name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Regular expression matching correct type alias names. If left empty, type +# alias names will be checked with the set naming style. +#typealias-rgx= + +# Regular expression matching correct type variable names. If left empty, type +# variable names will be checked with the set naming style. +#typevar-rgx= + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. If left empty, variable names will be checked with the set +# naming style. +#variable-rgx= + + +[CLASSES] + +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + asyncSetUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[DESIGN] + +# List of regular expressions of class ancestor names to ignore when counting +# public methods (see R0903) +exclude-too-few-public-methods= + +# List of qualified class names to ignore when counting class parents (see +# R0901) +ignored-parents= + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of positional arguments for function / method. +#max-positional-arguments=5 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when caught. +overgeneral-exceptions=builtins.BaseException,builtins.Exception + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. 
+max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow explicit reexports by alias from a package __init__. +allow-reexport-from-package=no + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules= + +# Output a graph (.gv or any supported image format) of external dependencies +# to the given file (report RP0402 must not be disabled). +ext-import-graph= + +# Output a graph (.gv or any supported image format) of all (i.e. internal and +# external) dependencies to the given file (report RP0402 must not be +# disabled). +import-graph= + +# Output a graph (.gv or any supported image format) of internal dependencies +# to the given file (report RP0402 must not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, +# UNDEFINED. +confidence=HIGH, + CONTROL_FLOW, + INFERENCE, + INFERENCE_FAILURE, + UNDEFINED + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-symbolic-message-instead, + use-implicit-booleaness-not-comparison-to-string, + use-implicit-booleaness-not-comparison-to-zero, + broad-exception-caught, + logging-fstring-interpolation, + arguments-renamed, + I,C,R, + fixme, + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable= + + +[METHOD_ARGS] + +# List of qualified names (i.e., library.method) which require a timeout +# parameter e.g. 
'requests.api.get,requests.api.post' +timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +notes-rgx= + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit,argparse.parse_error + +# Let 'consider-using-join' be raised when the separator to join on would be +# non-empty (resulting in expected fixes of the type: ``"- " + " - +# ".join(items)``) +suggest-join-with-non-empty-separator=yes + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'fatal', 'error', 'warning', 'refactor', +# 'convention', and 'info' which contain the number of messages in each +# category, as well as 'statement' which is the total number of statements +# analyzed. This score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +msg-template= + +# Set the output format. Available formats are: text, parseable, colorized, +# json2 (improved json format), json (old json format) and msvs (visual +# studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +#output-format= + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[SIMILARITIES] + +# Comments are removed from the similarity computation +ignore-comments=yes + +# Docstrings are removed from the similarity computation +ignore-docstrings=yes + +# Imports are removed from the similarity computation +ignore-imports=yes + +# Signatures are removed from the similarity computation +ignore-signatures=yes + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. No available dictionaries : You need to install +# both the python package and the system dependency for enchant to work. +spelling-dict= + +# List of comma separated words that should be considered directives if they +# appear at the beginning of a comment and should not be checked. +spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. 
+check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members=firebase_admin.firestore.*,firestore.* + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of symbolic message names to ignore for Mixin members. +ignored-checks-for-mixins=no-member, + not-async-context-manager, + not-context-manager, + attribute-defined-outside-init + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# Regex pattern to define which classes are considered mixins. +mixin-class-rgx=.*[Mm]ixin + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of names allowed to shadow builtins +allowed-redefined-builtins= + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. 
+redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
diff --git a/.vscode/launch.json b/.vscode/launch.json
new file mode 100644
index 0000000000000000000000000000000000000000..8795a5f296cb8d6b630481e15ac93997639c274b
--- /dev/null
+++ b/.vscode/launch.json
@@ -0,0 +1,47 @@
+{
+    // Use IntelliSense to learn about possible attributes.
+    // Hover to view descriptions of existing attributes.
+    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+    "version": "0.2.0",
+    "configurations": [
+        {
+            "name": "debug go",
+            "type": "go",
+            "request": "launch",
+            "mode": "exec",
+            "cwd": "${workspaceFolder}",
+            "program": "${workspaceFolder}/agents/bin/worker",
+            "env": {
+                "LD_LIBRARY_PATH": "${workspaceFolder}/agents/ten_packages/system/ten_runtime_go/lib:${workspaceFolder}/agents/ten_packages/system/agora_rtc_sdk/lib:${workspaceFolder}/agents/ten_packages/system/azure_speech_sdk/lib",
+                "TEN_APP_BASE_DIR": "${workspaceFolder}/agents"
+            }
+        },
+        {
+            "name": "debug python",
+            "type": "debugpy",
+            "request": "attach",
+            "connect": {
+                "host": "localhost",
+                "port": 5678
+            },
+            "preLaunchTask": "start app"
+        },
+        {
+            "name": "debug cpp",
+            "type": "cppdbg",
+            "request": "launch",
+            "program": "${workspaceFolder}/agents/bin/worker",
+            "cwd": "${workspaceFolder}",
+            "environment": [
+                {
+                    "name": "LD_LIBRARY_PATH",
+                    "value": "${workspaceFolder}/agents/ten_packages/system/agora_rtc_sdk/lib:${workspaceFolder}/agents/ten_packages/system/azure_speech_sdk/lib"
+                },
+                {
+                    "name": "CGO_LDFLAGS",
+                    "value": "-L${workspaceFolder}/agents/ten_packages/system/ten_runtime_go/lib -lten_runtime_go -Wl,-rpath,@loader_path/lib -Wl,-rpath,@loader_path/../lib"
+                }
+            ]
+        }
+    ]
+}
\ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 0000000000000000000000000000000000000000..264c627a0e54badb512efdbf787c360f3c186eac
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,13 @@
+{
+    "C_Cpp.intelliSenseEngine": "disabled",
+    "editor.formatOnSave": true,
+    "editor.defaultFormatter": null,
+    "[python]": {
+        "editor.defaultFormatter": "ms-python.black-formatter"
+    },
+    "git.ignoreLimitWarning": true,
+    "pylint.ignorePatterns": [
+        "*/ten_runtime_python/**/*",
+        "/usr/lib/**/*"
+    ],
+}
\ No newline at end of file
diff --git a/.vscode/tasks.json b/.vscode/tasks.json
new file mode 100644
index 0000000000000000000000000000000000000000..da11248be1e9ecfca9c501e53a50cef120a12464
--- /dev/null
+++ b/.vscode/tasks.json
@@ -0,0 +1,22 @@
+{
+    "version": "2.0.0",
+    "tasks": [
+        {
+            "type": "shell",
+            "label": "build",
+            "command": "make build",
+            "args": [],
+            "group": {
+                "kind": "build",
+                "isDefault": true
+            }
+        },
+        {
+            "label": "start app",
+            "type": "shell",
+            "command": "export TEN_ENABLE_PYTHON_DEBUG=true; export TEN_PYTHON_DEBUG_PORT=5678; ./agents/bin/start",
+            "group": "none",
+            "isBackground": true
+        },
+    ]
+}
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
index 88d63b2d1bd7552bfa407f835678640e44e2ef06..d91f5d03a1c88d42a6ce92809e1d86d045e7f4c4 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -22,25 +22,22 @@ RUN apt-get clean && apt-get update && apt-get install -y --no-install-recommend
     ca-certificates \
     && apt-get clean && rm -rf /var/lib/apt/lists/* && rm -rf /tmp/*
 
-# Create a user to run the application with the proper permissions
-RUN useradd -m -s /bin/bash tenuser
-
-# Install Go 1.21 with the proper access permissions
+# Install Go 1.21
 RUN wget https://golang.org/dl/go1.21.0.linux-amd64.tar.gz && \
     tar -C /usr/local -xzf go1.21.0.linux-amd64.tar.gz && \
-    rm go1.21.0.linux-amd64.tar.gz && \
-    mkdir -p /home/tenuser/go && \
-    chown -R tenuser:tenuser /home/tenuser/go
-
+    rm go1.21.0.linux-amd64.tar.gz
 ENV PATH=$PATH:/usr/local/go/bin
-ENV GOPATH=/home/tenuser/go
+ENV GOPATH=/go
 ENV PATH=$PATH:$GOPATH/bin
 
-# Install Node.js 20.x and pnpm for the Playground
+# Install Node.js 20.x (instead of 18.x) and pnpm for the Playground
 RUN curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && \
     apt-get install -y nodejs && \
     npm install -g pnpm
 
+# Create a user to run the application with the proper permissions
+RUN useradd -m -s /bin/bash tenuser
+
 WORKDIR /app
 
 # Clone the TEN Agent repository (main branch)
@@ -73,7 +70,7 @@ RUN cd /app/server && \
 # Build the Playground UI with user permissions
 WORKDIR /app/playground
-ENV PNPM_HOME="/home/tenuser/.pnpm-store"
+ENV PNPM_HOME="/app/.pnpm-store"
 ENV PATH="$PNPM_HOME:$PATH"
 RUN pnpm install --no-frozen-lockfile && \
     NEXT_PUBLIC_EDIT_GRAPH_MODE=false pnpm build
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/Taskfile.yml b/Taskfile.yml new file mode 100644 index 0000000000000000000000000000000000000000..4e01ee1e02a41b6630bd07742d3cf6070238aae7 --- /dev/null +++ b/Taskfile.yml @@ -0,0 +1,120 @@ +version: '3' + +tasks: + clean: + desc: clean up + cmds: + - task: clean-agents + - task: clean-server + + lint: + desc: lint-agent + env: + PYTHONPATH: "./agents/ten_packages/system/ten_runtime_python/lib:./agents/ten_packages/system/ten_runtime_python/interface:./agents/ten_packages/system/ten_ai_base/interface" + cmds: + - ./agents/scripts/pylint.sh + + install-tools: + desc: install tools + cmds: + - pip install pylint + + build: + desc: build + cmds: + - task: build-agent + - task: build-server + + use: + desc: use agent, default 'agents/examples/default' + vars: + AGENT: '{{.AGENT| default "agents/examples/default"}}' + cmds: + - ln -sf {{.USER_WORKING_DIR}}/{{.AGENT}}/manifest.json ./agents/ + - ln -sf {{.USER_WORKING_DIR}}/{{.AGENT}}/property.json ./agents/ + - task: build + + run-server: + desc: run backend http server + cmds: + - source .env && /app/server/bin/api + + run-gd-server: + desc: run tman dev http server for graph designer + dir: ./agents + cmds: + - tman designer + + run: + desc: run servers + deps: + - task: run-server + - task: run-gd-server + + build-agent: + desc: build agent + dir: ./agents + internal: true + cmds: + - ./scripts/install_deps_and_build.sh linux x64 && mv bin/main bin/worker + + build-server: + desc: build server + dir: ./server + cmds: + - go mod tidy && go mod download && go build -o bin/api main.go + + clean-agents: + desc: clean up agents + dir: ./agents + internal: true + cmds: + - rm -rf manifest.json property.json manifest-lock.json bin/main bin/worker out .release ten_packages/system ten_packages/system/agora_rtc_sdk ten_packages/system/azure_speech_sdk ten_packages/system/nlohmann_json ten_packages/extension/agora_rtc ten_packages/extension/agora_rtm ten_packages/extension/agora_sess_ctrl ten_packages/extension/azure_tts ten_packages/addon_loader + - find . -type d -name .pytest_cache -exec rm -rf {} \; || true + - find . -type d -name __pycache__ -exec rm -rf {} \; || true + - find . -type d -name .ten -exec rm -rf {} \; || true + - find . -name .coverage -exec rm -f {} \; || true + + clean-server: + desc: clean up server + dir: ./server + internal: true + cmds: + - rm -rf bin + + test: + desc: run tests + cmds: + - task: test-agent-extensions + - task: test-server + + test-server: + desc: test server + dir: ./server + internal: true + cmds: + - go test -v ./... 
+ + test-agent-extensions: + desc: run standalone testing of extensions + internal: true + env: + PYTHONPATH: "{{.USER_WORKING_DIR}}:{{.USER_WORKING_DIR}}/agents/ten_packages/system/ten_runtime_python/lib:{{.USER_WORKING_DIR}}/agents/ten_packages/system/ten_runtime_python/interface:{{.USER_WORKING_DIR}}/agents/ten_packages/system/ten_ai_base/interface" + vars: + EXTENSIONS: + sh: 'find agents/ten_packages/extension -type d -exec test -d "{}/tests" \; -print' + cmds: + - for: { var: EXTENSIONS } + task: test-extension + vars: + EXTENSION: '{{ .ITEM }}' + + test-extension: + desc: run standalone testing of one single extension + vars: + EXTENSION: '{{.EXTENSION| default "agents/ten_packages/extension/elevenlabs_tts_python"}}' + env: + PYTHONPATH: "{{.USER_WORKING_DIR}}:{{.USER_WORKING_DIR}}/agents/ten_packages/system/ten_runtime_python/lib:{{.USER_WORKING_DIR}}/agents/ten_packages/system/ten_runtime_python/interface:{{.USER_WORKING_DIR}}/agents/ten_packages/system/ten_ai_base/interface" + dotenv: ['.env'] + cmds: + - cd {{.EXTENSION}} && tman -y install --standalone && ./tests/bin/start {{ .CLI_ARGS }} diff --git a/agents/.clang-format b/agents/.clang-format new file mode 100644 index 0000000000000000000000000000000000000000..70486d6eed23212835fc989b0f9ad3e0a75d6278 --- /dev/null +++ b/agents/.clang-format @@ -0,0 +1,8 @@ +BasedOnStyle: Google +SeparateDefinitionBlocks: Always +ColumnLimit: 120 +BinPackArguments: false +BinPackParameters: false +AlignAfterOpenBracket: Align +AllowAllArgumentsOnNextLine: false +AllowAllParametersOfDeclarationOnNextLine: false diff --git a/agents/.gitignore b/agents/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..fcf0c8966c149c662deee5b2c0c9d6c2da4face9 --- /dev/null +++ b/agents/.gitignore @@ -0,0 +1,43 @@ +*.log +ten_packages/extension_group/ +ten_packages/extension/agora_rtc +ten_packages/extension/azure_tts +ten_packages/extension/agora_sess_ctrl +ten_packages/extension/agora_rtm +ten_packages/extension/http_server_python +ten_packages/system/agora_rtc_sdk +ten_packages/system/azure_speech_sdk +ten_packages/system/nlohmann_json +ten_packages/system/ten_runtime* +ten_packages/system +ten_packages/addon_loader +.ten +agoradns.dat +agorareport.dat +agorartmreport.dat +agora_cache.db +bin/man +bin/worker +/BUILD.gn +.cache/ +/compile_commands.json +core +crash_context_v1 +.deps/ +.DS_Store +/.gn +/.gnfiles +include/ +interface/ +lib/ +/out/ +*.pcm +.release +session_control.conf.agora +xdump_config +.vscode +*.pyc +*.pyc.* +/manifest.json +/manifest-lock.json +/property.json diff --git a/agents/bin/start b/agents/bin/start new file mode 100644 index 0000000000000000000000000000000000000000..89b66cf903a65a80a2ffde8cb8f9c9a8ffb3de9c --- /dev/null +++ b/agents/bin/start @@ -0,0 +1,12 @@ +#!/bin/bash + +set -e + +cd "$(dirname "${BASH_SOURCE[0]}")/.." 
+ +#export TEN_ENABLE_PYTHON_DEBUG=true +#export TEN_PYTHON_DEBUG_PORT=5678 +export PYTHONPATH=$(pwd)/ten_packages/system/ten_ai_base/interface:$PYTHONPATH +export LD_LIBRARY_PATH=$(pwd)/ten_packages/system/agora_rtc_sdk/lib:$(pwd)/ten_packages/extension/agora_rtm/lib:$(pwd)/ten_packages/system/azure_speech_sdk/lib + +exec bin/worker "$@" diff --git a/agents/examples/default/manifest.json b/agents/examples/default/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..db7ba846743592d6ace0bffce8464e96219e9332 --- /dev/null +++ b/agents/examples/default/manifest.json @@ -0,0 +1,152 @@ +{ + "type": "app", + "name": "agent_demo", + "version": "0.8.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_go", + "version": "0.8" + }, + { + "type": "extension", + "name": "agora_rtc", + "version": "=0.12.0" + }, + { + "type": "extension", + "name": "agora_sess_ctrl", + "version": "=0.4.4" + }, + { + "type": "system", + "name": "azure_speech_sdk", + "version": "1.38.0" + }, + { + "type": "system", + "name": "ten_ai_base", + "version": "0.4.1" + }, + { + "type": "extension", + "name": "azure_tts", + "version": "=0.8.1" + }, + { + "type": "extension", + "name": "openai_v2v_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "message_collector", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "bingsearch_tool_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "openai_chatgpt_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "fish_audio_tts", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "interrupt_detector_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "weatherapi_tool_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "deepgram_asr_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "vision_tool_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "vision_analyze_tool_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "transcribe_asr_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "gemini_llm_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "bedrock_llm_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "polly_tts", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "minimax_tts_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "minimax_v2v_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "cosy_tts_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "elevenlabs_tts_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "dify_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "gemini_v2v_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "coze_python_async", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "openai_image_generate_tool", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "computer_tool_python", + "version": "=0.1.0" + } + ] +} \ No newline at end of file diff --git a/agents/examples/default/property.json b/agents/examples/default/property.json new file mode 100644 index 0000000000000000000000000000000000000000..40e721c8b4f7b32d68ee6f9bf794a8e8c15809b6 --- /dev/null +++ b/agents/examples/default/property.json @@ -0,0 +1,1331 @@ +{ + "_ten": { + "predefined_graphs": [ + { + "name": "voice_assistant", + 
"auto_start": true, + "nodes": [ + { + "type": "extension", + "name": "agora_rtc", + "addon": "agora_rtc", + "extension_group": "default", + "property": { + "app_id": "${env:AGORA_APP_ID}", + "token": "", + "channel": "ten_agent_test", + "stream_id": 1234, + "remote_stream_id": "123", + "subscribe_audio": true, + "publish_audio": true, + "publish_data": true, + "enable_agora_asr": false, + "agora_asr_vendor_name": "microsoft", + "agora_asr_language": "ru-RU", + "agora_asr_vendor_key": "${env:AZURE_STT_KEY|}", + "agora_asr_vendor_region": "${env:AZURE_STT_REGION|}", + "agora_asr_session_control_file_path": "session_control.conf" + } + }, + { + "type": "extension", + "name": "stt", + "addon": "deepgram_asr_python", + "extension_group": "stt", + "property": { + "api_key": "${env:DEEPGRAM_API_KEY}", + "language": "ru-RU", + "model": "nova-2", + "sample_rate": 16000 + } + }, + { + "type": "extension", + "name": "llm", + "addon": "openai_chatgpt_python", + "extension_group": "chatgpt", + "property": { + "api_key": "${env:OPENAI_API_KEY}", + "base_url": "https://nitrox-fastflowwrapper.hf.space/v1", + "frequency_penalty": 0.9, + "greeting": "Я подключилась, о чем сегодня?", + "max_memory_length": 150, + "max_tokens": 512, + "model": "${env:OPENAI_MODEL}", + "prompt": "Your role is Friend. You provide emotional support, actively listen and reflect on the user's feelings, validate their emotions, and ask open-ended questions to deepen the conversation in a safe and non-judgmental space. Engage informally and share personal experiences while respecting boundaries and encouraging the user. Maintain clarity and conciseness by replying in no more than five sentences per answer using friendly English language. Your responses must always include answers to the user's inquiries while offering insights, comfort, and encouragement without using any special symbols. 
Your language is Russian.", + "proxy_url": "${env:OPENAI_PROXY_URL}" + } + }, + { + "type": "extension", + "name": "tts", + "addon": "fish_audio_tts", + "extension_group": "tts", + "property": { + "api_key": "${env:FISH_AUDIO_TTS_KEY}", + "base_url": "https://api.fish.audio", + "model_id": "d61694f4ee5042aba2ffe11a9635d97e", + "optimize_streaming_latency": true, + "request_timeout_seconds": 30 + } + }, + { + "type": "extension", + "name": "interrupt_detector", + "addon": "interrupt_detector_python", + "extension_group": "default", + "property": {} + }, + { + "type": "extension", + "name": "message_collector", + "addon": "message_collector", + "extension_group": "transcriber", + "property": {} + } + ], + "connections": [ + { + "extension": "agora_rtc", + "cmd": [ + { + "name": "on_user_joined", + "dest": [ + { + "extension": "llm" + } + ] + }, + { + "name": "on_user_left", + "dest": [ + { + "extension": "llm" + } + ] + }, + { + "name": "on_connection_failure", + "dest": [ + { + "extension": "llm" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "stt" + } + ] + } + ] + }, + { + "extension": "stt", + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "interrupt_detector" + }, + { + "extension": "message_collector" + } + ] + } + ] + }, + { + "extension": "llm", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "tts" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "tts" + }, + { + "extension": "message_collector" + } + ] + }, + { + "name": "content_data", + "dest": [ + { + "extension": "message_collector" + } + ] + } + ] + }, + { + "extension": "message_collector", + "data": [ + { + "name": "data", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "tts", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "interrupt_detector", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "llm" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "llm" + } + ] + } + ] + } + ] + }, + { + "name": "voice_assistant_integrated_stt", + "auto_start": true, + "nodes": [ + { + "type": "extension", + "name": "agora_rtc", + "addon": "agora_rtc", + "extension_group": "default", + "property": { + "app_id": "${env:AGORA_APP_ID}", + "token": "", + "channel": "ten_agent_test", + "stream_id": 1234, + "remote_stream_id": 123, + "subscribe_audio": true, + "publish_audio": true, + "publish_data": true, + "enable_agora_asr": true, + "agora_asr_vendor_name": "microsoft", + "agora_asr_language": "en-US", + "agora_asr_vendor_key": "${env:AZURE_STT_KEY|}", + "agora_asr_vendor_region": "${env:AZURE_STT_REGION|}", + "agora_asr_session_control_file_path": "session_control.conf" + } + }, + { + "type": "extension", + "name": "llm", + "addon": "openai_chatgpt_python", + "extension_group": "chatgpt", + "property": { + "api_key": "${env:OPENAI_API_KEY}", + "base_url": "", + "frequency_penalty": 0.9, + "greeting": "TEN Agent connected. 
How can I help you today?", + "max_memory_length": 10, + "max_tokens": 512, + "model": "${env:OPENAI_MODEL}", + "prompt": "", + "proxy_url": "${env:OPENAI_PROXY_URL}" + } + }, + { + "type": "extension", + "name": "tts", + "addon": "fish_audio_tts", + "extension_group": "tts", + "property": { + "api_key": "${env:FISH_AUDIO_TTS_KEY}", + "model_id": "d8639b5cc95548f5afbcfe22d3ba5ce5", + "optimize_streaming_latency": true, + "request_timeout_seconds": 30, + "base_url": "https://api.fish.audio" + } + }, + { + "type": "extension", + "name": "interrupt_detector", + "addon": "interrupt_detector_python", + "extension_group": "default", + "property": {} + }, + { + "type": "extension", + "name": "message_collector", + "addon": "message_collector", + "extension_group": "transcriber", + "property": {} + }, + { + "type": "extension", + "name": "weatherapi_tool_python", + "addon": "weatherapi_tool_python", + "extension_group": "default", + "property": { + "api_key": "${env:WEATHERAPI_API_KEY|}" + } + } + ], + "connections": [ + { + "extension": "agora_rtc", + "cmd": [ + { + "name": "on_user_joined", + "dest": [ + { + "extension": "llm" + } + ] + }, + { + "name": "on_user_left", + "dest": [ + { + "extension": "llm" + } + ] + }, + { + "name": "on_connection_failure", + "dest": [ + { + "extension": "llm" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "interrupt_detector" + }, + { + "extension": "message_collector" + } + ] + } + ] + }, + { + "extension": "llm", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "tts" + } + ] + }, + { + "name": "tool_call", + "dest": [ + { + "extension": "weatherapi_tool_python" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "tts" + }, + { + "extension": "message_collector" + } + ] + }, + { + "name": "content_data", + "dest": [ + { + "extension": "message_collector" + } + ] + } + ] + }, + { + "extension": "message_collector", + "data": [ + { + "name": "data", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "tts", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "interrupt_detector", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "llm" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "llm" + } + ] + } + ] + }, + { + "extension": "weatherapi_tool_python", + "cmd": [ + { + "name": "tool_register", + "dest": [ + { + "extension": "llm" + } + ] + } + ] + } + ] + }, + { + "name": "voice_assistant_realtime", + "auto_start": true, + "nodes": [ + { + "type": "extension", + "name": "agora_rtc", + "addon": "agora_rtc", + "extension_group": "rtc", + "property": { + "app_id": "${env:AGORA_APP_ID}", + "token": "", + "channel": "ten_agent_test", + "stream_id": 1234, + "remote_stream_id": 123, + "subscribe_audio": true, + "publish_audio": true, + "publish_data": true, + "subscribe_audio_sample_rate": 24000 + } + }, + { + "type": "extension", + "name": "v2v", + "addon": "openai_v2v_python", + "extension_group": "llm", + "property": { + "api_key": "${env:OPENAI_REALTIME_API_KEY}", + "enable_storage": false, + "history": 10, + "language": "en-US", + "max_tokens": 2048, + "model": "gpt-4o-realtime-preview", + "server_vad": true, + "temperature": 0.9, + "voice": "alloy" + } + }, + { + "type": "extension", + "name": "message_collector", + 
"addon": "message_collector", + "extension_group": "transcriber", + "property": {} + }, + { + "type": "extension", + "name": "weatherapi_tool_python", + "addon": "weatherapi_tool_python", + "extension_group": "default", + "property": { + "api_key": "${env:WEATHERAPI_API_KEY|}" + } + } + ], + "connections": [ + { + "extension": "agora_rtc", + "cmd": [ + { + "name": "on_user_joined", + "dest": [ + { + "extension": "v2v" + } + ] + }, + { + "name": "on_user_left", + "dest": [ + { + "extension": "v2v" + } + ] + }, + { + "name": "on_connection_failure", + "dest": [ + { + "extension": "v2v" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "v2v" + } + ] + } + ], + "video_frame": [ + { + "name": "video_frame", + "dest": [ + { + "extension": "v2v" + } + ] + } + ] + }, + { + "extension": "v2v", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "agora_rtc" + } + ] + }, + { + "name": "tool_call", + "dest": [ + { + "extension": "weatherapi_tool_python" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "message_collector" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "message_collector", + "data": [ + { + "name": "data", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "weatherapi_tool_python", + "cmd": [ + { + "name": "tool_register", + "dest": [ + { + "extension": "v2v" + } + ] + } + ] + } + ] + }, + { + "name": "story_teller", + "auto_start": true, + "nodes": [ + { + "type": "extension", + "name": "agora_rtc", + "addon": "agora_rtc", + "extension_group": "default", + "property": { + "app_id": "${env:AGORA_APP_ID}", + "token": "", + "channel": "ten_agent_test", + "stream_id": 1234, + "remote_stream_id": 123, + "subscribe_audio": true, + "publish_audio": true, + "publish_data": true, + "enable_agora_asr": false + } + }, + { + "type": "extension", + "name": "stt", + "addon": "deepgram_asr_python", + "extension_group": "stt", + "property": { + "api_key": "${env:DEEPGRAM_API_KEY}", + "language": "en-US", + "model": "nova-2", + "sample_rate": 16000 + } + }, + { + "type": "extension", + "name": "llm", + "addon": "openai_chatgpt_python", + "extension_group": "chatgpt", + "property": { + "api_key": "${env:OPENAI_API_KEY}", + "base_url": "", + "frequency_penalty": 0.9, + "greeting": "TEN Agent connected. How can I help you today?", + "max_memory_length": 10, + "max_tokens": 512, + "model": "${env:OPENAI_MODEL}", + "prompt": "You are an ai agent bot producing child picture books. Each response should be short and no more than 50 words as it's for child. \nFor every response relevant to the story-telling, you will use the 'image_generate' tool to create an image based on the description or key moment in that part of the story. \n The story should be set in a fantasy world. Try asking questions relevant to the story to decide how the story should proceed. 
Every response should include rich, vivid descriptions that will guide the 'image_generate' tool to produce an image that aligns with the scene or mood.\n Whether it’s the setting, a character’s expression, or a dramatic moment, the paragraph should give enough detail for a meaningful visual representation.", + "proxy_url": "${env:OPENAI_PROXY_URL}" + } + }, + { + "type": "extension", + "name": "tts", + "addon": "fish_audio_tts", + "extension_group": "tts", + "property": { + "api_key": "${env:FISH_AUDIO_TTS_KEY}", + "model_id": "d8639b5cc95548f5afbcfe22d3ba5ce5", + "optimize_streaming_latency": true, + "request_timeout_seconds": 30, + "base_url": "https://api.fish.audio" + } + }, + { + "type": "extension", + "name": "interrupt_detector", + "addon": "interrupt_detector_python", + "extension_group": "default", + "property": {} + }, + { + "type": "extension", + "name": "message_collector", + "addon": "message_collector", + "extension_group": "transcriber", + "property": {} + }, + { + "type": "extension", + "name": "openai_image_generate_tool", + "addon": "openai_image_generate_tool", + "extension_group": "default", + "property": { + "api_key": "${env:OPENAI_API_KEY}" + } + } + ], + "connections": [ + { + "extension": "agora_rtc", + "cmd": [ + { + "name": "on_user_joined", + "dest": [ + { + "extension": "llm" + } + ] + }, + { + "name": "on_user_left", + "dest": [ + { + "extension": "llm" + } + ] + }, + { + "name": "on_connection_failure", + "dest": [ + { + "extension": "llm" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "stt" + } + ] + } + ] + }, + { + "extension": "stt", + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "interrupt_detector" + }, + { + "extension": "message_collector" + } + ] + } + ] + }, + { + "extension": "llm", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "tts" + } + ] + }, + { + "name": "tool_call", + "dest": [ + { + "extension": "openai_image_generate_tool" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "tts" + }, + { + "extension": "message_collector" + } + ] + } + ] + }, + { + "extension": "message_collector", + "data": [ + { + "name": "data", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "tts", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "interrupt_detector", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "llm" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "llm" + } + ] + } + ] + }, + { + "extension": "openai_image_generate_tool", + "cmd": [ + { + "name": "tool_register", + "dest": [ + { + "extension": "llm" + } + ] + } + ], + "data": [ + { + "name": "content_data", + "dest": [ + { + "extension": "message_collector" + } + ] + } + ] + } + ] + }, + { + "name": "story_teller_stt_integrated", + "auto_start": true, + "nodes": [ + { + "type": "extension", + "name": "agora_rtc", + "addon": "agora_rtc", + "extension_group": "default", + "property": { + "app_id": "${env:AGORA_APP_ID}", + "token": "", + "channel": "ten_agent_test", + "stream_id": 1234, + "remote_stream_id": 123, + "subscribe_audio": true, + "publish_audio": true, + "publish_data": true, + "enable_agora_asr": true, + "agora_asr_vendor_name": "microsoft", + "agora_asr_language": "en-US", + "agora_asr_vendor_key": 
"${env:AZURE_STT_KEY|}", + "agora_asr_vendor_region": "${env:AZURE_STT_REGION|}", + "agora_asr_session_control_file_path": "session_control.conf" + } + }, + { + "type": "extension", + "name": "llm", + "addon": "openai_chatgpt_python", + "extension_group": "chatgpt", + "property": { + "api_key": "${env:OPENAI_API_KEY}", + "base_url": "", + "frequency_penalty": 0.9, + "greeting": "TEN Agent connected. How can I help you today?", + "max_memory_length": 10, + "max_tokens": 512, + "model": "${env:OPENAI_MODEL}", + "prompt": "You are an ai agent bot producing child picture books. Each response should be short and no more than 50 words as it's for child. \nFor every response relevant to the story-telling, you will use the 'image_generate' tool to create an image based on the description or key moment in that part of the story. \n The story should be set in a fantasy world. Try asking questions relevant to the story to decide how the story should proceed. Every response should include rich, vivid descriptions that will guide the 'image_generate' tool to produce an image that aligns with the scene or mood.\n Whether it’s the setting, a character’s expression, or a dramatic moment, the paragraph should give enough detail for a meaningful visual representation.", + "proxy_url": "${env:OPENAI_PROXY_URL}" + } + }, + { + "type": "extension", + "name": "tts", + "addon": "fish_audio_tts", + "extension_group": "tts", + "property": { + "api_key": "${env:FISH_AUDIO_TTS_KEY}", + "model_id": "d8639b5cc95548f5afbcfe22d3ba5ce5", + "optimize_streaming_latency": true, + "request_timeout_seconds": 30, + "base_url": "https://api.fish.audio" + } + }, + { + "type": "extension", + "name": "interrupt_detector", + "addon": "interrupt_detector_python", + "extension_group": "default", + "property": {} + }, + { + "type": "extension", + "name": "message_collector", + "addon": "message_collector", + "extension_group": "transcriber", + "property": {} + }, + { + "type": "extension", + "name": "openai_image_generate_tool", + "addon": "openai_image_generate_tool", + "extension_group": "default", + "property": { + "api_key": "${env:OPENAI_API_KEY}" + } + } + ], + "connections": [ + { + "extension": "agora_rtc", + "cmd": [ + { + "name": "on_user_joined", + "dest": [ + { + "extension": "llm" + } + ] + }, + { + "name": "on_user_left", + "dest": [ + { + "extension": "llm" + } + ] + }, + { + "name": "on_connection_failure", + "dest": [ + { + "extension": "llm" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "interrupt_detector" + }, + { + "extension": "message_collector" + } + ] + } + ] + }, + { + "extension": "llm", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "tts" + } + ] + }, + { + "name": "tool_call", + "dest": [ + { + "extension": "openai_image_generate_tool" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "tts" + }, + { + "extension": "message_collector" + } + ] + } + ] + }, + { + "extension": "message_collector", + "data": [ + { + "name": "data", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "tts", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "interrupt_detector", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "llm" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": 
"llm" + } + ] + } + ] + }, + { + "extension": "openai_image_generate_tool", + "cmd": [ + { + "name": "tool_register", + "dest": [ + { + "extension": "llm" + } + ] + } + ], + "data": [ + { + "name": "content_data", + "dest": [ + { + "extension": "message_collector" + } + ] + } + ] + } + ] + }, + { + "name": "story_teller_realtime", + "auto_start": true, + "nodes": [ + { + "type": "extension", + "name": "agora_rtc", + "addon": "agora_rtc", + "extension_group": "rtc", + "property": { + "app_id": "${env:AGORA_APP_ID}", + "token": "", + "channel": "ten_agent_test", + "stream_id": 1234, + "remote_stream_id": 123, + "subscribe_audio": true, + "publish_audio": true, + "publish_data": true, + "subscribe_audio_sample_rate": 24000 + } + }, + { + "type": "extension", + "name": "v2v", + "addon": "openai_v2v_python", + "extension_group": "llm", + "property": { + "api_key": "${env:OPENAI_REALTIME_API_KEY}", + "temperature": 0.9, + "model": "gpt-4o-realtime-preview-2024-12-17", + "max_tokens": 2048, + "voice": "alloy", + "language": "en-US", + "server_vad": true, + "prompt": "You are an ai agent bot producing child picture books. Each response should be short and no more than 50 words as it's for child. \nFor every response relevant to the story-telling, you will use the 'image_generate' tool to create an image based on the description or key moment in that part of the story. \n The story should be set in a fantasy world. Try asking questions relevant to the story to decide how the story should proceed. Every response should include rich, vivid descriptions that will guide the 'image_generate' tool to produce an image that aligns with the scene or mood.\n Whether it’s the setting, a character’s expression, or a dramatic moment, the paragraph should give enough detail for a meaningful visual representation.", + "dump": false, + "max_history": 10 + } + }, + { + "type": "extension", + "name": "message_collector", + "addon": "message_collector", + "extension_group": "transcriber", + "property": {} + }, + { + "type": "extension", + "name": "openai_image_generate_tool", + "addon": "openai_image_generate_tool", + "extension_group": "default", + "property": { + "api_key": "${env:OPENAI_API_KEY}" + } + } + ], + "connections": [ + { + "extension": "agora_rtc", + "cmd": [ + { + "name": "on_user_joined", + "dest": [ + { + "extension": "v2v" + } + ] + }, + { + "name": "on_user_left", + "dest": [ + { + "extension": "v2v" + } + ] + }, + { + "name": "on_connection_failure", + "dest": [ + { + "extension": "v2v" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "v2v" + } + ] + } + ] + }, + { + "extension": "v2v", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "agora_rtc" + } + ] + }, + { + "name": "tool_call", + "dest": [ + { + "extension": "openai_image_generate_tool" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "message_collector" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "message_collector", + "data": [ + { + "name": "data", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "openai_image_generate_tool", + "cmd": [ + { + "name": "tool_register", + "dest": [ + { + "extension": "v2v" + } + ] + } + ], + "data": [ + { + "name": "content_data", + "dest": [ + { + "extension": "message_collector" + } + ] + } + ] + } + ] + } + ], + "log_level": 3 + } +} \ No newline at end of file diff --git 
a/agents/examples/demo/manifest.json b/agents/examples/demo/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..2a623da33629a92137d654a1c4e83a67ff730ae2 --- /dev/null +++ b/agents/examples/demo/manifest.json @@ -0,0 +1,97 @@ +{ + "type": "app", + "name": "agent_demo", + "version": "0.8.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_go", + "version": "0.8" + }, + { + "type": "extension", + "name": "agora_rtc", + "version": "=0.12.0" + }, + { + "type": "extension", + "name": "agora_sess_ctrl", + "version": "=0.4.4" + }, + { + "type": "system", + "name": "azure_speech_sdk", + "version": "1.38.0" + }, + { + "type": "system", + "name": "ten_ai_base", + "version": "0.4.1" + }, + { + "type": "extension", + "name": "azure_tts", + "version": "=0.8.1" + }, + { + "type": "extension", + "name": "dify_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "gemini_v2v_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "openai_chatgpt_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "bingsearch_tool_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "vision_tool_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "weatherapi_tool_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "interrupt_detector_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "openai_v2v_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "message_collector", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "coze_python_async", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "fish_audio_tts", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "openai_image_generate_tool", + "version": "=0.1.0" + } + ] +} \ No newline at end of file diff --git a/agents/examples/demo/property.json b/agents/examples/demo/property.json new file mode 100644 index 0000000000000000000000000000000000000000..30c137dc4d1663ce905d9eb17b18f4fdd9fe8bbf --- /dev/null +++ b/agents/examples/demo/property.json @@ -0,0 +1,2322 @@ +{ + "_ten": { + "predefined_graphs": [ + { + "name": "qwq_32b", + "auto_start": true, + "nodes": [ + { + "type": "extension", + "name": "agora_rtc", + "addon": "agora_rtc", + "extension_group": "default", + "property": { + "app_id": "${env:AGORA_APP_ID}", + "token": "", + "channel": "ten_agent_test", + "stream_id": 1234, + "remote_stream_id": 123, + "subscribe_audio": true, + "publish_audio": true, + "publish_data": true, + "enable_agora_asr": true, + "agora_asr_vendor_name": "microsoft", + "agora_asr_language": "en-US", + "agora_asr_vendor_key": "${env:AZURE_STT_KEY|}", + "agora_asr_vendor_region": "${env:AZURE_STT_REGION|}", + "agora_asr_session_control_file_path": "session_control.conf" + } + }, + { + "type": "extension", + "name": "llm", + "addon": "openai_chatgpt_python", + "extension_group": "chatgpt", + "property": { + "api_key": "${env:QWEN_API_KEY}", + "base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1", + "frequency_penalty": 0.9, + "greeting": "TEN Agent connected. 
How can I help you today?", + "max_memory_length": 10, + "max_tokens": 512, + "model": "qwq-plus", + "prompt": "", + "proxy_url": "${env:OPENAI_PROXY_URL}" + } + }, + { + "type": "extension", + "name": "tts", + "addon": "azure_tts", + "extension_group": "tts", + "property": { + "azure_subscription_key": "${env:AZURE_TTS_KEY}", + "azure_subscription_region": "${env:AZURE_TTS_REGION}", + "azure_synthesis_voice_name": "en-US-AndrewMultilingualNeural" + } + }, + { + "type": "extension", + "name": "interrupt_detector", + "addon": "interrupt_detector_python", + "extension_group": "default", + "property": {} + }, + { + "type": "extension", + "name": "message_collector", + "addon": "message_collector", + "extension_group": "transcriber", + "property": {} + } + ], + "connections": [ + { + "extension": "agora_rtc", + "cmd": [ + { + "name": "on_user_joined", + "dest": [ + { + "extension": "llm" + } + ] + }, + { + "name": "on_user_left", + "dest": [ + { + "extension": "llm" + } + ] + }, + { + "name": "on_connection_failure", + "dest": [ + { + "extension": "llm" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "interrupt_detector" + }, + { + "extension": "message_collector" + } + ] + } + ] + }, + { + "extension": "llm", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "tts" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "tts" + }, + { + "extension": "message_collector" + } + ] + }, + { + "name": "content_data", + "dest": [ + { + "extension": "message_collector" + } + ] + } + ] + }, + { + "extension": "message_collector", + "data": [ + { + "name": "data", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "tts", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "interrupt_detector", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "llm" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "llm" + } + ] + } + ] + } + ] + }, + { + "name": "deepseek_r1", + "auto_start": true, + "nodes": [ + { + "type": "extension", + "name": "agora_rtc", + "addon": "agora_rtc", + "extension_group": "default", + "property": { + "app_id": "${env:AGORA_APP_ID}", + "token": "", + "channel": "ten_agent_test", + "stream_id": 1234, + "remote_stream_id": 123, + "subscribe_audio": true, + "publish_audio": true, + "publish_data": true, + "enable_agora_asr": true, + "agora_asr_vendor_name": "microsoft", + "agora_asr_language": "en-US", + "agora_asr_vendor_key": "${env:AZURE_STT_KEY|}", + "agora_asr_vendor_region": "${env:AZURE_STT_REGION|}", + "agora_asr_session_control_file_path": "session_control.conf" + } + }, + { + "type": "extension", + "name": "llm", + "addon": "openai_chatgpt_python", + "extension_group": "chatgpt", + "property": { + "api_key": "${env:DEEPSEEK_API_KEY}", + "base_url": "https://tenagentopenai.services.ai.azure.com/models", + "frequency_penalty": 0.9, + "greeting": "TEN Agent connected. 
How can I help you today?", + "max_memory_length": 10, + "max_tokens": 512, + "model": "DeepSeek-R1", + "prompt": "", + "proxy_url": "${env:OPENAI_PROXY_URL}" + } + }, + { + "type": "extension", + "name": "tts", + "addon": "azure_tts", + "extension_group": "tts", + "property": { + "azure_subscription_key": "${env:AZURE_TTS_KEY}", + "azure_subscription_region": "${env:AZURE_TTS_REGION}", + "azure_synthesis_voice_name": "en-US-AndrewMultilingualNeural" + } + }, + { + "type": "extension", + "name": "interrupt_detector", + "addon": "interrupt_detector_python", + "extension_group": "default", + "property": {} + }, + { + "type": "extension", + "name": "message_collector", + "addon": "message_collector", + "extension_group": "transcriber", + "property": {} + } + ], + "connections": [ + { + "extension": "agora_rtc", + "cmd": [ + { + "name": "on_user_joined", + "dest": [ + { + "extension": "llm" + } + ] + }, + { + "name": "on_user_left", + "dest": [ + { + "extension": "llm" + } + ] + }, + { + "name": "on_connection_failure", + "dest": [ + { + "extension": "llm" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "interrupt_detector" + }, + { + "extension": "message_collector" + } + ] + } + ] + }, + { + "extension": "llm", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "tts" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "tts" + }, + { + "extension": "message_collector" + } + ] + }, + { + "name": "content_data", + "dest": [ + { + "extension": "message_collector" + } + ] + } + ] + }, + { + "extension": "message_collector", + "data": [ + { + "name": "data", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "tts", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "interrupt_detector", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "llm" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "llm" + } + ] + } + ] + } + ] + }, + { + "name": "voice_assistant_realtime", + "auto_start": true, + "nodes": [ + { + "type": "extension", + "name": "agora_rtc", + "addon": "agora_rtc", + "extension_group": "rtc", + "property": { + "app_id": "${env:AGORA_APP_ID}", + "token": "", + "channel": "ten_agent_test", + "stream_id": 1234, + "remote_stream_id": 123, + "subscribe_audio": true, + "publish_audio": true, + "publish_data": true, + "subscribe_audio_sample_rate": 24000 + } + }, + { + "type": "extension", + "name": "v2v", + "addon": "openai_v2v_python", + "extension_group": "llm", + "property": { + "api_key": "${env:OPENAI_REALTIME_API_KEY}", + "temperature": 0.9, + "model": "gpt-4o-realtime-preview-2024-12-17", + "max_tokens": 2048, + "voice": "alloy", + "language": "en-US", + "server_vad": true, + "dump": true, + "max_history": 10 + } + }, + { + "type": "extension", + "name": "message_collector", + "addon": "message_collector", + "extension_group": "transcriber", + "property": {} + }, + { + "type": "extension", + "name": "weatherapi_tool_python", + "addon": "weatherapi_tool_python", + "extension_group": "default", + "property": { + "api_key": "${env:WEATHERAPI_API_KEY|}" + } + } + ], + "connections": [ + { + "extension": "agora_rtc", + "cmd": [ + { + "name": "on_user_joined", + "dest": [ + { + "extension": "v2v" + } + ] + }, + { + "name": "on_user_left", + "dest": [ + { + 
"extension": "v2v" + } + ] + }, + { + "name": "on_connection_failure", + "dest": [ + { + "extension": "v2v" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "v2v" + } + ] + } + ] + }, + { + "extension": "v2v", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "agora_rtc" + } + ] + }, + { + "name": "tool_call", + "dest": [ + { + "extension": "weatherapi_tool_python" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "message_collector" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "message_collector", + "data": [ + { + "name": "data", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "weatherapi_tool_python", + "cmd": [ + { + "name": "tool_register", + "dest": [ + { + "extension": "v2v" + } + ] + } + ] + } + ] + }, + { + "name": "va_openai_azure", + "auto_start": true, + "nodes": [ + { + "type": "extension", + "name": "agora_rtc", + "addon": "agora_rtc", + "extension_group": "default", + "property": { + "app_id": "${env:AGORA_APP_ID}", + "token": "", + "channel": "ten_agent_test", + "stream_id": 1234, + "remote_stream_id": 123, + "subscribe_audio": true, + "publish_audio": true, + "publish_data": true, + "enable_agora_asr": true, + "agora_asr_vendor_name": "microsoft", + "agora_asr_language": "en-US", + "agora_asr_vendor_key": "${env:AZURE_STT_KEY|}", + "agora_asr_vendor_region": "${env:AZURE_STT_REGION|}", + "agora_asr_session_control_file_path": "session_control.conf", + "subscribe_video_pix_fmt": 4, + "subscribe_video": true + } + }, + { + "type": "extension", + "name": "llm", + "addon": "openai_chatgpt_python", + "extension_group": "chatgpt", + "property": { + "api_key": "${env:OPENAI_API_KEY}", + "base_url": "", + "frequency_penalty": 0.9, + "greeting": "TEN Agent connected. 
How can I help you today?", + "max_memory_length": 10, + "max_tokens": 512, + "model": "${env:OPENAI_MODEL}", + "prompt": "", + "proxy_url": "${env:OPENAI_PROXY_URL}" + } + }, + { + "type": "extension", + "name": "tts", + "addon": "azure_tts", + "extension_group": "tts", + "property": { + "azure_subscription_key": "${env:AZURE_TTS_KEY}", + "azure_subscription_region": "${env:AZURE_TTS_REGION}", + "azure_synthesis_voice_name": "en-US-AndrewMultilingualNeural" + } + }, + { + "type": "extension", + "name": "interrupt_detector", + "addon": "interrupt_detector_python", + "extension_group": "default", + "property": {} + }, + { + "type": "extension", + "name": "message_collector", + "addon": "message_collector", + "extension_group": "transcriber", + "property": {} + }, + { + "type": "extension", + "name": "weatherapi_tool_python", + "addon": "weatherapi_tool_python", + "extension_group": "default", + "property": { + "api_key": "${env:WEATHERAPI_API_KEY|}" + } + }, + { + "type": "extension", + "name": "vision_tool_python", + "addon": "vision_tool_python", + "extension_group": "default", + "property": {} + }, + { + "type": "extension", + "name": "bingsearch_tool_python", + "addon": "bingsearch_tool_python", + "extension_group": "default", + "property": { + "api_key": "${env:BING_API_KEY|}" + } + } + ], + "connections": [ + { + "extension": "agora_rtc", + "cmd": [ + { + "name": "on_user_joined", + "dest": [ + { + "extension": "llm" + } + ] + }, + { + "name": "on_user_left", + "dest": [ + { + "extension": "llm" + } + ] + }, + { + "name": "on_connection_failure", + "dest": [ + { + "extension": "llm" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "interrupt_detector" + }, + { + "extension": "message_collector" + } + ] + } + ], + "video_frame": [ + { + "name": "video_frame", + "dest": [ + { + "extension": "vision_tool_python" + } + ] + } + ] + }, + { + "extension": "llm", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "tts" + } + ] + }, + { + "name": "tool_call", + "dest": [ + { + "extension": "weatherapi_tool_python" + }, + { + "extension": "vision_tool_python" + }, + { + "extension": "bingsearch_tool_python" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "tts" + }, + { + "extension": "message_collector" + } + ] + } + ] + }, + { + "extension": "message_collector", + "data": [ + { + "name": "data", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "tts", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "interrupt_detector", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "llm" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "llm" + } + ] + } + ] + }, + { + "extension": "weatherapi_tool_python", + "cmd": [ + { + "name": "tool_register", + "dest": [ + { + "extension": "llm" + } + ] + } + ] + }, + { + "extension": "vision_tool_python", + "cmd": [ + { + "name": "tool_register", + "dest": [ + { + "extension": "llm" + } + ] + } + ] + }, + { + "extension": "bingsearch_tool_python", + "cmd": [ + { + "name": "tool_register", + "dest": [ + { + "extension": "llm" + } + ] + } + ] + } + ] + }, + { + "name": "va_openai_v2v", + "auto_start": true, + "nodes": [ + { + "type": "extension", + "name": "agora_rtc", + "addon": "agora_rtc", + "extension_group": "rtc", + 
"property": { + "app_id": "${env:AGORA_APP_ID}", + "token": "", + "channel": "ten_agent_test", + "stream_id": 1234, + "remote_stream_id": 123, + "subscribe_audio": true, + "publish_audio": true, + "publish_data": true, + "subscribe_audio_sample_rate": 24000 + } + }, + { + "type": "extension", + "name": "v2v", + "addon": "openai_v2v_python", + "extension_group": "llm", + "property": { + "api_key": "${env:OPENAI_REALTIME_API_KEY}", + "temperature": 0.9, + "model": "gpt-4o-realtime-preview-2024-12-17", + "max_tokens": 2048, + "voice": "alloy", + "language": "en-US", + "server_vad": true, + "dump": true, + "max_history": 10 + } + }, + { + "type": "extension", + "name": "message_collector", + "addon": "message_collector", + "extension_group": "transcriber", + "property": {} + }, + { + "type": "extension", + "name": "bingsearch_tool_python", + "addon": "bingsearch_tool_python", + "extension_group": "default", + "property": { + "api_key": "${env:BING_API_KEY|}" + } + }, + { + "type": "extension", + "name": "weatherapi_tool_python", + "addon": "weatherapi_tool_python", + "extension_group": "default", + "property": { + "api_key": "${env:WEATHERAPI_API_KEY|}" + } + } + ], + "connections": [ + { + "extension": "agora_rtc", + "cmd": [ + { + "name": "on_user_joined", + "dest": [ + { + "extension": "v2v" + } + ] + }, + { + "name": "on_user_left", + "dest": [ + { + "extension": "v2v" + } + ] + }, + { + "name": "on_connection_failure", + "dest": [ + { + "extension": "v2v" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "v2v" + } + ] + } + ] + }, + { + "extension": "v2v", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "agora_rtc" + } + ] + }, + { + "name": "tool_call", + "dest": [ + { + "extension": "bingsearch_tool_python" + }, + { + "extension": "weatherapi_tool_python" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "message_collector" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "message_collector", + "data": [ + { + "name": "data", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "bingsearch_tool_python", + "cmd": [ + { + "name": "tool_register", + "dest": [ + { + "extension": "v2v" + } + ] + } + ] + }, + { + "extension": "weatherapi_tool_python", + "cmd": [ + { + "name": "tool_register", + "dest": [ + { + "extension": "v2v" + } + ] + } + ] + } + ] + }, + { + "name": "va_openai_v2v_fish", + "auto_start": true, + "nodes": [ + { + "type": "extension", + "name": "agora_rtc", + "addon": "agora_rtc", + "extension_group": "rtc", + "property": { + "app_id": "${env:AGORA_APP_ID}", + "token": "", + "channel": "ten_agent_test", + "stream_id": 1234, + "remote_stream_id": 123, + "subscribe_audio": true, + "publish_audio": true, + "publish_data": true, + "subscribe_audio_sample_rate": 24000, + "enable_agora_asr": false, + "agora_asr_vendor_name": "microsoft", + "agora_asr_language": "en-US", + "agora_asr_vendor_key": "${env:AZURE_STT_KEY}", + "agora_asr_vendor_region": "${env:AZURE_STT_REGION}", + "agora_asr_session_control_file_path": "session_control.conf" + } + }, + { + "type": "extension", + "name": "v2v", + "addon": "openai_v2v_python", + "extension_group": "llm", + "property": { + "api_key": "${env:OPENAI_REALTIME_API_KEY}", + "temperature": 0.9, + "model": "gpt-4o-realtime-preview-2024-12-17", + "max_tokens": 2048, + "audio_out": false, + "input_transcript": false, + 
"language": "en-US", + "server_vad": true, + "dump": true, + "max_history": 10 + } + }, + { + "type": "extension", + "name": "tts", + "addon": "fish_audio_tts", + "extension_group": "tts", + "property": { + "api_key": "${env:FISH_AUDIO_TTS_KEY}", + "base_url": "https://api.fish.audio", + "model_id": "d8639b5cc95548f5afbcfe22d3ba5ce5", + "optimize_streaming_latency": true, + "request_timeout_seconds": 30 + } + }, + { + "type": "extension", + "name": "message_collector", + "addon": "message_collector", + "extension_group": "transcriber", + "property": {} + }, + { + "type": "extension", + "name": "weatherapi_tool_python", + "addon": "weatherapi_tool_python", + "extension_group": "tools", + "property": { + "api_key": "${env:WEATHERAPI_API_KEY}" + } + }, + { + "type": "extension", + "name": "bingsearch_tool_python", + "addon": "bingsearch_tool_python", + "extension_group": "tools", + "property": { + "api_key": "${env:BING_API_KEY}" + } + } + ], + "connections": [ + { + "extension": "agora_rtc", + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "message_collector" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "v2v" + } + ] + } + ] + }, + { + "extension": "weatherapi_tool_python", + "cmd": [ + { + "name": "tool_register", + "dest": [ + { + "extension": "v2v" + } + ] + } + ] + }, + { + "extension": "bingsearch_tool_python", + "cmd": [ + { + "name": "tool_register", + "dest": [ + { + "extension": "v2v" + } + ] + } + ] + }, + { + "extension": "v2v", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "tts" + } + ] + }, + { + "name": "tool_call", + "dest": [ + { + "extension": "weatherapi_tool_python" + }, + { + "extension": "bingsearch_tool_python" + } + ] + }, + { + "name": "on_user_joined", + "dest": [ + { + "extension": "v2v" + } + ] + }, + { + "name": "on_user_left", + "dest": [ + { + "extension": "v2v" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "message_collector" + }, + { + "extension": "tts" + } + ] + } + ] + }, + { + "extension": "tts", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "message_collector", + "data": [ + { + "name": "data", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + } + ] + }, + { + "name": "va_coze_azure", + "auto_start": false, + "nodes": [ + { + "type": "extension", + "name": "agora_rtc", + "addon": "agora_rtc", + "extension_group": "default", + "property": { + "app_id": "${env:AGORA_APP_ID}", + "token": "", + "channel": "ten_agent_test", + "stream_id": 1234, + "remote_stream_id": 123, + "subscribe_audio": true, + "publish_audio": true, + "publish_data": true, + "enable_agora_asr": true, + "agora_asr_vendor_name": "microsoft", + "agora_asr_language": "en-US", + "agora_asr_vendor_key": "${env:AZURE_STT_KEY}", + "agora_asr_vendor_region": "${env:AZURE_STT_REGION}", + "agora_asr_session_control_file_path": "session_control.conf" + } + }, + { + "type": "extension", + "name": "interrupt_detector", + "addon": "interrupt_detector_python", + "extension_group": "default" + }, + { + "type": "extension", + "name": "coze_python_async", + "addon": "coze_python_async", + "extension_group": "glue", + "property": { + "token": "", + "bot_id": "", + "base_url": "https://api.coze.cn", + "prompt": "", + "greeting": "TEN Agent connected. How can I help you today?" 
+ } + }, + { + "type": "extension", + "name": "tts", + "addon": "azure_tts", + "extension_group": "tts", + "property": { + "azure_subscription_key": "${env:AZURE_TTS_KEY}", + "azure_subscription_region": "${env:AZURE_TTS_REGION}", + "azure_synthesis_voice_name": "en-US-AndrewMultilingualNeural" + } + }, + { + "type": "extension", + "name": "message_collector", + "addon": "message_collector", + "extension_group": "transcriber" + } + ], + "connections": [ + { + "extension": "agora_rtc", + "cmd": [ + { + "name": "on_user_joined", + "dest": [ + { + "extension": "coze_python_async" + } + ] + }, + { + "name": "on_user_left", + "dest": [ + { + "extension": "coze_python_async" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "interrupt_detector" + }, + { + "extension": "coze_python_async" + }, + { + "extension": "message_collector" + } + ] + } + ] + }, + { + "extension": "coze_python_async", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "tts" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "tts" + }, + { + "extension": "message_collector" + } + ] + } + ] + }, + { + "extension": "tts", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "message_collector", + "data": [ + { + "name": "data", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "interrupt_detector", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "coze_python_async" + } + ] + } + ] + } + ] + }, + { + "name": "va_gemini_v2v", + "auto_start": true, + "nodes": [ + { + "type": "extension", + "name": "agora_rtc", + "addon": "agora_rtc", + "extension_group": "rtc", + "property": { + "app_id": "${env:AGORA_APP_ID}", + "token": "", + "channel": "ten_agent_test", + "stream_id": 1234, + "remote_stream_id": 123, + "subscribe_audio": true, + "publish_audio": true, + "publish_data": true, + "subscribe_audio_sample_rate": 24000, + "subscribe_video_pix_fmt": 4, + "subscribe_video": true + } + }, + { + "type": "extension", + "name": "v2v", + "addon": "gemini_v2v_python", + "extension_group": "llm", + "property": { + "api_key": "${env:GEMINI_API_KEY}", + "api_version": "v1alpha", + "base_uri": "generativelanguage.googleapis.com", + "dump": true, + "language": "en-US", + "max_tokens": 2048, + "model": "gemini-2.0-flash-exp", + "server_vad": true, + "temperature": 0.9, + "voice": "Puck" + } + }, + { + "type": "extension", + "name": "message_collector", + "addon": "message_collector", + "extension_group": "transcriber", + "property": {} + }, + { + "type": "extension", + "name": "weatherapi_tool_python", + "addon": "weatherapi_tool_python", + "extension_group": "default", + "property": { + "api_key": "${env:WEATHERAPI_API_KEY|}" + } + } + ], + "connections": [ + { + "extension": "agora_rtc", + "cmd": [ + { + "name": "on_user_joined", + "dest": [ + { + "extension": "v2v" + } + ] + }, + { + "name": "on_user_left", + "dest": [ + { + "extension": "v2v" + } + ] + }, + { + "name": "on_connection_failure", + "dest": [ + { + "extension": "v2v" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "v2v" + } + ] + } + ], + "video_frame": [ + { + "name": "video_frame", + "dest": [ + { + "extension": "v2v" + } + ] + } + ] + }, + { + "extension": "v2v", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": 
"agora_rtc" + } + ] + }, + { + "name": "tool_call", + "dest": [ + { + "extension": "weatherapi_tool_python" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "message_collector" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "message_collector", + "data": [ + { + "name": "data", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "weatherapi_tool_python", + "cmd": [ + { + "name": "tool_register", + "dest": [ + { + "extension": "v2v" + } + ] + } + ] + } + ] + }, + { + "name": "va_dify_azure", + "auto_start": true, + "nodes": [ + { + "type": "extension", + "name": "agora_rtc", + "addon": "agora_rtc", + "extension_group": "default", + "property": { + "app_id": "${env:AGORA_APP_ID}", + "token": "", + "channel": "ten_agent_test", + "stream_id": 1234, + "remote_stream_id": 123, + "subscribe_audio": true, + "publish_audio": true, + "publish_data": true, + "enable_agora_asr": true, + "agora_asr_vendor_name": "microsoft", + "agora_asr_language": "en-US", + "agora_asr_vendor_key": "${env:AZURE_STT_KEY|}", + "agora_asr_vendor_region": "${env:AZURE_STT_REGION|}", + "agora_asr_session_control_file_path": "session_control.conf" + } + }, + { + "type": "extension", + "name": "llm", + "addon": "dify_python", + "extension_group": "chatgpt", + "property": { + "api_key": "${env:DIFY_API_KEY}", + "base_url": "https://api.dify.ai/v1", + "greeting": "TEN Agent connected with Dify. How can I help you today?", + "user_id": "User" + } + }, + { + "type": "extension", + "name": "tts", + "addon": "azure_tts", + "extension_group": "tts", + "property": { + "azure_subscription_key": "${env:AZURE_TTS_KEY}", + "azure_subscription_region": "${env:AZURE_TTS_REGION}", + "azure_synthesis_voice_name": "en-US-AndrewMultilingualNeural" + } + }, + { + "type": "extension", + "name": "interrupt_detector", + "addon": "interrupt_detector_python", + "extension_group": "default", + "property": {} + }, + { + "type": "extension", + "name": "message_collector", + "addon": "message_collector", + "extension_group": "transcriber", + "property": {} + } + ], + "connections": [ + { + "extension": "agora_rtc", + "cmd": [ + { + "name": "on_user_joined", + "dest": [ + { + "extension": "llm" + } + ] + }, + { + "name": "on_user_left", + "dest": [ + { + "extension": "llm" + } + ] + }, + { + "name": "on_connection_failure", + "dest": [ + { + "extension": "llm" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "interrupt_detector" + }, + { + "extension": "message_collector" + } + ] + } + ] + }, + { + "extension": "llm", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "tts" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "tts" + }, + { + "extension": "message_collector" + } + ] + } + ] + }, + { + "extension": "message_collector", + "data": [ + { + "name": "data", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "tts", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "interrupt_detector", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "llm" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "llm" + } + ] + } + ] + } + ] + }, + { + "name": 
"story_teller_stt_integrated", + "auto_start": true, + "nodes": [ + { + "type": "extension", + "name": "agora_rtc", + "addon": "agora_rtc", + "extension_group": "default", + "property": { + "app_id": "${env:AGORA_APP_ID}", + "token": "", + "channel": "ten_agent_test", + "stream_id": 1234, + "remote_stream_id": 123, + "subscribe_audio": true, + "publish_audio": true, + "publish_data": true, + "enable_agora_asr": true, + "agora_asr_vendor_name": "microsoft", + "agora_asr_language": "en-US", + "agora_asr_vendor_key": "${env:AZURE_STT_KEY|}", + "agora_asr_vendor_region": "${env:AZURE_STT_REGION|}", + "agora_asr_session_control_file_path": "session_control.conf" + } + }, + { + "type": "extension", + "name": "llm", + "addon": "openai_chatgpt_python", + "extension_group": "chatgpt", + "property": { + "api_key": "${env:OPENAI_API_KEY}", + "base_url": "", + "frequency_penalty": 0.9, + "greeting": "TEN Agent connected. How can I help you today?", + "max_memory_length": 10, + "max_tokens": 512, + "model": "${env:OPENAI_MODEL}", + "prompt": "You are an ai agent bot producing child picture books. Each response should be short and no more than 50 words as it's for child. \nFor every response relevant to the story-telling, you will use the 'image_generate' tool to create an image based on the description or key moment in that part of the story. \n The story should be set in a fantasy world. Try asking questions relevant to the story to decide how the story should proceed. Every response should include rich, vivid descriptions that will guide the 'image_generate' tool to produce an image that aligns with the scene or mood.\n Whether it’s the setting, a character’s expression, or a dramatic moment, the paragraph should give enough detail for a meaningful visual representation.", + "proxy_url": "${env:OPENAI_PROXY_URL}" + } + }, + { + "type": "extension", + "name": "tts", + "addon": "azure_tts", + "extension_group": "tts", + "property": { + "azure_subscription_key": "${env:AZURE_TTS_KEY}", + "azure_subscription_region": "${env:AZURE_TTS_REGION}", + "azure_synthesis_voice_name": "en-US-AndrewMultilingualNeural" + } + }, + { + "type": "extension", + "name": "interrupt_detector", + "addon": "interrupt_detector_python", + "extension_group": "default", + "property": {} + }, + { + "type": "extension", + "name": "message_collector", + "addon": "message_collector", + "extension_group": "transcriber", + "property": {} + }, + { + "type": "extension", + "name": "openai_image_generate_tool", + "addon": "openai_image_generate_tool", + "extension_group": "default", + "property": { + "api_key": "${env:OPENAI_API_KEY}" + } + } + ], + "connections": [ + { + "extension": "agora_rtc", + "cmd": [ + { + "name": "on_user_joined", + "dest": [ + { + "extension": "llm" + } + ] + }, + { + "name": "on_user_left", + "dest": [ + { + "extension": "llm" + } + ] + }, + { + "name": "on_connection_failure", + "dest": [ + { + "extension": "llm" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "interrupt_detector" + }, + { + "extension": "message_collector" + } + ] + } + ] + }, + { + "extension": "llm", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "tts" + } + ] + }, + { + "name": "tool_call", + "dest": [ + { + "extension": "openai_image_generate_tool" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "tts" + }, + { + "extension": "message_collector" + } + ] + } + ] + }, + { + "extension": "message_collector", + "data": [ + { + "name": "data", + 
"dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "tts", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "interrupt_detector", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "llm" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "llm" + } + ] + } + ] + }, + { + "extension": "openai_image_generate_tool", + "cmd": [ + { + "name": "tool_register", + "dest": [ + { + "extension": "llm" + } + ] + } + ], + "data": [ + { + "name": "content_data", + "dest": [ + { + "extension": "message_collector" + } + ] + } + ] + } + ] + }, + { + "name": "va_nova_multimodal_aws", + "auto_start": true, + "nodes": [ + { + "type": "extension", + "name": "agora_rtc", + "addon": "agora_rtc", + "extension_group": "default", + "property": { + "app_id": "${env:AGORA_APP_ID}", + "token": "", + "channel": "ten_agent_test", + "stream_id": 1234, + "remote_stream_id": 123, + "subscribe_audio": true, + "publish_audio": true, + "publish_data": true, + "enable_agora_asr": false, + "agora_asr_vendor_name": "microsoft", + "agora_asr_language": "en-US", + "agora_asr_vendor_key": "${env:AZURE_STT_KEY|}", + "agora_asr_vendor_region": "${env:AZURE_STT_REGION|}", + "agora_asr_session_control_file_path": "session_control.conf", + "subscribe_video_pix_fmt": 4, + "subscribe_video": true, + "max_memory_length": 10 + } + }, + { + "type": "extension", + "name": "stt", + "addon": "transcribe_asr_python", + "extension_group": "stt", + "property": { + "access_key": "${env:AWS_ACCESS_KEY_ID}", + "lang_code": "en-US", + "region": "us-east-1", + "sample_rate": "16000", + "secret_key": "${env:AWS_SECRET_ACCESS_KEY}" + } + }, + { + "type": "extension", + "name": "llm", + "addon": "bedrock_llm_python", + "extension_group": "chatgpt", + "property": { + "access_key_id": "${env:AWS_ACCESS_KEY_ID}", + "greeting": "TEN Agent connected. I am nova, How can I help you today?", + "max_memory_length": 10, + "max_tokens": 256, + "model": "us.amazon.nova-lite-v1:0", + "prompt": "Now you are an intelligent assistant with real-time interaction capabilities. I will provide you with a series of real-time video image information. Please understand these images as video frames. 
Based on the images and the user's input, engage in a conversation with the user, remembering the dialogue content in a concise and clear manner.", + "region": "us-east-1", + "secret_access_key": "${env:AWS_SECRET_ACCESS_KEY}", + "temperature": 0.7, + "topK": 10, + "topP": 0.5, + "is_memory_enabled": false, + "is_enable_video": true + } + }, + { + "type": "extension", + "name": "tts", + "addon": "polly_tts", + "extension_group": "tts", + "property": { + "region": "us-east-1", + "access_key": "${env:AWS_ACCESS_KEY_ID}", + "secret_key": "${env:AWS_SECRET_ACCESS_KEY}", + "engine": "generative", + "voice": "Ruth", + "sample_rate": 16000, + "lang_code": "en-US" + } + }, + { + "type": "extension", + "name": "interrupt_detector", + "addon": "interrupt_detector_python", + "extension_group": "default", + "property": {} + }, + { + "type": "extension", + "name": "message_collector", + "addon": "message_collector", + "extension_group": "transcriber", + "property": {} + } + ], + "connections": [ + { + "extension": "agora_rtc", + "cmd": [ + { + "name": "on_user_joined", + "dest": [ + { + "extension": "llm" + } + ] + }, + { + "name": "on_user_left", + "dest": [ + { + "extension": "llm" + } + ] + }, + { + "name": "on_connection_failure", + "dest": [ + { + "extension": "llm" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "stt" + } + ] + } + ], + "video_frame": [ + { + "name": "video_frame", + "dest": [ + { + "extension": "llm" + } + ] + } + ] + }, + { + "extension": "stt", + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "interrupt_detector" + }, + { + "extension": "message_collector" + } + ] + } + ] + }, + { + "extension": "llm", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "tts" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "tts" + }, + { + "extension": "message_collector" + } + ] + } + ] + }, + { + "extension": "message_collector", + "data": [ + { + "name": "data", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "tts", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "interrupt_detector", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "llm" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "llm" + } + ] + } + ] + } + ] + } + ], + "log_level": 3 + } +} \ No newline at end of file diff --git a/agents/examples/experimental/manifest.json b/agents/examples/experimental/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..1eacf6ed770698564b17ec0207860cfe0161c13c --- /dev/null +++ b/agents/examples/experimental/manifest.json @@ -0,0 +1,122 @@ +{ + "type": "app", + "name": "agent_experimental", + "version": "0.8.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_go", + "version": "0.8" + }, + { + "type": "extension", + "name": "agora_rtc", + "version": "=0.12.0" + }, + { + "type": "extension", + "name": "agora_sess_ctrl", + "version": "=0.4.4" + }, + { + "type": "system", + "name": "azure_speech_sdk", + "version": "1.38.0" + }, + { + "type": "system", + "name": "ten_ai_base", + "version": "0.4.1" + }, + { + "type": "extension", + "name": "azure_tts", + "version": "=0.8.1" + }, + { + "type": "extension", + "name": "agora_rtm", + "version": "=0.8.1" + }, + { + "type": 
"extension", + "name": "interrupt_detector_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "openai_chatgpt_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "message_collector", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "fashionai", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "qwen_llm_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "cosy_tts_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "http_server_python", + "version": "=0.10.1" + }, + { + "type": "extension", + "name": "aliyun_text_embedding", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "aliyun_analyticdb_vector_storage", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "file_chunker", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "llama_index_chat_engine", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "openai_v2v_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "weatherapi_tool_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "bingsearch_tool_python", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "tsdb_firestore", + "version": "=0.1.0" + }, + { + "type": "extension", + "name": "minimax_v2v_python", + "version": "=0.1.0" + } + ] +} \ No newline at end of file diff --git a/agents/examples/experimental/property.json b/agents/examples/experimental/property.json new file mode 100644 index 0000000000000000000000000000000000000000..ba7092c0f06b7704eed3dba1b134db4aa151400a --- /dev/null +++ b/agents/examples/experimental/property.json @@ -0,0 +1,862 @@ +{ + "_ten": { + "log_level": 3, + "predefined_graphs": [ + { + "name": "va_openai_azure_fashionai", + "auto_start": false, + "connections": [ + { + "data": [ + { + "dest": [ + { + "extension": "interrupt_detector" + }, + { + "extension": "openai_chatgpt" + }, + { + "extension": "message_collector" + } + ], + "name": "text_data" + } + ], + "cmd": [ + { + "name": "on_user_joined", + "dest": [ + { + "extension": "openai_chatgpt" + } + ] + }, + { + "name": "on_user_left", + "dest": [ + { + "extension": "openai_chatgpt" + } + ] + } + ], + "extension": "agora_rtc" + }, + { + "cmd": [ + { + "dest": [ + { + "extension": "fashionai" + } + ], + "name": "flush" + } + ], + "data": [ + { + "dest": [ + { + "extension": "message_collector" + }, + { + "extension": "fashionai" + } + ], + "name": "text_data" + } + ], + "extension": "openai_chatgpt" + }, + { + "data": [ + { + "dest": [ + { + "extension": "agora_rtc" + } + ], + "name": "data" + } + ], + "extension": "message_collector" + }, + { + "cmd": [ + { + "dest": [ + { + "extension": "openai_chatgpt" + } + ], + "name": "flush" + } + ], + "extension": "interrupt_detector" + } + ], + "nodes": [ + { + "addon": "agora_rtc", + "extension_group": "default", + "name": "agora_rtc", + "property": { + "agora_asr_language": "en-US", + "agora_asr_session_control_file_path": "session_control.conf", + "agora_asr_vendor_key": "${env:AZURE_STT_KEY}", + "agora_asr_vendor_name": "microsoft", + "agora_asr_vendor_region": "${env:AZURE_STT_REGION}", + "app_id": "${env:AGORA_APP_ID}", + "channel": "ten_agent_test", + "enable_agora_asr": true, + "publish_audio": true, + "publish_data": true, + "remote_stream_id": 123, + "stream_id": 1234, + "subscribe_audio": true, + "token": "" + }, + "type": "extension" + }, + { + "addon": "interrupt_detector", + "extension_group": "default", + "name": 
"interrupt_detector", + "type": "extension" + }, + { + "addon": "openai_chatgpt_python", + "extension_group": "chatgpt", + "name": "openai_chatgpt", + "property": { + "api_key": "${env:OPENAI_API_KEY}", + "base_url": "${env:OPENAI_API_BASE}", + "frequency_penalty": 0.9, + "greeting": "TEN Agent connected. How can I help you today?", + "max_memory_length": 10, + "max_tokens": 512, + "model": "${env:OPENAI_MODEL}", + "prompt": "", + "proxy_url": "${env:OPENAI_PROXY_URL}" + }, + "type": "extension" + }, + { + "addon": "message_collector", + "extension_group": "transcriber", + "name": "message_collector", + "type": "extension" + }, + { + "addon": "fashionai", + "extension_group": "default", + "name": "fashionai", + "property": { + "app_id": "${env:AGORA_APP_ID}", + "channel": "ten_agent_test", + "stream_id": 12345, + "token": "", + "service_id": "agoramultimodel" + }, + "type": "extension" + } + ] + }, + { + "name": "va_qwen_rag", + "auto_start": false, + "nodes": [ + { + "type": "extension", + "extension_group": "rtc", + "addon": "agora_rtc", + "name": "agora_rtc", + "property": { + "app_id": "${env:AGORA_APP_ID}", + "token": "", + "channel": "ten_agent_test", + "stream_id": 1234, + "remote_stream_id": 123, + "subscribe_audio": true, + "publish_audio": true, + "publish_data": true, + "enable_agora_asr": true, + "agora_asr_vendor_name": "microsoft", + "agora_asr_language": "en-US", + "agora_asr_vendor_key": "${env:AZURE_STT_KEY}", + "agora_asr_vendor_region": "${env:AZURE_STT_REGION}", + "agora_asr_session_control_file_path": "session_control.conf" + } + }, + { + "type": "extension", + "extension_group": "llm", + "addon": "qwen_llm_python", + "name": "qwen_llm", + "property": { + "api_key": "${env:QWEN_API_KEY}", + "model": "qwen-max", + "max_tokens": 512, + "prompt": "", + "max_memory_length": 10, + "greeting": "TEN Agent connected. How can I help you today?" 
+ } + }, + { + "type": "extension", + "extension_group": "tts", + "addon": "cosy_tts_python", + "name": "cosy_tts", + "property": { + "api_key": "${env:QWEN_API_KEY}", + "model": "cosyvoice-v1", + "voice": "longxiaochun", + "sample_rate": 16000 + } + }, + { + "type": "extension", + "extension_group": "tts", + "addon": "azure_tts", + "name": "azure_tts", + "property": { + "azure_subscription_key": "${env:AZURE_TTS_KEY}", + "azure_subscription_region": "${env:AZURE_TTS_REGION}", + "azure_synthesis_voice_name": "en-US-AndrewMultilingualNeural" + } + }, + { + "type": "extension", + "extension_group": "chat_transcriber", + "addon": "message_collector", + "name": "message_collector" + }, + { + "type": "extension", + "extension_group": "interrupt_detector", + "addon": "interrupt_detector_python", + "name": "interrupt_detector" + }, + { + "type": "extension", + "extension_group": "http_server", + "addon": "http_server_python", + "name": "http_server", + "property": { + "listen_addr": "127.0.0.1", + "listen_port": 8080 + } + }, + { + "type": "extension", + "extension_group": "embedding", + "addon": "aliyun_text_embedding", + "name": "aliyun_text_embedding", + "property": { + "api_key": "${env:ALIYUN_TEXT_EMBEDDING_API_KEY}", + "model": "text-embedding-v3" + } + }, + { + "type": "extension", + "extension_group": "vector_storage", + "addon": "aliyun_analyticdb_vector_storage", + "name": "aliyun_analyticdb_vector_storage", + "property": { + "alibaba_cloud_access_key_id": "${env:ALIBABA_CLOUD_ACCESS_KEY_ID}", + "alibaba_cloud_access_key_secret": "${env:ALIBABA_CLOUD_ACCESS_KEY_SECRET}", + "adbpg_instance_id": "${env:ALIYUN_ANALYTICDB_INSTANCE_ID}", + "adbpg_instance_region": "${env:ALIYUN_ANALYTICDB_INSTANCE_REGION}", + "adbpg_account": "${env:ALIYUN_ANALYTICDB_ACCOUNT}", + "adbpg_account_password": "${env:ALIYUN_ANALYTICDB_ACCOUNT_PASSWORD}", + "adbpg_namespace": "${env:ALIYUN_ANALYTICDB_NAMESPACE}", + "adbpg_namespace_password": "${env:ALIYUN_ANALYTICDB_NAMESPACE_PASSWORD}" + } + }, + { + "type": "extension", + "extension_group": "file_chunker", + "addon": "file_chunker", + "name": "file_chunker", + "property": {} + }, + { + "type": "extension", + "extension_group": "llama_index", + "addon": "llama_index_chat_engine", + "name": "llama_index", + "property": { + "greeting": "TEN Agent connected. 
How can I help you today?", + "chat_memory_token_limit": 3000 + } + } + ], + "connections": [ + { + "extension": "agora_rtc", + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "interrupt_detector" + }, + { + "extension": "message_collector" + } + ] + } + ] + }, + { + "extension": "interrupt_detector", + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "llama_index" + } + ] + }, + { + "name": "file_chunk", + "dest": [ + { + "extension": "file_chunker" + }, + { + "extension": "llama_index" + } + ] + }, + { + "name": "file_chunked", + "dest": [ + { + "extension": "llama_index" + } + ] + }, + { + "name": "update_querying_collection", + "dest": [ + { + "extension": "llama_index" + } + ] + } + ], + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "llama_index" + } + ] + } + ] + }, + { + "extension": "llama_index", + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "azure_tts" + }, + { + "extension": "message_collector" + } + ] + } + ], + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "qwen_llm" + }, + { + "extension": "azure_tts" + } + ] + }, + { + "name": "call_chat", + "dest": [ + { + "extension": "qwen_llm" + } + ] + }, + { + "name": "embed", + "dest": [ + { + "extension": "aliyun_text_embedding" + } + ] + }, + { + "name": "query_vector", + "dest": [ + { + "extension": "aliyun_analyticdb_vector_storage" + } + ] + } + ] + }, + { + "extension": "azure_tts", + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ], + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "message_collector", + "data": [ + { + "name": "data", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "http_server", + "cmd": [ + { + "name": "file_chunk", + "dest": [ + { + "extension": "interrupt_detector" + } + ] + }, + { + "name": "update_querying_collection", + "dest": [ + { + "extension": "interrupt_detector" + } + ] + } + ] + }, + { + "extension": "file_chunker", + "cmd": [ + { + "name": "embed_batch", + "dest": [ + { + "extension": "aliyun_text_embedding" + } + ] + }, + { + "name": "create_collection", + "dest": [ + { + "extension": "aliyun_analyticdb_vector_storage" + } + ] + }, + { + "name": "upsert_vector", + "dest": [ + { + "extension": "aliyun_analyticdb_vector_storage" + } + ] + }, + { + "name": "file_chunked", + "dest": [ + { + "extension": "llama_index" + } + ] + } + ] + } + ] + }, + { + "name": "va_openai_v2v_storage", + "auto_start": false, + "nodes": [ + { + "type": "extension", + "extension_group": "rtc", + "addon": "agora_rtc", + "name": "agora_rtc", + "property": { + "app_id": "${env:AGORA_APP_ID}", + "token": "", + "channel": "ten_agent_test", + "stream_id": 1234, + "remote_stream_id": 123, + "subscribe_audio": true, + "publish_audio": true, + "publish_data": true, + "subscribe_audio_sample_rate": 24000 + } + }, + { + "type": "extension", + "extension_group": "llm", + "addon": "openai_v2v_python", + "name": "openai_v2v_python", + "property": { + "api_key": "${env:OPENAI_REALTIME_API_KEY}", + "temperature": 0.9, + "model": "gpt-4o-realtime-preview-2024-12-17", + "max_tokens": 2048, + "voice": "alloy", + "language": "en-US", + "server_vad": true, + "dump": true, + "max_history": 10, + "enable_storage": true + } + }, + { + "type": "extension", + "extension_group": "transcriber", + "addon": "message_collector", + "name": "message_collector" + }, + { + "type": "extension", + 
"extension_group": "tools", + "addon": "weatherapi_tool_python", + "name": "weatherapi_tool_python", + "property": { + "api_key": "${env:WEATHERAPI_API_KEY}" + } + }, + { + "type": "extension", + "extension_group": "tools", + "addon": "bingsearch_tool_python", + "name": "bingsearch_tool_python", + "property": { + "api_key": "${env:BING_API_KEY}" + } + }, + { + "type": "extension", + "extension_group": "context", + "addon": "tsdb_firestore", + "name": "tsdb_firestore", + "property": { + "credentials": { + "type": "service_account", + "project_id": "${env:FIRESTORE_PROJECT_ID}", + "private_key_id": "${env:FIRESTORE_PRIVATE_KEY_ID}", + "private_key": "${env:FIRESTORE_PRIVATE_KEY}", + "client_email": "${env:FIRESTORE_CLIENT_EMAIL}", + "client_id": "${env:FIRESTORE_CLIENT_ID}", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "${env:FIRESTORE_CERT_URL}", + "universe_domain": "googleapis.com" + }, + "channel_name": "ten_agent_test", + "collection_name": "llm_context" + } + } + ], + "connections": [ + { + "extension": "agora_rtc", + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "openai_v2v_python" + } + ] + } + ] + }, + { + "extension": "weatherapi_tool_python", + "cmd": [ + { + "name": "tool_register", + "dest": [ + { + "extension": "openai_v2v_python" + } + ] + } + ] + }, + { + "extension": "bingsearch_tool_python", + "cmd": [ + { + "name": "tool_register", + "dest": [ + { + "extension": "openai_v2v_python" + } + ] + } + ] + }, + { + "extension": "openai_v2v_python", + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ], + "data": [ + { + "name": "append", + "dest": [ + { + "extension": "tsdb_firestore" + } + ] + }, + { + "name": "text_data", + "dest": [ + { + "extension": "message_collector" + } + ] + } + ], + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "agora_rtc" + } + ] + }, + { + "name": "retrieve", + "dest": [ + { + "extension": "tsdb_firestore" + } + ] + }, + { + "name": "tool_call", + "dest": [ + { + "extension": "weatherapi_tool_python" + } + ] + }, + { + "name": "tool_call", + "dest": [ + { + "extension": "weatherapi_tool_python" + } + ] + } + ] + }, + { + "extension": "message_collector", + "data": [ + { + "name": "data", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + } + ] + }, + { + "name": "va_minimax_v2v", + "auto_start": false, + "nodes": [ + { + "type": "extension", + "extension_group": "rtc", + "addon": "agora_rtc", + "name": "agora_rtc", + "property": { + "app_id": "${env:AGORA_APP_ID}", + "token": "", + "channel": "ten_agent_test", + "stream_id": 1234, + "remote_stream_id": 123, + "subscribe_audio": true, + "publish_audio": true, + "publish_data": true + } + }, + { + "type": "extension", + "extension_group": "agora_sess_ctrl", + "addon": "agora_sess_ctrl", + "name": "agora_sess_ctrl", + "property": { + "wait_for_eos": true + } + }, + { + "type": "extension", + "extension_group": "llm", + "addon": "minimax_v2v_python", + "name": "minimax_v2v_python", + "property": { + "in_sample_rate": 16000, + "token": "${env:MINIMAX_TOKEN}" + } + }, + { + "type": "extension", + "extension_group": "message_collector", + "addon": "message_collector", + "name": "message_collector" + } + ], + "connections": [ + { + "extension": "agora_rtc", + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": 
"agora_sess_ctrl" + } + ] + } + ] + }, + { + "extension": "agora_sess_ctrl", + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "minimax_v2v_python" + } + ] + } + ], + "cmd": [ + { + "name": "start_of_sentence", + "dest": [ + { + "extension": "minimax_v2v_python", + "msg_conversion": { + "type": "per_property", + "keep_original": true, + "rules": [ + { + "path": "_ten.name", + "conversion_mode": "fixed_value", + "value": "flush" + } + ] + } + } + ] + } + ] + }, + { + "extension": "minimax_v2v_python", + "data": [ + { + "name": "text_data", + "dest": [ + { + "extension": "message_collector" + } + ] + } + ], + "audio_frame": [ + { + "name": "pcm_frame", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ], + "cmd": [ + { + "name": "flush", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + }, + { + "extension": "message_collector", + "data": [ + { + "name": "data", + "dest": [ + { + "extension": "agora_rtc" + } + ] + } + ] + } + ] + } + ] + } +} \ No newline at end of file diff --git a/agents/go.mod b/agents/go.mod new file mode 100644 index 0000000000000000000000000000000000000000..b5fa6db0870ee889344ec110cece2dbdfc8055be --- /dev/null +++ b/agents/go.mod @@ -0,0 +1,7 @@ +module app + +go 1.20 + +replace ten_framework => ./ten_packages/system/ten_runtime_go/interface + +require ten_framework v0.0.0-00010101000000-000000000000 diff --git a/agents/go.sum b/agents/go.sum new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/agents/main.go b/agents/main.go new file mode 100644 index 0000000000000000000000000000000000000000..0923cd50a7bf30cea08051fdc35b214d05020c1a --- /dev/null +++ b/agents/main.go @@ -0,0 +1,71 @@ +/** + * + * Agora Real Time Engagement + * Created by Wei Hu in 2022-10. + * Copyright (c) 2024 Agora IO. All rights reserved. + * + */ +package main + +import ( + "flag" + "log" + "os" + + "ten_framework/ten" +) + +type appConfig struct { + PropertyFilePath string +} + +type defaultApp struct { + ten.DefaultApp + + cfg *appConfig +} + +func (p *defaultApp) OnConfigure( + tenEnv ten.TenEnv, +) { + // Using the default property.json if not specified. + if len(p.cfg.PropertyFilePath) > 0 { + if b, err := os.ReadFile(p.cfg.PropertyFilePath); err != nil { + log.Fatalf("Failed to read property file %s, err %v\n", p.cfg.PropertyFilePath, err) + } else { + tenEnv.InitPropertyFromJSONBytes(b) + } + } + + tenEnv.OnConfigureDone() +} + +func startAppBlocking(cfg *appConfig) { + appInstance, err := ten.NewApp(&defaultApp{ + cfg: cfg, + }) + if err != nil { + log.Fatalf("Failed to create the app, %v\n", err) + } + + appInstance.Run(true) + appInstance.Wait() + + ten.EnsureCleanupWhenProcessExit() +} + +func setDefaultLog() { + log.SetFlags(log.LstdFlags | log.Lmicroseconds) +} + +func main() { + // Set the default log format globally, users can use `log.Println()` directly. 
+ setDefaultLog() + + cfg := &appConfig{} + + flag.StringVar(&cfg.PropertyFilePath, "property", "", "The absolute path of property.json") + flag.Parse() + + startAppBlocking(cfg) +} diff --git a/agents/scripts/.gitignore b/agents/scripts/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..5e63d995af6b83b23634df4f0d5156025be9e738 --- /dev/null +++ b/agents/scripts/.gitignore @@ -0,0 +1,4 @@ +graph +graph.pdf +graph.png + diff --git a/agents/scripts/BUILD.gn b/agents/scripts/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..7164e4862d28a4d109496dc85920bdfa22cbc2d0 --- /dev/null +++ b/agents/scripts/BUILD.gn @@ -0,0 +1,18 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2022-11. +# Copyright (c) 2024 Agora IO. All rights reserved. +# +# +import("//build/feature/ten_package.gni") + +ten_package("default_app_go") { + package_kind = "app" + enable_build = false + + resources = [ + "manifest.json", + "property.json", + ] +} diff --git a/agents/scripts/dot.py b/agents/scripts/dot.py new file mode 100644 index 0000000000000000000000000000000000000000..b38d2d631bdcf0e82dd351890a163e4728eb6703 --- /dev/null +++ b/agents/scripts/dot.py @@ -0,0 +1,117 @@ +# Generate a graph from a JSON file +# Currently it only generate the first graph but it's easy to make it generate all graphs +# Author: Seven Du +# usage: +# pip install graphviz +# python dot.py + +import json +import graphviz + +COLORS = { + "flush": "#999", + "cmd": "#0f0", + "data": "#00f", + "text_data": "#f00", + "pcm_frame": "purple", +} + +connection_types = ["data", "cmd", "audio_frame", "video_frame"] + + +def color(port): + if port in COLORS: + return COLORS[port] + return "#000" + + +def find_node(nodes, name): + for node in nodes: + if node["name"] == name: + return node + return None + + +def create_graph(json_data): + # Initialize a directed graph + graph = graphviz.Digraph("G", filename="graph.gv") + graph.graph_attr["rankdir"] = "LR" + graph.graph_attr["dpi"] = "150" + graph.graph_attr["splines"] = "true" + graph.attr("node", shape="none") + + # Add nodes to the graph + nodes = json_data["_ten"]["predefined_graphs"][0]["nodes"] + connections = json_data["_ten"]["predefined_graphs"][0]["connections"] + for node in nodes: + node["i_ports"] = ["flush"] + node["o_ports"] = ["flush"] + for node in nodes: + if node["type"] != "extension": + continue + for connection in connections: + if connection["extension"] == node["name"]: + for connection_type in connection_types: + if connection_type in connection: + data = connection[connection_type] + for item in data: + node["o_ports"].append(item["name"]) + for dest in item["dest"]: + dest_node = find_node(nodes, dest["extension"]) + if dest_node: + dest_node["i_ports"].append(item["name"]) + for node in nodes: + if node["type"] != "extension": + continue + node["i_ports"] = set(node["i_ports"]) + node["o_ports"] = set(node["o_ports"]) + print("====iports: ", node["name"], node["i_ports"]) + print("====oports: ", node["name"], node["o_ports"]) + iports = "" + for port in node["i_ports"]: + iports += f'⊙ {port}' + oports = "" + for port in node["o_ports"]: + oports += f'{port} ⊙' + + # Use HTML-like label for nodes + label = f"""< + + + + + + + +
{node["name"]}
properties
extensionGroup
{node["extension_group"]}
+ {iports}
+
+ {oports}
+
>""" + graph.node(node["name"], label) + + # Add edges to the graph + for connection in connections: + for connection_type in connection_types: + if connection_type in connection: + for data in connection[connection_type]: + for dest in data["dest"]: + graph.edge( + f'{connection["extension"]}:o_{data["name"]}', + f'{dest["extension"]}:i_{data["name"]}', + color=color(data["name"]), + label=connection_type, + ) + + # Save the graph to a file + print(graph.source) + graph.render("graph", format="png") + graph.view() + + +# Load the JSON data +with open("../property.json") as f: + data = json.load(f) + +# Create the graph +create_graph(data) diff --git a/agents/scripts/install_deps_and_build.sh b/agents/scripts/install_deps_and_build.sh new file mode 100644 index 0000000000000000000000000000000000000000..8dc8ac62fe26b1a08ed84c7f4b2d201502228d0d --- /dev/null +++ b/agents/scripts/install_deps_and_build.sh @@ -0,0 +1,145 @@ +#!/usr/bin/env bash + +# mac, linux +OS="linux" + +# x64, arm64 +CPU="x64" + +# debug, release +BUILD_TYPE="release" + +PIP_INSTALL_CMD=${PIP_INSTALL_CMD:-"uv pip install --system"} + +build_cxx_extensions() { + local app_dir=$1 + + if [[ ! -f $app_dir/scripts/BUILD.gn ]]; then + echo "FATAL: the scripts/BUILD.gn is required to build cxx extensions." + exit 1 + fi + + cp $app_dir/scripts/BUILD.gn $app_dir + + tgn gen $OS $CPU $BUILD_TYPE -- is_clang=false enable_sanitizer=false + tgn build $OS $CPU $BUILD_TYPE + + local ret=$? + + cd $app_dir + + if [[ $ret -ne 0 ]]; then + echo "FATAL: failed to build cxx extensions, see logs for detail." + exit 1 + fi + + # Copy the output of ten_packages to the ten_packages/extension/xx/lib. + local out="out/$OS/$CPU" + for extension in $out/ten_packages/extension/*; do + local extension_name=$(basename $extension) + if [[ $extension_name == "*" ]]; then + echo "No cxx extension, nothing to copy." + break + fi + if [[ ! -d $extension/lib ]]; then + echo "No output for extension $extension_name." + continue + fi + + mkdir -p $app_dir/ten_packages/extension/$extension_name/lib + cp -r $extension/lib/* $app_dir/ten_packages/extension/$extension_name/lib + done +} + +install_python_requirements() { + local app_dir=$1 + + if [[ -f "requirements.txt" ]]; then + ${PIP_INSTALL_CMD} install -r requirements.txt + fi + + # traverse the ten_packages/extension directory to find the requirements.txt + if [[ -d "ten_packages/extension" ]]; then + for extension in ten_packages/extension/*; do + if [[ -f "$extension/requirements.txt" ]]; then + ${PIP_INSTALL_CMD} -r $extension/requirements.txt + fi + done + fi + + # traverse the ten_packages/system directory to find the requirements.txt + if [[ -d "ten_packages/system" ]]; then + for extension in ten_packages/system/*; do + if [[ -f "$extension/requirements.txt" ]]; then + ${PIP_INSTALL_CMD} -r $extension/requirements.txt + fi + done + fi + + # pre-import llama-index as it cloud download additional resources during the first import + echo "pre-import python modules..." + python3.10 -c "import llama_index.core;" +} + +build_go_app() { + local app_dir=$1 + cd $app_dir + + go run ten_packages/system/ten_runtime_go/tools/build/main.go --verbose + if [[ $? -ne 0 ]]; then + echo "FATAL: failed to build go app, see logs for detail." + exit 1 + fi +} + +clean() { + local app_dir=$1 + rm -rf BUILD.gn out +} + +main() { + APP_HOME=$( + cd $(dirname $0)/.. 
+    pwd
+  )
+
+  if [[ $1 == "-clean" ]]; then
+    clean $APP_HOME
+    exit 0
+  fi
+
+  if [[ $# -ne 2 ]]; then
+    echo "Usage: $0 <os> <cpu>"
+    exit 1
+  fi
+
+  OS=$1
+  CPU=$2
+
+  echo -e "#include <immintrin.h>\n#include <stdio.h>\nint main() { __m256 a = _mm256_setzero_ps(); return 0; }" > /tmp/test.c
+  if gcc -mavx2 /tmp/test.c -o /tmp/test && ! /tmp/test; then
+    echo "FATAL: unsupported platform."
+    echo "       Please UNCHECK the 'Use Rosetta for x86_64/amd64 emulation on Apple Silicon' Docker Desktop setting if you're running on mac."
+
+    exit 1
+  fi
+
+  if [[ ! -f $APP_HOME/manifest.json ]]; then
+    echo "FATAL: manifest.json is required."
+    exit 1
+  fi
+
+  # Install all dependencies specified in manifest.json.
+  echo "install dependencies..."
+  tman install
+
+  # build extensions and app
+  echo "build_cxx_extensions..."
+  build_cxx_extensions $APP_HOME
+  echo "build_go_app..."
+  build_go_app $APP_HOME
+  echo "install_python_requirements..."
+  install_python_requirements $APP_HOME
+}
+
+main "$@"
diff --git a/agents/scripts/package.sh b/agents/scripts/package.sh
new file mode 100644
index 0000000000000000000000000000000000000000..41f15d14d5b88b4609b0b0740aa382e28ad911da
--- /dev/null
+++ b/agents/scripts/package.sh
@@ -0,0 +1,68 @@
+#!/usr/bin/env bash
+
+APP_HOME=$(
+  cd $(dirname $0)/..
+  pwd
+)
+
+cd $APP_HOME
+
+rm -rf .release
+mkdir .release
+
+copy_package() {
+  local package_type=$1
+  local package_name=$2
+  mkdir -p .release/ten_packages/${package_type}/${package_name}
+
+  if [[ -d ten_packages/${package_type}/${package_name}/lib ]]; then
+    cp -r ten_packages/${package_type}/${package_name}/lib .release/ten_packages/${package_type}/${package_name}/
+  fi
+
+  if [[ -d ten_packages/${package_type}/${package_name}/interface ]]; then
+    cp -r ten_packages/${package_type}/${package_name}/interface .release/ten_packages/${package_type}/${package_name}/
+  fi
+
+  if [[ -f ten_packages/${package_type}/${package_name}/manifest.json ]]; then
+    cp ten_packages/${package_type}/${package_name}/manifest.json .release/ten_packages/${package_type}/${package_name}/
+  fi
+
+  if [[ -f ten_packages/${package_type}/${package_name}/property.json ]]; then
+    cp ten_packages/${package_type}/${package_name}/property.json .release/ten_packages/${package_type}/${package_name}/
+  fi
+
+  # package .py for python extensions
+  # TODO: package 'publish' contents only
+  cp ten_packages/${package_type}/${package_name}/*.py .release/ten_packages/${package_type}/${package_name}/ || true
+  if [[ -f ten_packages/${package_type}/${package_name}/requirements.txt ]]; then
+    cp ten_packages/${package_type}/${package_name}/requirements.txt .release/ten_packages/${package_type}/${package_name}/
+  fi
+
+  # TODO: copy specific contents
+  if [[ -d ten_packages/${package_type}/${package_name}/pb ]]; then
+    cp -r ten_packages/${package_type}/${package_name}/pb .release/ten_packages/${package_type}/${package_name}/
+  fi
+  if [[ -d ten_packages/${package_type}/${package_name}/src ]]; then
+    cp -r ten_packages/${package_type}/${package_name}/src .release/ten_packages/${package_type}/${package_name}/
+  fi
+  if [[ -d ten_packages/${package_type}/${package_name}/realtime ]]; then
+    cp -r ten_packages/${package_type}/${package_name}/realtime .release/ten_packages/${package_type}/${package_name}/
+  fi
+}
+
+cp -r bin .release
+cp manifest.json .release
+cp property.json .release
+
+# copy packages
+mkdir -p .release/ten_packages
+for package_type in system extension_group extension addon_loader; do
+  for package_path in ten_packages/${package_type}/*; do
+    package_name=$(basename
${package_path}) + copy_package ${package_type} ${package_name} + done +done + +if [[ -f session_control.conf ]]; then + cp -r session_control.conf .release/ +fi diff --git a/agents/scripts/pylint.sh b/agents/scripts/pylint.sh new file mode 100644 index 0000000000000000000000000000000000000000..57f251f39bca3509ecb84ad135e6295199465f80 --- /dev/null +++ b/agents/scripts/pylint.sh @@ -0,0 +1,4 @@ +#!/bin/bash + + +pylint ./agents/ten_packages/extension/. || pylint-exit --warn-fail --error-fail $? \ No newline at end of file diff --git a/agents/session_control.conf b/agents/session_control.conf new file mode 100644 index 0000000000000000000000000000000000000000..2c63c0851048d8f7bff41ecf0f8cee05f52fd120 --- /dev/null +++ b/agents/session_control.conf @@ -0,0 +1,2 @@ +{ +} diff --git a/agents/ten_packages/bak/litellm_python/__init__.py b/agents/ten_packages/bak/litellm_python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..642a26640ff5b6b5bc5b099be566f1052545eb73 --- /dev/null +++ b/agents/ten_packages/bak/litellm_python/__init__.py @@ -0,0 +1,6 @@ +from . import litellm_addon +from .extension import EXTENSION_NAME +from .log import logger + + +logger.info(f"{EXTENSION_NAME} extension loaded") diff --git a/agents/ten_packages/bak/litellm_python/extension.py b/agents/ten_packages/bak/litellm_python/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..a04c95ad313d9a226b818c666a7fd8fdbb549202 --- /dev/null +++ b/agents/ten_packages/bak/litellm_python/extension.py @@ -0,0 +1 @@ +EXTENSION_NAME = "litellm_python" diff --git a/agents/ten_packages/bak/litellm_python/litellm.py b/agents/ten_packages/bak/litellm_python/litellm.py new file mode 100644 index 0000000000000000000000000000000000000000..b056bc691f20195bc6eacc3c2253d21a2445d325 --- /dev/null +++ b/agents/ten_packages/bak/litellm_python/litellm.py @@ -0,0 +1,79 @@ +import litellm +import random +from typing import Dict, List, Optional + + +class LiteLLMConfig: + def __init__(self, + api_key: str, + base_url: str, + frequency_penalty: float, + max_tokens: int, + model: str, + presence_penalty: float, + prompt: str, + provider: str, + temperature: float, + top_p: float, + seed: Optional[int] = None,): + self.api_key = api_key + self.base_url = base_url + self.frequency_penalty = frequency_penalty + self.max_tokens = max_tokens + self.model = model + self.presence_penalty = presence_penalty + self.prompt = prompt + self.provider = provider + self.seed = seed if seed is not None else random.randint(0, 10000) + self.temperature = temperature + self.top_p = top_p + + @classmethod + def default_config(cls): + return cls( + api_key="", + base_url="", + max_tokens=512, + model="gpt-4o-mini", + frequency_penalty=0.9, + presence_penalty=0.9, + prompt="You are a voice assistant who talks in a conversational way and can chat with me like my friends. I will speak to you in English or Chinese, and you will answer in the corrected and improved version of my text with the language I use. Don’t talk like a robot, instead I would like you to talk like a real human with emotions. I will use your answer for text-to-speech, so don’t return me any meaningless characters. I want you to be helpful, when I’m asking you for advice, give me precise, practical and useful advice instead of being vague. 
When giving me a list of options, express the options in a narrative way instead of bullet points.", + provider="", + seed=random.randint(0, 10000), + temperature=0.1, + top_p=1.0 + ) + + +class LiteLLM: + def __init__(self, config: LiteLLMConfig): + self.config = config + + def get_chat_completions_stream(self, messages: List[Dict[str, str]]): + kwargs = { + "api_key": self.config.api_key, + "base_url": self.config.base_url, + "custom_llm_provider": self.config.provider, + "frequency_penalty": self.config.frequency_penalty, + "max_tokens": self.config.max_tokens, + "messages": [ + { + "role": "system", + "content": self.config.prompt, + }, + *messages, + ], + "model": self.config.model, + "presence_penalty": self.config.presence_penalty, + "seed": self.config.seed, + "stream": True, + "temperature": self.config.temperature, + "top_p": self.config.top_p, + } + + try: + response = litellm.completion(**kwargs) + + return response + except Exception as e: + raise Exception(f"get_chat_completions_stream failed, err: {e}") diff --git a/agents/ten_packages/bak/litellm_python/litellm_addon.py b/agents/ten_packages/bak/litellm_python/litellm_addon.py new file mode 100644 index 0000000000000000000000000000000000000000..f6f2425ff42591482c3c446bc235c33b1446ee88 --- /dev/null +++ b/agents/ten_packages/bak/litellm_python/litellm_addon.py @@ -0,0 +1,23 @@ +# +# +# Agora Real Time Engagement +# Created by XinHui Li in 2024. +# Copyright (c) 2024 Agora IO. All rights reserved. +# +# +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) +from .extension import EXTENSION_NAME +from .log import logger +from .litellm_extension import LiteLLMExtension + + +@register_addon_as_extension(EXTENSION_NAME) +class LiteLLMExtensionAddon(Addon): + def on_create_instance(self, ten: TenEnv, addon_name: str, context) -> None: + logger.info("on_create_instance") + + ten.on_create_instance_done(LiteLLMExtension(addon_name), context) diff --git a/agents/ten_packages/bak/litellm_python/litellm_extension.py b/agents/ten_packages/bak/litellm_python/litellm_extension.py new file mode 100644 index 0000000000000000000000000000000000000000..1b71389ecc56a5c33ea2b448043120c3d2ca1d7d --- /dev/null +++ b/agents/ten_packages/bak/litellm_python/litellm_extension.py @@ -0,0 +1,229 @@ +# +# +# Agora Real Time Engagement +# Created by XinHui Li in 2024. +# Copyright (c) 2024 Agora IO. All rights reserved. 
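+#
+# Rough data flow (a sketch inferred from the handlers below): upstream ASR sends
+# `text_data` with an `is_final` flag; on_data streams a LiteLLM completion, splits
+# it into sentences via parse_sentence(), and emits each sentence as `text_data`
+# with `end_of_segment`, e.g. (illustrative payloads):
+#   in:  {"text": "what's the weather", "is_final": true}
+#   out: {"text": "Let me check.", "end_of_segment": false}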
+#
+#
+from threading import Thread
+from ten import (
+    Extension,
+    TenEnv,
+    Cmd,
+    Data,
+    StatusCode,
+    CmdResult,
+)
+from .litellm import LiteLLM, LiteLLMConfig
+from .log import logger
+from .utils import get_micro_ts, parse_sentence
+
+
+CMD_IN_FLUSH = "flush"
+CMD_OUT_FLUSH = "flush"
+DATA_IN_TEXT_DATA_PROPERTY_TEXT = "text"
+DATA_IN_TEXT_DATA_PROPERTY_IS_FINAL = "is_final"
+DATA_OUT_TEXT_DATA_PROPERTY_TEXT = "text"
+DATA_OUT_TEXT_DATA_PROPERTY_TEXT_END_OF_SEGMENT = "end_of_segment"
+
+PROPERTY_API_KEY = "api_key"  # Required
+PROPERTY_BASE_URL = "base_url"  # Optional
+PROPERTY_FREQUENCY_PENALTY = "frequency_penalty"  # Optional
+PROPERTY_GREETING = "greeting"  # Optional
+PROPERTY_MAX_MEMORY_LENGTH = "max_memory_length"  # Optional
+PROPERTY_MAX_TOKENS = "max_tokens"  # Optional
+PROPERTY_MODEL = "model"  # Optional
+PROPERTY_PRESENCE_PENALTY = "presence_penalty"  # Optional
+PROPERTY_PROMPT = "prompt"  # Optional
+PROPERTY_PROVIDER = "provider"  # Optional
+PROPERTY_TEMPERATURE = "temperature"  # Optional
+PROPERTY_TOP_P = "top_p"  # Optional
+
+
+class LiteLLMExtension(Extension):
+    memory = []
+    max_memory_length = 10
+    outdate_ts = 0
+    litellm = None
+
+    def on_start(self, ten: TenEnv) -> None:
+        logger.info("LiteLLMExtension on_start")
+        # Prepare configuration
+        litellm_config = LiteLLMConfig.default_config()
+
+        for key in [PROPERTY_API_KEY, PROPERTY_GREETING, PROPERTY_MODEL, PROPERTY_PROMPT]:
+            try:
+                val = ten.get_property_string(key)
+                if val:
+                    # set the config attribute whose name is held in `key`
+                    setattr(litellm_config, key, val)
+            except Exception as e:
+                logger.warning(f"get_property_string optional {key} failed, err: {e}")
+
+        for key in [PROPERTY_FREQUENCY_PENALTY, PROPERTY_PRESENCE_PENALTY, PROPERTY_TEMPERATURE, PROPERTY_TOP_P]:
+            try:
+                setattr(litellm_config, key, float(ten.get_property_float(key)))
+            except Exception as e:
+                logger.warning(f"get_property_float optional {key} failed, err: {e}")
+
+        for key in [PROPERTY_MAX_MEMORY_LENGTH, PROPERTY_MAX_TOKENS]:
+            try:
+                setattr(litellm_config, key, int(ten.get_property_int(key)))
+            except Exception as e:
+                logger.warning(f"get_property_int optional {key} failed, err: {e}")
+
+        # Create LiteLLM instance
+        self.litellm = LiteLLM(litellm_config)
+        logger.info(f"newLiteLLM succeeded with max_tokens: {litellm_config.max_tokens}, model: {litellm_config.model}")
+
+        # Send greeting if available
+        greeting = ten.get_property_string(PROPERTY_GREETING)
+        if greeting:
+            try:
+                output_data = Data.create("text_data")
+                output_data.set_property_string(DATA_OUT_TEXT_DATA_PROPERTY_TEXT, greeting)
+                output_data.set_property_bool(DATA_OUT_TEXT_DATA_PROPERTY_TEXT_END_OF_SEGMENT, True)
+                ten.send_data(output_data)
+                logger.info(f"greeting [{greeting}] sent")
+            except Exception as e:
+                logger.error(f"greeting [{greeting}] send failed, err: {e}")
+
+        ten.on_start_done()
+
+    def on_stop(self, ten: TenEnv) -> None:
+        logger.info("LiteLLMExtension on_stop")
+        ten.on_stop_done()
+
+    def on_cmd(self, ten: TenEnv, cmd: Cmd) -> None:
+        logger.info("LiteLLMExtension on_cmd")
+        cmd_json = cmd.to_json()
+        logger.info(f"LiteLLMExtension on_cmd json: {cmd_json}")
+
+        cmd_name = cmd.get_name()
+
+        if cmd_name == CMD_IN_FLUSH:
+            self.outdate_ts = get_micro_ts()
+            cmd_out = Cmd.create(CMD_OUT_FLUSH)
+            ten.send_cmd(cmd_out, None)
+            logger.info("LiteLLMExtension on_cmd sent flush")
+        else:
+            logger.info(f"LiteLLMExtension on_cmd unknown cmd: {cmd_name}")
+            cmd_result = CmdResult.create(StatusCode.ERROR)
+            cmd_result.set_property_string("detail", "unknown cmd")
+            ten.return_result(cmd_result, cmd)
+            return
+
+        cmd_result = CmdResult.create(StatusCode.OK)
+        cmd_result.set_property_string("detail", "success")
+        ten.return_result(cmd_result, cmd)
+
+    def on_data(self, ten: TenEnv, data: Data) -> None:
+        """
+        on_data receives data from the ten graph.
+        currently supported data:
+          - name: text_data
+            example:
+            {"name": "text_data", "properties": {"text": "hello"}}
+        """
+        logger.info("LiteLLMExtension on_data")
+
+        # Assume 'data' is an object from which we can get properties
+        try:
+            is_final = data.get_property_bool(DATA_IN_TEXT_DATA_PROPERTY_IS_FINAL)
+            if not is_final:
+                logger.info("ignore non-final input")
+                return
+        except Exception as e:
+            logger.error(f"on_data get_property_bool {DATA_IN_TEXT_DATA_PROPERTY_IS_FINAL} failed, err: {e}")
+            return
+
+        # Get input text
+        try:
+            input_text = data.get_property_string(DATA_IN_TEXT_DATA_PROPERTY_TEXT)
+            if not input_text:
+                logger.info("ignore empty text")
+                return
+            logger.info(f"on_data input text: [{input_text}]")
+        except Exception as e:
+            logger.error(f"on_data get_property_string {DATA_IN_TEXT_DATA_PROPERTY_TEXT} failed, err: {e}")
+            return
+
+        # Prepare memory
+        if len(self.memory) > self.max_memory_length:
+            self.memory.pop(0)
+        self.memory.append({"role": "user", "content": input_text})
+
+        def chat_completions_stream_worker(start_time, input_text, memory):
+            try:
+                logger.info(f"chat_completions_stream_worker for input text: [{input_text}] memory: {memory}")
+
+                # Get result from AI
+                resp = self.litellm.get_chat_completions_stream(memory)
+                if resp is None:
+                    logger.info(f"chat_completions_stream_worker for input text: [{input_text}] failed")
+                    return
+
+                sentence = ""
+                full_content = ""
+                first_sentence_sent = False
+
+                for chat_completions in resp:
+                    if start_time < self.outdate_ts:
+                        logger.info(f"chat_completions_stream_worker recv interrupt and flushing for input text: [{input_text}], startTs: {start_time}, outdateTs: {self.outdate_ts}")
+                        break
+
+                    if (len(chat_completions.choices) > 0 and chat_completions.choices[0].delta.content is not None):
+                        content = chat_completions.choices[0].delta.content
+                    else:
+                        content = ""
+
+                    full_content += content
+
+                    while True:
+                        sentence, content, sentence_is_final = parse_sentence(sentence, content)
+
+                        if len(sentence) == 0 or not sentence_is_final:
+                            logger.info(f"sentence {sentence} is empty or not final")
+                            break
+
+                        logger.info(f"chat_completions_stream_worker recv for input text: [{input_text}] got sentence: [{sentence}]")
+
+                        # send sentence
+                        try:
+                            output_data = Data.create("text_data")
+                            output_data.set_property_string(DATA_OUT_TEXT_DATA_PROPERTY_TEXT, sentence)
+                            output_data.set_property_bool(DATA_OUT_TEXT_DATA_PROPERTY_TEXT_END_OF_SEGMENT, False)
+                            ten.send_data(output_data)
+                            logger.info(f"chat_completions_stream_worker recv for input text: [{input_text}] sent sentence [{sentence}]")
+                        except Exception as e:
+                            logger.error(f"chat_completions_stream_worker recv for input text: [{input_text}] send sentence [{sentence}] failed, err: {e}")
+                            break
+
+                        sentence = ""
+                        if not first_sentence_sent:
+                            first_sentence_sent = True
+                            logger.info(f"chat_completions_stream_worker recv for input text: [{input_text}] first sentence sent, first_sentence_latency {(get_micro_ts() - start_time) / 1000}ms")
+
+                # remember response as assistant content in memory
+                memory.append({"role": "assistant", "content": full_content})
+
+                # send end of segment
+                try:
+                    output_data = Data.create("text_data")
+                    output_data.set_property_string(DATA_OUT_TEXT_DATA_PROPERTY_TEXT, sentence)
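+                    # (descriptive) end_of_segment=True marks the final, possibly empty,
+                    # tail of this reply so downstream consumers (e.g. TTS) can flush.
+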
output_data.set_property_bool(DATA_OUT_TEXT_DATA_PROPERTY_TEXT_END_OF_SEGMENT, True) + ten.send_data(output_data) + logger.info(f"chat_completions_stream_worker for input text: [{input_text}] end of segment with sentence [{sentence}] sent") + except Exception as e: + logger.error(f"chat_completions_stream_worker for input text: [{input_text}] end of segment with sentence [{sentence}] send failed, err: {e}") + + except Exception as e: + logger.error(f"chat_completions_stream_worker for input text: [{input_text}] failed, err: {e}") + + # Start thread to request and read responses from LiteLLM + start_time = get_micro_ts() + thread = Thread( + target=chat_completions_stream_worker, + args=(start_time, input_text, self.memory), + ) + thread.start() + logger.info(f"LiteLLMExtension on_data end") diff --git a/agents/ten_packages/bak/litellm_python/log.py b/agents/ten_packages/bak/litellm_python/log.py new file mode 100644 index 0000000000000000000000000000000000000000..fad217105bb59af1d78e19714726079dcbcc209b --- /dev/null +++ b/agents/ten_packages/bak/litellm_python/log.py @@ -0,0 +1,12 @@ +import logging +from .extension import EXTENSION_NAME + +logger = logging.getLogger(EXTENSION_NAME) +logger.setLevel(logging.INFO) + +formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(process)d - [%(filename)s:%(lineno)d] - %(message)s") + +console_handler = logging.StreamHandler() +console_handler.setFormatter(formatter) + +logger.addHandler(console_handler) diff --git a/agents/ten_packages/bak/litellm_python/manifest.json b/agents/ten_packages/bak/litellm_python/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..19e1cda1734117939e9d1a78bd9ed9a3cb801f31 --- /dev/null +++ b/agents/ten_packages/bak/litellm_python/manifest.json @@ -0,0 +1,82 @@ +{ + "type": "extension", + "name": "litellm_python", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "api": { + "property": { + "api_key": { + "type": "string" + }, + "base_url": { + "type": "string" + }, + "frequency_penalty": { + "type": "float64" + }, + "greeting": { + "type": "string" + }, + "max_memory_length": { + "type": "int64" + }, + "max_tokens": { + "type": "int64" + }, + "model": { + "type": "string" + }, + "presence_penalty": { + "type": "float64" + }, + "prompt": { + "type": "string" + }, + "provider": { + "type": "string" + }, + "temperature": { + "type": "float64" + }, + "top_p": { + "type": "float64" + } + }, + "data_in": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + } + } + } + ], + "data_out": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + } + } + } + ], + "cmd_in": [ + { + "name": "flush" + } + ], + "cmd_out": [ + { + "name": "flush" + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/bak/litellm_python/requirements.txt b/agents/ten_packages/bak/litellm_python/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..2f7601efb1a2307e5649ffd1368665d646fcf12d --- /dev/null +++ b/agents/ten_packages/bak/litellm_python/requirements.txt @@ -0,0 +1 @@ +litellm==1.42.12 \ No newline at end of file diff --git a/agents/ten_packages/bak/litellm_python/utils.py b/agents/ten_packages/bak/litellm_python/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e387d906a86ea4b2b5647a820bb361a9df9f209b --- /dev/null +++ b/agents/ten_packages/bak/litellm_python/utils.py @@ -0,0 +1,19 @@ +import time 
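+
+# Illustrative behaviour of the helpers below (comments only, not executed):
+#   parse_sentence("", "Hello, world") -> ("Hello,", " world", True)    # split at first punctuation
+#   parse_sentence("Hello,", " world") -> ("Hello, world", "", False)   # no punctuation yet; keep buffering
+#   get_micro_ts() -> e.g. 1700000000000000  (wall-clock time in microseconds)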
+
+
+def get_micro_ts():
+    return int(time.time() * 1_000_000)
+
+
+def is_punctuation(char: str):
+    return char in [",", "，", ".", "。", "?", "？", "!", "！"]
+
+
+def parse_sentence(sentence: str, content: str):
+    for i, char in enumerate(content):
+        sentence += char
+
+        if is_punctuation(char):
+            return sentence, content[i + 1:], True
+
+    return sentence, "", False
diff --git a/agents/ten_packages/extension/agora_rtm_wrapper/extension.go b/agents/ten_packages/extension/agora_rtm_wrapper/extension.go
new file mode 100644
index 0000000000000000000000000000000000000000..680daae22844bd1813911482c7d630ea88523809
--- /dev/null
+++ b/agents/ten_packages/extension/agora_rtm_wrapper/extension.go
@@ -0,0 +1,180 @@
+/**
+ *
+ * Agora Real Time Engagement
+ * Created by Wei Hu in 2022-10.
+ * Copyright (c) 2024 Agora IO. All rights reserved.
+ *
+ */
+// Note that this is just an example extension written in Go, so the package
+// name does not match the containing directory name, which is uncommon in Go.
+package extension
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+
+	"ten_framework/ten"
+)
+
+// ColllectorMessage represents the text output result from the message collector.
+// @Description output result
+type ColllectorMessage struct {
+	Text     string `json:"text"`      // recognized text
+	IsFinal  bool   `json:"is_final"`  // whether this is the final result
+	StreamID int32  `json:"stream_id"` // stream ID
+	Type     string `json:"data_type"` // data type
+	Ts       uint64 `json:"text_ts"`   // timestamp
+}
+
+// Message represents the text output result
+// @Description output result
+type Message struct {
+	Text     string `json:"text"`      // recognized text
+	IsFinal  bool   `json:"is_final"`  // whether this is the final result
+	StreamID string `json:"stream_id"` // stream ID
+	Type     string `json:"type"`      // data type
+	Ts       uint64 `json:"ts"`        // timestamp
+}
+
+// RtcUserSate represents the rtc user state
+// @Description RTC user state
+type RtcUserSate struct {
+	RemoteUserID string `json:"remote_user_id"` // remote user ID
+	State        string `json:"state"`          // state
+	Reason       string `json:"reason"`         // reason
+}
+
+type agoraRtmWrapperExtension struct {
+	ten.DefaultExtension
+}
+
+func newExtension(name string) ten.Extension {
+	return &agoraRtmWrapperExtension{}
+}
+
+// OnData receives data from ten graph.
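+// The incoming payload is the JSON emitted by the message_collector extension;
+// an illustrative (not normative) example:
+//	{"text": "hello", "is_final": true, "stream_id": 123, "data_type": "transcribe", "text_ts": 1700000000}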
+func (p *agoraRtmWrapperExtension) OnData( + tenEnv ten.TenEnv, + data ten.Data, +) { + buf, err := data.GetPropertyBytes("data") + if err != nil { + tenEnv.LogError("OnData GetProperty data error: " + err.Error()) + return + } + tenEnv.LogInfo("AGORA_RTM_WRAPPER_EXTENSION OnData: " + string(buf)) + colllectorMessage := ColllectorMessage{} + err = json.Unmarshal(buf, &colllectorMessage) + if err != nil { + tenEnv.LogError("OnData Unmarshal data error: " + err.Error()) + return + } + + message := Message{ + Text: colllectorMessage.Text, + IsFinal: colllectorMessage.IsFinal, + StreamID: strconv.Itoa(int(colllectorMessage.StreamID)), + Type: colllectorMessage.Type, + Ts: colllectorMessage.Ts, + } + jsonBytes, err := json.Marshal(message) + if err != nil { + tenEnv.LogError("failed to marshal JSON: " + err.Error()) + return + } + tenEnv.LogInfo("AGORA_RTM_WRAPPER_EXTENSION OnData: " + string(jsonBytes)) + + cmd, _ := ten.NewCmd("publish") + + err = cmd.SetPropertyBytes("message", jsonBytes) + if err != nil { + tenEnv.LogError("failed to set property message: " + err.Error()) + return + } + if err := tenEnv.SendCmd(cmd, func(_ ten.TenEnv, result ten.CmdResult, _ error) { + status, err := result.GetStatusCode() + tenEnv.LogInfo(fmt.Sprintf("AGORA_RTM_WRAPPER_EXTENSION publish result %d", status)) + if status != ten.StatusCodeOk || err != nil { + tenEnv.LogError("failed to subscribe") + } + }); err != nil { + tenEnv.LogError("failed to send command " + err.Error()) + } +} + +func (p *agoraRtmWrapperExtension) OnCmd(tenEnv ten.TenEnv, cmd ten.Cmd) { + defer func() { + if r := recover(); r != nil { + tenEnv.LogError(fmt.Sprintf("OnCmd panic: %v", r)) + } + cmdResult, err := ten.NewCmdResult(ten.StatusCodeOk) + if err != nil { + tenEnv.LogError(fmt.Sprintf("failed to create cmd result: %v", err)) + return + } + tenEnv.ReturnResult(cmdResult, cmd, nil) + }() + cmdName, err := cmd.GetName() + if err != nil { + tenEnv.LogError(fmt.Sprintf("failed to get cmd name: %v", err)) + return + } + tenEnv.LogInfo(fmt.Sprintf("received command: %s", cmdName)) + switch cmdName { + case "on_user_audio_track_state_changed": + // on_user_audio_track_state_changed + p.handleUserStateChanged(tenEnv, cmd) + default: + tenEnv.LogWarn(fmt.Sprintf("unsupported cmd: %s", cmdName)) + } +} + +func (p *agoraRtmWrapperExtension) handleUserStateChanged(tenEnv ten.TenEnv, cmd ten.Cmd) { + remoteUserID, err := cmd.GetPropertyString("remote_user_id") + if err != nil { + tenEnv.LogError(fmt.Sprintf("failed to get remote_user_id: %v", err)) + return + } + state, err := cmd.GetPropertyInt32("state") + if err != nil { + tenEnv.LogError(fmt.Sprintf("failed to get state: %v", err)) + return + } + reason, err := cmd.GetPropertyInt32("reason") + if err != nil { + tenEnv.LogError(fmt.Sprintf("failed to get reason: %v", err)) + return + } + userState := RtcUserSate{ + RemoteUserID: remoteUserID, + State: strconv.Itoa(int(state)), + Reason: strconv.Itoa(int(reason)), + } + jsonBytes, err := json.Marshal(userState) + if err != nil { + tenEnv.LogError("failed to marshal JSON: " + err.Error()) + return + } + sendCmd, _ := ten.NewCmd("set_presence_state") + sendCmd.SetPropertyString("states", string(jsonBytes)) + tenEnv.LogInfo("AGORA_RTM_WRAPPER_EXTENSION SetRtmPresenceState " + string(jsonBytes)) + if err := tenEnv.SendCmd(sendCmd, func(_ ten.TenEnv, result ten.CmdResult, _ error) { + status, err := result.GetStatusCode() + tenEnv.LogInfo(fmt.Sprintf("AGORA_RTM_WRAPPER_EXTENSION SetRtmPresenceState result %d", status)) + if status != 
ten.StatusCodeOk || err != nil { + panic("failed to SetRtmPresenceState") + } + }); err != nil { + tenEnv.LogError("failed to send command " + err.Error()) + } +} + +func init() { + // Register addon + ten.RegisterAddonAsExtension( + "agora_rtm_wrapper", + ten.NewDefaultExtensionAddon(newExtension), + ) +} diff --git a/agents/ten_packages/extension/agora_rtm_wrapper/go.mod b/agents/ten_packages/extension/agora_rtm_wrapper/go.mod new file mode 100644 index 0000000000000000000000000000000000000000..4b9f6b7de258b1a3f9e2f50400f3663d8c92fcf7 --- /dev/null +++ b/agents/ten_packages/extension/agora_rtm_wrapper/go.mod @@ -0,0 +1,7 @@ +module agora_rtm_wrapper + +go 1.20 + +replace ten_framework => ../../system/ten_runtime_go/interface + +require ten_framework v0.0.0-00010101000000-000000000000 diff --git a/agents/ten_packages/extension/agora_rtm_wrapper/manifest.json b/agents/ten_packages/extension/agora_rtm_wrapper/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..8bb8ace6dfd7d6457428b8827fcad2186003120f --- /dev/null +++ b/agents/ten_packages/extension/agora_rtm_wrapper/manifest.json @@ -0,0 +1,32 @@ +{ + "type": "extension", + "name": "agora_rtm_wrapper", + "version": "0.1.4", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_go", + "version": "0.8" + } + ], + "api": { + "data_in": [ + { + "name": "data" + } + ], + "cmd_out": [ + { + "name": "publish", + "property": { + "message": { + "type": "buf" + } + } + }, + { + "name": "set_presence_state" + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/agora_rtm_wrapper/property.json b/agents/ten_packages/extension/agora_rtm_wrapper/property.json new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/agents/ten_packages/extension/agora_rtm_wrapper/property.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/agents/ten_packages/extension/aliyun_analyticdb_vector_storage/__init__.py b/agents/ten_packages/extension/aliyun_analyticdb_vector_storage/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b5d987b022b5cd11575db6d614e01458a3f2e24b --- /dev/null +++ b/agents/ten_packages/extension/aliyun_analyticdb_vector_storage/__init__.py @@ -0,0 +1 @@ +from . 
import vector_storage_addon
diff --git a/agents/ten_packages/extension/aliyun_analyticdb_vector_storage/client.py b/agents/ten_packages/extension/aliyun_analyticdb_vector_storage/client.py
new file mode 100644
index 0000000000000000000000000000000000000000..48dcdb183dabf77825817027e1b1cb8eb392366e
--- /dev/null
+++ b/agents/ten_packages/extension/aliyun_analyticdb_vector_storage/client.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+
+import asyncio
+import threading
+from typing import Coroutine
+from concurrent.futures import Future
+
+
+from alibabacloud_gpdb20160503.client import Client as gpdb20160503Client
+from alibabacloud_tea_openapi import models as open_api_models
+
+
+# maybe need multiple clients
+class AliGPDBClient:
+    def __init__(self, ten_env, access_key_id, access_key_secret, endpoint):
+        self.stopEvent = asyncio.Event()
+        self.loop = None
+        self.tasks = asyncio.Queue()
+        self.access_key_id = access_key_id
+        self.access_key_secret = access_key_secret
+        self.endpoint = endpoint
+        self.client = self.create_client()
+        self.thread = threading.Thread(
+            target=asyncio.run, args=(self.__thread_routine(),)
+        )
+        self.thread.start()
+        self.ten_env = ten_env
+
+    async def stop_thread(self):
+        self.stopEvent.set()
+
+    def create_client(self) -> gpdb20160503Client:
+        config = open_api_models.Config(
+            access_key_id=self.access_key_id,
+            access_key_secret=self.access_key_secret,
+            endpoint=self.endpoint,
+        )
+        return gpdb20160503Client(config)
+
+    def get(self) -> gpdb20160503Client:
+        return self.client
+
+    def close(self):
+        if (self.loop is not None) and self.thread.is_alive():
+            self.stopEvent.set()
+            asyncio.run_coroutine_threadsafe(self.stop_thread(), self.loop)
+            self.thread.join()
+
+    async def __thread_routine(self):
+        self.ten_env.log_info("client __thread_routine start")
+        self.loop = asyncio.get_running_loop()
+        tasks = set()
+        while not self.stopEvent.is_set():
+            if not self.tasks.empty():
+                coro, future = await self.tasks.get()
+                try:
+                    task = asyncio.create_task(coro)
+                    tasks.add(task)
+                    # Bind the future per task via a default argument; a bare
+                    # closure would see only the most recent `future`, and
+                    # failures must go through set_exception rather than
+                    # letting t.result() raise inside the callback.
+                    task.add_done_callback(
+                        lambda t, fut=future: fut.set_exception(t.exception())
+                        if t.exception()
+                        else fut.set_result(t.result())
+                    )
+                except Exception as e:
+                    future.set_exception(e)
+            elif tasks:
+                done, tasks = await asyncio.wait(
+                    tasks, return_when=asyncio.FIRST_COMPLETED
+                )
+                for task in done:
+                    if task.exception():
+                        self.ten_env.log_error(f"task exception: {task.exception()}")
+            else:
+                await asyncio.sleep(0.1)
+        self.ten_env.log_info("client __thread_routine end")
+
+    async def submit_task(self, coro: Coroutine) -> Future:
+        future = Future()
+        await self.tasks.put((coro, future))
+        return future
+
+    def submit_task_with_new_thread(self, coro: Coroutine) -> Future:
+        future = Future()
+
+        def run_coro_in_new_thread():
+            loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(loop)
+            try:
+                result = loop.run_until_complete(coro)
+                future.set_result(result)
+            except Exception as e:
+                future.set_exception(e)
+            finally:
+                loop.close()
+
+        thread = threading.Thread(target=run_coro_in_new_thread)
+        thread.start()
+        return future
diff --git a/agents/ten_packages/extension/aliyun_analyticdb_vector_storage/manifest.json b/agents/ten_packages/extension/aliyun_analyticdb_vector_storage/manifest.json
new file mode 100644
index 0000000000000000000000000000000000000000..0463972fde79256ab508718f9c34c6fc16043174
--- /dev/null
+++ b/agents/ten_packages/extension/aliyun_analyticdb_vector_storage/manifest.json
@@ -0,0 +1,121 @@
+{
+  "type": "extension",
+  "name": "aliyun_analyticdb_vector_storage",
+  "version": "0.1.0",
+
"dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "api": { + "property": { + "alibaba_cloud_access_key_id": { + "type": "string" + }, + "alibaba_cloud_access_key_secret": { + "type": "string" + }, + "adbpg_instance_id": { + "type": "string" + }, + "adbpg_instance_region": { + "type": "string" + }, + "adbpg_account": { + "type": "string" + }, + "adbpg_account_password": { + "type": "string" + }, + "adbpg_namespace": { + "type": "string" + }, + "adbpg_namespace_password": { + "type": "string" + } + }, + "cmd_in": [ + { + "name": "upsert_vector", + "property": { + "collection_name": { + "type": "string" + }, + "file_name": { + "type": "string" + }, + "content": { + "type": "string" + } + } + }, + { + "name": "query_vector", + "property": { + "collection_name": { + "type": "string" + }, + "top_k": { + "type": "int64" + }, + "embedding": { + "type": "array", + "items": { + "type": "float64" + } + } + }, + "required": [ + "collection_name", + "top_k", + "embedding" + ], + "result": { + "property": { + "response": { + "type": "array", + "items": { + "type": "object", + "properties": { + "content": { + "type": "string" + }, + "score": { + "type": "float64" + } + } + } + } + } + } + }, + { + "name": "create_collection", + "property": { + "collection_name": { + "type": "string" + }, + "dimension": { + "type": "int32" + } + }, + "required": [ + "collection_name" + ] + }, + { + "name": "delete_collection", + "property": { + "collection_name": { + "type": "string" + } + }, + "required": [ + "collection_name" + ] + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/aliyun_analyticdb_vector_storage/model.py b/agents/ten_packages/extension/aliyun_analyticdb_vector_storage/model.py new file mode 100644 index 0000000000000000000000000000000000000000..1b3b47f34fd02ed700d514b86d264d1485110707 --- /dev/null +++ b/agents/ten_packages/extension/aliyun_analyticdb_vector_storage/model.py @@ -0,0 +1,546 @@ +# -*- coding: utf-8 -*- + +from alibabacloud_gpdb20160503 import models as gpdb_20160503_models # type: ignore +import time +import json +from typing import Dict, List, Any, Tuple +from alibabacloud_tea_util import models as util_models + + +class Model: + def __init__(self, ten_env, region_id, dbinstance_id, client): + self.region_id = region_id + self.dbinstance_id = dbinstance_id + self.client = client + self.read_timeout = 10 * 1000 + self.connect_timeout = 10 * 1000 + self.ten_env = ten_env + + def get_client(self): + return self.client.get() + + def init_vector_database(self, account, account_password) -> None: + try: + request = gpdb_20160503_models.InitVectorDatabaseRequest( + region_id=self.region_id, + dbinstance_id=self.dbinstance_id, + manager_account=account, + manager_account_password=account_password, + ) + runtime = util_models.RuntimeOptions( + read_timeout=self.read_timeout, connect_timeout=self.connect_timeout + ) + response = self.get_client().init_vector_database_with_options( + request, runtime + ) + self.ten_env.log_debug( + f"init_vector_database response code: {response.status_code}, body:{response.body}" + ) + except Exception as e: + self.ten_env.log_error(f"Error: {e}") + return e + + async def init_vector_database_async(self, account, account_password) -> None: + try: + request = gpdb_20160503_models.InitVectorDatabaseRequest( + region_id=self.region_id, + dbinstance_id=self.dbinstance_id, + manager_account=account, + manager_account_password=account_password, + ) + runtime = 
util_models.RuntimeOptions( + read_timeout=self.read_timeout, connect_timeout=self.connect_timeout + ) + response = await self.get_client().init_vector_database_with_options_async( + request, runtime + ) + self.ten_env.log_debug( + f"init_vector_database response code: {response.status_code}, body:{response.body}" + ) + except Exception as e: + self.ten_env.log_error(f"Error: {e}") + return e + + def create_namespace( + self, account, account_password, namespace, namespace_password + ) -> None: + try: + request = gpdb_20160503_models.CreateNamespaceRequest( + region_id=self.region_id, + dbinstance_id=self.dbinstance_id, + manager_account=account, + manager_account_password=account_password, + namespace=namespace, + namespace_password=namespace_password, + ) + runtime = util_models.RuntimeOptions( + read_timeout=self.read_timeout, connect_timeout=self.connect_timeout + ) + response = self.get_client().create_namespace_with_options(request, runtime) + self.ten_env.log_debug( + f"create_namespace response code: {response.status_code}, body:{response.body}" + ) + except Exception as e: + self.ten_env.log_error(f"Error: {e}") + return e + + async def create_namespace_async( + self, account, account_password, namespace, namespace_password + ) -> None: + try: + request = gpdb_20160503_models.CreateNamespaceRequest( + region_id=self.region_id, + dbinstance_id=self.dbinstance_id, + manager_account=account, + manager_account_password=account_password, + namespace=namespace, + namespace_password=namespace_password, + ) + runtime = util_models.RuntimeOptions( + read_timeout=self.read_timeout, connect_timeout=self.connect_timeout + ) + response = await self.get_client().create_namespace_with_options_async( + request, runtime + ) + self.ten_env.log_debug( + f"create_namespace response code: {response.status_code}, body:{response.body}" + ) + except Exception as e: + self.ten_env.log_error(f"Error: {e}") + return e + + def create_collection( + self, + account, + account_password, + namespace, + collection, + parser: str = None, + metrics: str = None, + hnsw_m: int = None, + pq_enable: int = None, + external_storage: int = None, + ) -> None: + try: + metadata = '{"update_ts": "bigint", "file_name": "text", "content": "text"}' + full_text_retrieval_fields = "update_ts,file_name" + request = gpdb_20160503_models.CreateCollectionRequest( + region_id=self.region_id, + dbinstance_id=self.dbinstance_id, + manager_account=account, + manager_account_password=account_password, + namespace=namespace, + collection=collection, + metadata=metadata, + full_text_retrieval_fields=full_text_retrieval_fields, + parser=parser, + metrics=metrics, + hnsw_m=hnsw_m, + pq_enable=pq_enable, + external_storage=external_storage, + ) + runtime = util_models.RuntimeOptions( + read_timeout=self.read_timeout, connect_timeout=self.connect_timeout + ) + response = self.get_client().create_collection_with_options( + request, runtime + ) + self.ten_env.log_debug( + f"create_document_collection response code: {response.status_code}, body:{response.body}" + ) + except Exception as e: + self.ten_env.log_error(f"Error: {e}") + return e + + async def create_collection_async( + self, + account, + account_password, + namespace, + collection, + parser: str = None, + metrics: str = None, + hnsw_m: int = None, + pq_enable: int = None, + external_storage: int = None, + ) -> None: + try: + metadata = '{"update_ts": "bigint", "file_name": "text", "content": "text"}' + full_text_retrieval_fields = "update_ts,file_name" + request = 
gpdb_20160503_models.CreateCollectionRequest( + region_id=self.region_id, + dbinstance_id=self.dbinstance_id, + manager_account=account, + manager_account_password=account_password, + namespace=namespace, + collection=collection, + metadata=metadata, + full_text_retrieval_fields=full_text_retrieval_fields, + parser=parser, + metrics=metrics, + hnsw_m=hnsw_m, + pq_enable=pq_enable, + external_storage=external_storage, + ) + runtime = util_models.RuntimeOptions( + read_timeout=self.read_timeout, connect_timeout=self.connect_timeout + ) + response = await self.get_client().create_collection_with_options_async( + request, runtime + ) + self.ten_env.log_debug( + f"create_document_collection response code: {response.status_code}, body:{response.body}" + ) + except Exception as e: + self.ten_env.log_error(f"Error: {e}") + return e + + def delete_collection(self, namespace, namespace_password, collection) -> None: + try: + request = gpdb_20160503_models.DeleteCollectionRequest( + region_id=self.region_id, + dbinstance_id=self.dbinstance_id, + namespace_password=namespace_password, + namespace=namespace, + collection=collection, + ) + runtime = util_models.RuntimeOptions( + read_timeout=self.read_timeout, connect_timeout=self.connect_timeout + ) + response = self.get_client().delete_collection_with_options( + request, runtime + ) + self.ten_env.log_debug( + f"delete_collection response code: {response.status_code}, body:{response.body}" + ) + except Exception as e: + self.ten_env.log_error(f"Error: {e}") + return e + + async def delete_collection_async( + self, namespace, namespace_password, collection + ) -> None: + try: + request = gpdb_20160503_models.DeleteCollectionRequest( + region_id=self.region_id, + dbinstance_id=self.dbinstance_id, + namespace_password=namespace_password, + namespace=namespace, + collection=collection, + ) + runtime = util_models.RuntimeOptions( + read_timeout=self.read_timeout, connect_timeout=self.connect_timeout + ) + response = await self.get_client().delete_collection_with_options_async( + request, runtime + ) + self.ten_env.log_info( + f"delete_collection response code: {response.status_code}, body:{response.body}" + ) + except Exception as e: + self.ten_env.log_error(f"Error: {e}") + return e + + def upsert_collection_data( + self, + collection, + namespace, + namespace_password, + rows: List[Tuple[str, str, List[float]]] = None, + ) -> None: + try: + request_rows = [] + for row in rows: + file_name = row[0] + content = row[1] + vector = row[2] + metadata = { + "update_ts": int(time.time() * 1000), + "file_name": file_name, + "content": content, + } + request_row = gpdb_20160503_models.UpsertCollectionDataRequestRows( + metadata=metadata, vector=vector + ) + request_rows.append(request_row) + upsert_collection_data_request = ( + gpdb_20160503_models.UpsertCollectionDataRequest( + region_id=self.region_id, + dbinstance_id=self.dbinstance_id, + collection=collection, + namespace_password=namespace_password, + namespace=namespace, + rows=request_rows, + ) + ) + runtime = util_models.RuntimeOptions( + read_timeout=self.read_timeout, connect_timeout=self.connect_timeout + ) + response = self.get_client().upsert_collection_data_with_options( + upsert_collection_data_request, runtime + ) + self.ten_env.log_debug( + f"upsert_collection response code: {response.status_code}, body:{response.body}" + ) + except Exception as e: + self.ten_env.log_error(f"Error: {e}") + return e + + async def upsert_collection_data_async( + self, + collection, + namespace, + namespace_password, 
+ rows: List[Tuple[str, str, List[float]]] = None, + ) -> None: + try: + request_rows = [] + for row in rows: + file_name = row[0] + content = row[1] + vector = row[2] + metadata = { + "update_ts": int(time.time() * 1000), + "file_name": file_name, + "content": content, + } + request_row = gpdb_20160503_models.UpsertCollectionDataRequestRows( + metadata=metadata, vector=vector + ) + request_rows.append(request_row) + upsert_collection_data_request = ( + gpdb_20160503_models.UpsertCollectionDataRequest( + region_id=self.region_id, + dbinstance_id=self.dbinstance_id, + collection=collection, + namespace_password=namespace_password, + namespace=namespace, + rows=request_rows, + ) + ) + runtime = util_models.RuntimeOptions( + read_timeout=self.read_timeout, connect_timeout=self.connect_timeout + ) + response = ( + await self.get_client().upsert_collection_data_with_options_async( + upsert_collection_data_request, runtime + ) + ) + self.ten_env.log_debug( + f"upsert_collection response code: {response.status_code}, body:{response.body}" + ) + except Exception as e: + self.ten_env.log_error(f"Error: {e}") + return e + + # pylint: disable=redefined-builtin + def query_collection_data( + self, + collection, + namespace, + namespace_password, + vector: List[float] = None, + top_k: int = 10, + content: str = None, + filter: str = None, + hybrid_search: str = None, + hybrid_search_args: Dict[str, dict] = None, + include_metadata_fields: str = None, + include_values: bool = None, + metrics: str = None, + ) -> Tuple[Any, Any]: + try: + query_collection_data_request = ( + gpdb_20160503_models.QueryCollectionDataRequest( + region_id=self.region_id, + dbinstance_id=self.dbinstance_id, + collection=collection, + namespace_password=namespace_password, + namespace=namespace, + vector=vector, + top_k=top_k, + content=content, + filter=filter, + hybrid_search=hybrid_search, + hybrid_search_args=hybrid_search_args, + include_metadata_fields=include_metadata_fields, + include_values=include_values, + metrics=metrics, + ) + ) + runtime = util_models.RuntimeOptions( + read_timeout=self.read_timeout, connect_timeout=self.connect_timeout + ) + response = self.get_client().query_collection_data_with_options( + query_collection_data_request, runtime + ) + self.ten_env.log_debug(f"query_collection response code: {response.status_code}") + return response, None + except Exception as e: + self.ten_env.log_error(f"Error: {e}") + return None, e + + # pylint: disable=redefined-builtin + async def query_collection_data_async( + self, + collection, + namespace, + namespace_password, + vector: List[float] = None, + top_k: int = 10, + content: str = None, + filter: str = None, + hybrid_search: str = None, + hybrid_search_args: Dict[str, dict] = None, + include_metadata_fields: str = None, + include_values: bool = None, + metrics: str = None, + ) -> Tuple[Any, Any]: + try: + query_collection_data_request = ( + gpdb_20160503_models.QueryCollectionDataRequest( + region_id=self.region_id, + dbinstance_id=self.dbinstance_id, + collection=collection, + namespace_password=namespace_password, + namespace=namespace, + vector=vector, + top_k=top_k, + content=content, + filter=filter, + hybrid_search=hybrid_search, + hybrid_search_args=hybrid_search_args, + include_metadata_fields=include_metadata_fields, + include_values=include_values, + metrics=metrics, + ) + ) + runtime = util_models.RuntimeOptions( + read_timeout=self.read_timeout, connect_timeout=self.connect_timeout + ) + response = await 
self.get_client().query_collection_data_with_options_async( + query_collection_data_request, runtime + ) + self.ten_env.log_debug(f"query_collection response code: {response.status_code}") + return response, None + except Exception as e: + self.ten_env.log_error(f"Error: {e}") + return None, e + + def parse_collection_data( + self, body: gpdb_20160503_models.QueryCollectionDataResponseBody + ) -> str: + try: + matches = body.to_map()["Matches"]["match"] + results = [ + {"content": match["Metadata"]["content"], "score": match["Score"]} + for match in matches + ] + results.sort(key=lambda x: x["score"], reverse=True) + json_str = json.dumps(results) + return json_str + except Exception as e: + self.ten_env.log_error( + f"parse collection data failed, error: {e}, data: {body.to_map()}" + ) + return "[]" + + def list_collections(self, namespace, namespace_password) -> Tuple[List[str], Any]: + try: + request = gpdb_20160503_models.ListCollectionsRequest( + region_id=self.region_id, + dbinstance_id=self.dbinstance_id, + namespace=namespace, + namespace_password=namespace_password, + ) + runtime = util_models.RuntimeOptions( + read_timeout=self.read_timeout, connect_timeout=self.connect_timeout + ) + response = self.get_client().list_collections_with_options(request, runtime) + self.ten_env.log_debug( + f"list_collections response code: {response.status_code}, body:{response.body}" + ) + collections = response.body.to_map()["Collections"]["collection"] + return collections, None + except Exception as e: + self.ten_env.log_error(f"Error: {e}") + return [], e + + async def list_collections_async( + self, namespace, namespace_password + ) -> Tuple[List[str], Any]: + try: + request = gpdb_20160503_models.ListCollectionsRequest( + region_id=self.region_id, + dbinstance_id=self.dbinstance_id, + namespace=namespace, + namespace_password=namespace_password, + ) + runtime = util_models.RuntimeOptions( + read_timeout=self.read_timeout, connect_timeout=self.connect_timeout + ) + response = await self.get_client().list_collections_with_options_async( + request, runtime + ) + self.ten_env.log_debug( + f"list_collections response code: {response.status_code}, body:{response.body}" + ) + collections = response.body.to_map()["Collections"]["collection"] + return collections, None + except Exception as e: + self.ten_env.log_error(f"Error: {e}") + return [], e + + def create_vector_index( + self, account, account_password, namespace, collection, dimension + ) -> None: + try: + request = gpdb_20160503_models.CreateVectorIndexRequest( + region_id=self.region_id, + dbinstance_id=self.dbinstance_id, + manager_account=account, + manager_account_password=account_password, + namespace=namespace, + collection=collection, + dimension=dimension, + pq_enable=0, + ) + runtime = util_models.RuntimeOptions( + read_timeout=self.read_timeout, connect_timeout=self.connect_timeout + ) + response = self.get_client().create_vector_index_with_options( + request, runtime + ) + self.ten_env.log_debug( + f"create_vector_index response code: {response.status_code}, body:{response.body}" + ) + except Exception as e: + self.ten_env.log_error(f"Error: {e}") + return e + + async def create_vector_index_async( + self, account, account_password, namespace, collection, dimension + ) -> None: + try: + request = gpdb_20160503_models.CreateVectorIndexRequest( + region_id=self.region_id, + dbinstance_id=self.dbinstance_id, + manager_account=account, + manager_account_password=account_password, + namespace=namespace, + collection=collection, + 
dimension=dimension, + pq_enable=0, + ) + runtime = util_models.RuntimeOptions( + read_timeout=self.read_timeout, connect_timeout=self.connect_timeout + ) + response = await self.get_client().create_vector_index_with_options_async( + request, runtime + ) + self.ten_env.log_debug( + f"create_vector_index response code: {response.status_code}, body:{response.body}" + ) + except Exception as e: + self.ten_env.log_error(f"Error: {e}") + return e diff --git a/agents/ten_packages/extension/aliyun_analyticdb_vector_storage/property.json b/agents/ten_packages/extension/aliyun_analyticdb_vector_storage/property.json new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/agents/ten_packages/extension/aliyun_analyticdb_vector_storage/property.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/agents/ten_packages/extension/aliyun_analyticdb_vector_storage/requirements.txt b/agents/ten_packages/extension/aliyun_analyticdb_vector_storage/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..fa0bed408c751c481b50234b353bbcc6bcf5251b --- /dev/null +++ b/agents/ten_packages/extension/aliyun_analyticdb_vector_storage/requirements.txt @@ -0,0 +1 @@ +alibabacloud_gpdb20160503 \ No newline at end of file diff --git a/agents/ten_packages/extension/aliyun_analyticdb_vector_storage/vector_storage_addon.py b/agents/ten_packages/extension/aliyun_analyticdb_vector_storage/vector_storage_addon.py new file mode 100644 index 0000000000000000000000000000000000000000..aff84dd34587ae445a9f760b55743cfa30c1e33f --- /dev/null +++ b/agents/ten_packages/extension/aliyun_analyticdb_vector_storage/vector_storage_addon.py @@ -0,0 +1,13 @@ +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + + +@register_addon_as_extension("aliyun_analyticdb_vector_storage") +class AliPGDBExtensionAddon(Addon): + def on_create_instance(self, ten: TenEnv, addon_name: str, context) -> None: + from .vector_storage_extension import AliPGDBExtension + ten.log_info("on_create_instance") + ten.on_create_instance_done(AliPGDBExtension(addon_name), context) diff --git a/agents/ten_packages/extension/aliyun_analyticdb_vector_storage/vector_storage_extension.py b/agents/ten_packages/extension/aliyun_analyticdb_vector_storage/vector_storage_extension.py new file mode 100644 index 0000000000000000000000000000000000000000..81e300e26d56f346feeb64e07f3dac23ab15d1d1 --- /dev/null +++ b/agents/ten_packages/extension/aliyun_analyticdb_vector_storage/vector_storage_extension.py @@ -0,0 +1,218 @@ +# -*- coding: utf-8 -*- +# + +import asyncio +import os +import json +from ten import ( + Extension, + TenEnv, + Cmd, + Data, + StatusCode, + CmdResult, +) + +import threading +from datetime import datetime + + +class AliPGDBExtension(Extension): + def __init__(self, name): + self.stopEvent = asyncio.Event() + self.thread = None + self.loop = None + self.access_key_id = os.environ.get("ALIBABA_CLOUD_ACCESS_KEY_ID") + self.access_key_secret = os.environ.get("ALIBABA_CLOUD_ACCESS_KEY_SECRET") + self.region_id = os.environ.get("ADBPG_INSTANCE_REGION") + self.dbinstance_id = os.environ.get("ADBPG_INSTANCE_ID") + self.endpoint = "gpdb.aliyuncs.com" + self.model = None + self.account = os.environ.get("ADBPG_ACCOUNT") + self.account_password = os.environ.get("ADBPG_ACCOUNT_PASSWORD") + self.namespace = os.environ.get("ADBPG_NAMESPACE") + self.namespace_password = os.environ.get("ADBPG_NAMESPACE_PASSWORD") + + async def __thread_routine(self, 
ten_env: TenEnv): + ten_env.log_info("__thread_routine start") + self.loop = asyncio.get_running_loop() + ten_env.on_start_done() + await self.stopEvent.wait() + + async def stop_thread(self): + self.stopEvent.set() + + def on_start(self, ten: TenEnv) -> None: + ten.log_info("on_start") + self.access_key_id = self.get_property_string( + ten, "ALIBABA_CLOUD_ACCESS_KEY_ID", self.access_key_id + ) + self.access_key_secret = self.get_property_string( + ten, "ALIBABA_CLOUD_ACCESS_KEY_SECRET", self.access_key_secret + ) + self.region_id = self.get_property_string( + ten, "ADBPG_INSTANCE_REGION", self.region_id + ) + self.dbinstance_id = self.get_property_string( + ten, "ADBPG_INSTANCE_ID", self.dbinstance_id + ) + self.account = self.get_property_string(ten, "ADBPG_ACCOUNT", self.account) + self.account_password = self.get_property_string( + ten, "ADBPG_ACCOUNT_PASSWORD", self.account_password + ) + self.namespace = self.get_property_string( + ten, "ADBPG_NAMESPACE", self.namespace + ) + self.namespace_password = self.get_property_string( + ten, "ADBPG_NAMESPACE_PASSWORD", self.namespace_password + ) + + if self.region_id in ( + "cn-beijing", + "cn-hangzhou", + "cn-shanghai", + "cn-shenzhen", + "cn-hongkong", + "ap-southeast-1", + "cn-hangzhou-finance", + "cn-shanghai-finance-1", + "cn-shenzhen-finance-1", + "cn-beijing-finance-1", + ): + self.endpoint = "gpdb.aliyuncs.com" + else: + self.endpoint = f"gpdb.{self.region_id}.aliyuncs.com" + + # lazy import packages which requires long time to load + from .client import AliGPDBClient + from .model import Model + + client = AliGPDBClient( + ten, self.access_key_id, self.access_key_secret, self.endpoint + ) + self.model = Model(ten, self.region_id, self.dbinstance_id, client) + self.thread = threading.Thread( + target=asyncio.run, args=(self.__thread_routine(ten),) + ) + + # Then 'on_start_done' will be called in the thread + self.thread.start() + return + + def on_stop(self, ten: TenEnv) -> None: + ten.log_info("on_stop") + if self.thread is not None and self.thread.is_alive(): + asyncio.run_coroutine_threadsafe(self.stop_thread(), self.loop) + self.thread.join() + self.thread = None + ten.on_stop_done() + return + + def on_data(self, ten: TenEnv, data: Data) -> None: + pass + + def on_cmd(self, ten: TenEnv, cmd: Cmd) -> None: + try: + cmd_name = cmd.get_name() + ten.log_info(f"on_cmd [{cmd_name}]") + if cmd_name == "create_collection": + asyncio.run_coroutine_threadsafe( + self.async_create_collection(ten, cmd), self.loop + ) + elif cmd_name == "delete_collection": + asyncio.run_coroutine_threadsafe( + self.async_delete_collection(ten, cmd), self.loop + ) + elif cmd_name == "upsert_vector": + asyncio.run_coroutine_threadsafe( + self.async_upsert_vector(ten, cmd), self.loop + ) + elif cmd_name == "query_vector": + asyncio.run_coroutine_threadsafe( + self.async_query_vector(ten, cmd), self.loop + ) + else: + ten.return_result(CmdResult.create(StatusCode.ERROR), cmd) + except Exception: + ten.return_result(CmdResult.create(StatusCode.ERROR), cmd) + + async def async_create_collection(self, ten: TenEnv, cmd: Cmd): + collection = cmd.get_property_string("collection_name") + dimension = 1024 + try: + dimension = cmd.get_property_int("dimension") + except Exception as e: + ten.log_warn(f"Error: {e}") + + err = await self.model.create_collection_async( + self.account, self.account_password, self.namespace, collection + ) + if err is None: + await self.model.create_vector_index_async( + self.account, + self.account_password, + self.namespace, + collection, 
+ dimension, + ) + ten.return_result(CmdResult.create(StatusCode.OK), cmd) + else: + ten.return_result(CmdResult.create(StatusCode.ERROR), cmd) + + async def async_upsert_vector(self, ten: TenEnv, cmd: Cmd): + start_time = datetime.now() + collection = cmd.get_property_string("collection_name") + file = cmd.get_property_string("file_name") + content = cmd.get_property_string("content") + obj = json.loads(content) + rows = [(file, item["text"], item["embedding"]) for item in obj] + + err = await self.model.upsert_collection_data_async( + collection, self.namespace, self.namespace_password, rows + ) + ten.log_info( + f"upsert_vector finished for file {file}, collection {collection}, rows len {len(rows)}, err {err}, cost {int((datetime.now() - start_time).total_seconds() * 1000)}ms" + ) + if err is None: + ten.return_result(CmdResult.create(StatusCode.OK), cmd) + else: + ten.return_result(CmdResult.create(StatusCode.ERROR), cmd) + + async def async_query_vector(self, ten: TenEnv, cmd: Cmd): + start_time = datetime.now() + collection = cmd.get_property_string("collection_name") + embedding = cmd.get_property_to_json("embedding") + top_k = cmd.get_property_int("top_k") + vector = json.loads(embedding) + response, error = await self.model.query_collection_data_async( + collection, self.namespace, self.namespace_password, vector, top_k=top_k + ) + ten.log_info( + f"query_vector finished for collection {collection}, embedding len {len(embedding)}, err {error}, cost {int((datetime.now() - start_time).total_seconds() * 1000)}ms" + ) + + if error: + return ten.return_result(CmdResult.create(StatusCode.ERROR), cmd) + else: + body = self.model.parse_collection_data(response.body) + ret = CmdResult.create(StatusCode.OK) + ret.set_property_from_json("response", body) + ten.return_result(ret, cmd) + + async def async_delete_collection(self, ten: TenEnv, cmd: Cmd): + collection = cmd.get_property_string("collection_name") + # pylint: disable=too-many-function-args + err = await self.model.delete_collection_async( + self.account, self.account_password, self.namespace, collection + ) + if err is None: + return ten.return_result(CmdResult.create(StatusCode.OK), cmd) + else: + return ten.return_result(CmdResult.create(StatusCode.ERROR), cmd) + + def get_property_string(self, ten: TenEnv, key: str, default: str) -> str: + try: + return ten.get_property_string(key.lower()) + except Exception as e: + ten.log_error(f"Error: {e}") + return default diff --git a/agents/ten_packages/extension/aliyun_text_embedding/__init__.py b/agents/ten_packages/extension/aliyun_text_embedding/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..61530b41f06873faaad58fe23361c59df5fadc35 --- /dev/null +++ b/agents/ten_packages/extension/aliyun_text_embedding/__init__.py @@ -0,0 +1 @@ +from . 
import embedding_addon
diff --git a/agents/ten_packages/extension/aliyun_text_embedding/embedding_addon.py b/agents/ten_packages/extension/aliyun_text_embedding/embedding_addon.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a02bbc676198cf8004c59b18a8ac8a9403d4d18
--- /dev/null
+++ b/agents/ten_packages/extension/aliyun_text_embedding/embedding_addon.py
@@ -0,0 +1,13 @@
+from ten import (
+    Addon,
+    register_addon_as_extension,
+    TenEnv,
+)
+
+
+@register_addon_as_extension("aliyun_text_embedding")
+class EmbeddingExtensionAddon(Addon):
+    def on_create_instance(self, ten: TenEnv, addon_name: str, context) -> None:
+        from .embedding_extension import EmbeddingExtension
+        ten.log_info("on_create_instance")
+        ten.on_create_instance_done(EmbeddingExtension(addon_name), context)
diff --git a/agents/ten_packages/extension/aliyun_text_embedding/embedding_extension.py b/agents/ten_packages/extension/aliyun_text_embedding/embedding_extension.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa2f24a37298f13c123a7feb15c09f2d2d5bee17
--- /dev/null
+++ b/agents/ten_packages/extension/aliyun_text_embedding/embedding_extension.py
@@ -0,0 +1,193 @@
+from ten import (
+    Extension,
+    TenEnv,
+    Cmd,
+    StatusCode,
+    CmdResult,
+)
+
+import json
+from typing import Generator, List
+from http import HTTPStatus
+import threading, queue
+from datetime import datetime
+
+CMD_EMBED = "embed"
+CMD_EMBED_BATCH = "embed_batch"
+
+FIELD_KEY_EMBEDDING = "embedding"
+FIELD_KEY_EMBEDDINGS = "embeddings"
+FIELD_KEY_MESSAGE = "message"
+FIELD_KEY_CODE = "code"
+
+DASHSCOPE_MAX_BATCH_SIZE = 6
+
+
+class EmbeddingExtension(Extension):
+    def __init__(self, name: str):
+        super().__init__(name)
+        self.api_key = ""
+        self.model = ""
+
+        self.stop = False
+        self.queue = queue.Queue()
+        self.threads = []
+
+        # workaround to speed up the embedding process,
+        # should be replaced by https://help.aliyun.com/zh/model-studio/developer-reference/text-embedding-batch-api?spm=a2c4g.11186623.0.0.24cb7453KSjdhC
+        # once v3 models are supported
+        self.parallel = 10
+
+    def on_start(self, ten: TenEnv) -> None:
+        ten.log_info("on_start")
+        self.api_key = self.get_property_string(ten, "api_key", self.api_key)
+        self.model = self.get_property_string(ten, "model", self.model)
+
+        # lazy import packages which require a long time to load
+        global dashscope  # pylint: disable=global-statement
+        import dashscope
+
+        dashscope.api_key = self.api_key
+
+        for i in range(self.parallel):
+            thread = threading.Thread(target=self.async_handler, args=[i, ten])
+            thread.start()
+            self.threads.append(thread)
+
+        ten.on_start_done()
+
+    def async_handler(self, index: int, ten: TenEnv):
+        ten.log_info(f"async_handler {index} started")
+
+        while not self.stop:
+            cmd = self.queue.get()
+            if cmd is None:
+                break
+
+            cmd_name = cmd.get_name()
+            start_time = datetime.now()
+            ten.log_info(f"async_handler {index} processing cmd {cmd_name}")
+
+            if cmd_name == CMD_EMBED:
+                cmd_result = self.call_with_str(cmd.get_property_string("input"), ten)
+                ten.return_result(cmd_result, cmd)
+            elif cmd_name == CMD_EMBED_BATCH:
+                inputs_list = json.loads(cmd.get_property_to_json("inputs"))
+                cmd_result = self.call_with_strs(inputs_list, ten)
+                ten.return_result(cmd_result, cmd)
+            else:
+                ten.log_warn(f"unknown cmd {cmd_name}")
+
+            ten.log_info(
+                f"async_handler {index} finished processing cmd {cmd_name}, cost {int((datetime.now() - start_time).total_seconds() * 1000)}ms"
+            )
+
+        ten.log_info(f"async_handler {index} stopped")
+
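+    # Worker pattern: each thread in `self.threads` blocks on the shared
+    # queue; `on_cmd` only enqueues, and `on_stop` pushes one `None` sentinel
+    # per worker so every thread wakes up and exits its loop.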
+    def call_with_str(self, message: str, ten: TenEnv) -> CmdResult:
+        start_time = datetime.now()
+        # pylint: disable=undefined-variable
+        response = dashscope.TextEmbedding.call(model=self.model, input=message)
+        ten.log_info(
+            f"embedding call finished for input [{message}], status_code {response.status_code}, cost {int((datetime.now() - start_time).total_seconds() * 1000)}ms"
+        )
+
+        if response.status_code == HTTPStatus.OK:
+            cmd_result = CmdResult.create(StatusCode.OK)
+            cmd_result.set_property_from_json(
+                FIELD_KEY_EMBEDDING,
+                json.dumps(response.output["embeddings"][0]["embedding"]),
+            )
+            return cmd_result
+        else:
+            cmd_result = CmdResult.create(StatusCode.ERROR)
+            cmd_result.set_property_string(FIELD_KEY_CODE, str(response.status_code))
+            cmd_result.set_property_string(FIELD_KEY_MESSAGE, response.message)
+            return cmd_result
+
+    def batched(
+        self, inputs: List, batch_size: int = DASHSCOPE_MAX_BATCH_SIZE
+    ) -> Generator[List, None, None]:
+        for i in range(0, len(inputs), batch_size):
+            yield inputs[i : i + batch_size]
+
+    def call_with_strs(self, messages: List[str], ten: TenEnv) -> CmdResult:
+        start_time = datetime.now()
+        result = None  # merge the results.
+        batch_counter = 0
+        for batch in self.batched(messages):
+            # pylint: disable=undefined-variable
+            response = dashscope.TextEmbedding.call(model=self.model, input=batch)
+            # ten.log_info(f"{batch} Received {response}")
+            if response.status_code == HTTPStatus.OK:
+                if result is None:
+                    result = response.output
+                else:
+                    for emb in response.output["embeddings"]:
+                        emb["text_index"] += batch_counter
+                        result["embeddings"].append(emb)
+            else:
+                ten.log_error(f"call {batch} failed, errmsg: {response}")
+            batch_counter += len(batch)
+
+        results_len = len(result["embeddings"]) if result is not None else 0
+        ten.log_info(
+            f"embedding call finished for inputs len {len(messages)}, batch_counter {batch_counter}, results len {results_len}, cost {int((datetime.now() - start_time).total_seconds() * 1000)}ms"
+        )
+        if result is not None:
+            cmd_result = CmdResult.create(StatusCode.OK)
+
+            # `set_property_to_json` is too slow, so use `set_property_string`
+            # as a workaround for now; will be replaced once
+            # `set_property_to_json` is improved
+            cmd_result.set_property_string(
+                FIELD_KEY_EMBEDDINGS, json.dumps(result["embeddings"])
+            )
+            return cmd_result
+        else:
+            cmd_result = CmdResult.create(StatusCode.ERROR)
+            cmd_result.set_property_string(FIELD_KEY_MESSAGE, "All batches failed")
+            ten.log_error("All batches failed")
+            return cmd_result
+
+    def on_stop(self, ten: TenEnv) -> None:
+        ten.log_info("on_stop")
+        self.stop = True
+        # clear queue
+        while not self.queue.empty():
+            self.queue.get()
+        # put enough None to stop all threads
+        for thread in self.threads:
+            self.queue.put(None)
+        for thread in self.threads:
+            thread.join()
+        self.threads = []
+
+        ten.on_stop_done()
+
+    def on_cmd(self, ten: TenEnv, cmd: Cmd) -> None:
+        cmd_name = cmd.get_name()
+
+        if cmd_name in [CMD_EMBED, CMD_EMBED_BATCH]:
+            # // embed
+            # {
+            #   "name": "embed",
+            #   "input": "hello"
+            # }
+
+            # // embed_batch
+            # {
+            #   "name": "embed_batch",
+            #   "inputs": ["hello", ...]
+ # } + + self.queue.put(cmd) + else: + ten.log_warn(f"unknown cmd {cmd_name}") + cmd_result = CmdResult.create(StatusCode.ERROR) + ten.return_result(cmd_result, cmd) + + def get_property_string(self, ten: TenEnv, key, default): + try: + return ten.get_property_string(key) + except Exception as e: + ten.log_warn(f"err: {e}") + return default diff --git a/agents/ten_packages/extension/aliyun_text_embedding/manifest.json b/agents/ten_packages/extension/aliyun_text_embedding/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..2164f24d0d1a97dac0b740f48f19e217009898ad --- /dev/null +++ b/agents/ten_packages/extension/aliyun_text_embedding/manifest.json @@ -0,0 +1,78 @@ +{ + "type": "extension", + "name": "aliyun_text_embedding", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "api": { + "property": { + "api_key": { + "type": "string" + }, + "model": { + "type": "string" + } + }, + "cmd_in": [ + { + "name": "embed", + "property": { + "input": { + "type": "string" + } + }, + "required": [ + "input" + ], + "result": { + "property": { + "embedding": { + "type": "array", + "items": { + "type": "float64" + } + }, + "code": { + "type": "string" + }, + "message": { + "type": "string" + } + } + } + }, + { + "name": "embed_batch", + "property": { + "inputs": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "inputs" + ], + "result": { + "property": { + "embeddings": { + "type": "string" + }, + "code": { + "type": "string" + }, + "message": { + "type": "string" + } + } + } + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/aliyun_text_embedding/property.json b/agents/ten_packages/extension/aliyun_text_embedding/property.json new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/agents/ten_packages/extension/aliyun_text_embedding/property.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/agents/ten_packages/extension/aliyun_text_embedding/requirements.txt b/agents/ten_packages/extension/aliyun_text_embedding/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..5899464f479117dceab904f0dbe7a4f7459cf02e --- /dev/null +++ b/agents/ten_packages/extension/aliyun_text_embedding/requirements.txt @@ -0,0 +1 @@ +dashscope \ No newline at end of file diff --git a/agents/ten_packages/extension/bedrock_llm_python/README.md b/agents/ten_packages/extension/bedrock_llm_python/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1565bd5644b013e962162b6e0b508684c86c3efb --- /dev/null +++ b/agents/ten_packages/extension/bedrock_llm_python/README.md @@ -0,0 +1,105 @@ +## Amazon Bedrock LLM Extension + +### Configurations + +You can config this extension by providing following environments: + +| Env | Required | Default | Notes | +| -- | -- | -- | -- | +| AWS_REGION | No | us-east-1 | The Region of Amazon Bedrock service you want to use. | +| AWS_ACCESS_KEY_ID | No | - | Access Key of your IAM User, make sure you've set proper permissions to [invoke Bedrock models](https://docs.aws.amazon.com/bedrock/latest/userguide/security_iam_id-based-policy-examples.html) and gain [models access](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html) in Bedrock. Will use default credentials provider if not provided. 
Check the [documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html). |
+| AWS_SECRET_ACCESS_KEY | No | - | Secret Key of your IAM User, make sure you've set proper permissions to [invoke Bedrock models](https://docs.aws.amazon.com/bedrock/latest/userguide/security_iam_id-based-policy-examples.html) and gain [models access](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html) in Bedrock. Will use the default credentials provider if not provided. Check the [documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html). |
+| AWS_BEDROCK_MODEL | No | Nova (https://docs.aws.amazon.com/nova/latest/userguide/what-is-nova.html) | Bedrock model id, check the [documentation](https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns). |
+
+## Features
+
+- Real-time video and audio interaction similar to Gemini 2.0
+- Audio recognition using TEN framework's STT plugin
+- Text-to-speech conversion using TEN framework's TTS plugin
+- Integration with AWS Bedrock's Nova model
+- Smart input truncation logic
+- Multi-language support
+
+## Requirements
+- Python 3.9+
+- AWS account with Bedrock access
+- TEN framework with STT and TTS plugins
+- Dependencies listed in requirements.txt
+
+## Installation
+
+1. Install dependencies:
+```bash
+pip install -r requirements.txt
+```
+
+2. Configure AWS credentials:
+- Set up AWS credentials with Bedrock access
+- Set `access_key_id` / `secret_access_key` in the configuration (see property.json)
+
+## Configuration
+
+The extension can be configured through manifest.json properties:
+- `base_uri`: Bedrock API endpoint
+- `region`: AWS region for Bedrock
+- `aws_access_key_id`: AWS access key ID
+- `aws_secret_access_key`: AWS secret access key
+- `model_id`: Bedrock Nova model ID
+- `language`: Language code for STT/TTS
+- See manifest.json for full configuration options
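+
+For reference, the extension drives Bedrock through boto3's `converse_stream`
+API (see `extension.py`). Below is a minimal, illustrative sketch of that call
+shape; the placeholder credentials and the example model/prompt values are
+assumptions, not extension defaults:
+
+```python
+import boto3
+
+# Placeholder credentials; the extension reads these from its properties.
+client = boto3.client(
+    "bedrock-runtime",
+    region_name="us-east-1",
+    aws_access_key_id="<AWS_ACCESS_KEY_ID>",
+    aws_secret_access_key="<AWS_SECRET_ACCESS_KEY>",
+)
+
+response = client.converse_stream(
+    modelId="us.amazon.nova-lite-v1:0",
+    system=[{"text": "You are a concise assistant."}],
+    messages=[{"role": "user", "content": [{"text": "Hello"}]}],
+    inferenceConfig={"maxTokens": 256, "topP": 0.5, "temperature": 0.7},
+    additionalModelRequestFields={"inferenceConfig": {"topK": 10}},
+)
+
+# Print the generated text chunk by chunk as it streams back.
+for event in response["stream"]:
+    if "contentBlockDelta" in event:
+        print(event["contentBlockDelta"]["delta"].get("text", ""), end="")
+```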
+
+## Input Truncation Logic
+
+The extension implements smart input truncation:
+
+1. Duration-based truncation:
+   - Automatically truncates input exceeding 30 seconds
+
+2. Silence-based truncation:
+   - Triggers when silence exceeds 2 seconds
+
+3. Manual truncation:
+   - Supports user-initiated truncation
+
+## Architecture
+
+1. Audio Processing:
+   - Uses TEN framework's STT plugin for audio recognition
+   - Buffers and processes audio in real-time
+   - Provides intermediate and final transcripts
+
+2. Nova Model Integration:
+   - Combines transcribed text with video input
+   - Sends to Bedrock's Nova model for processing
+   - Handles responses and error conditions
+
+3. Speech Synthesis:
+   - Converts Nova model responses to speech
+   - Uses TEN framework's TTS plugin
+   - Synchronizes with video output
+
+## API Usage
+
+### Commands
+
+1. Flush Command:
+```python
+cmd = Cmd.create("flush")
+await ten_env.send_cmd(cmd)
+```
+
+2. User Events:
+```python
+# User joined
+cmd = Cmd.create("on_user_joined")
+await ten_env.send_cmd(cmd)
+
+# User left
+cmd = Cmd.create("on_user_left")
+await ten_env.send_cmd(cmd)
+```
+
+## Contributing
+1. Fork the repository
+2. Create a feature branch
+3. Submit a pull request
\ No newline at end of file
diff --git a/agents/ten_packages/extension/bedrock_llm_python/__init__.py b/agents/ten_packages/extension/bedrock_llm_python/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..72593ab2259f95627bdd500fe3d062984e7f44c6
--- /dev/null
+++ b/agents/ten_packages/extension/bedrock_llm_python/__init__.py
@@ -0,0 +1,6 @@
+#
+# This file is part of TEN Framework, an open source project.
+# Licensed under the Apache License, Version 2.0.
+# See the LICENSE file for more information.
+#
+from . import addon
diff --git a/agents/ten_packages/extension/bedrock_llm_python/addon.py b/agents/ten_packages/extension/bedrock_llm_python/addon.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b2cb8da1465d878d07df20d010ec192132e7646
--- /dev/null
+++ b/agents/ten_packages/extension/bedrock_llm_python/addon.py
@@ -0,0 +1,18 @@
+#
+# This file is part of TEN Framework, an open source project.
+# Licensed under the Apache License, Version 2.0.
+# See the LICENSE file for more information.
+#
+from ten import (
+    Addon,
+    register_addon_as_extension,
+    TenEnv,
+)
+from .extension import BedrockLLMExtension
+
+
+@register_addon_as_extension("bedrock_llm_python")
+class BedrockLLMExtensionAddon(Addon):
+    def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None:
+        ten_env.log_info("on_create_instance")
+        ten_env.on_create_instance_done(BedrockLLMExtension(name), context)
diff --git a/agents/ten_packages/extension/bedrock_llm_python/extension.py b/agents/ten_packages/extension/bedrock_llm_python/extension.py
new file mode 100644
index 0000000000000000000000000000000000000000..b3e13298b4e28256109ecaa0ef9ebc2995770b3e
--- /dev/null
+++ b/agents/ten_packages/extension/bedrock_llm_python/extension.py
@@ -0,0 +1,446 @@
+#!/usr/bin/env python3
+#
+# Agora Real Time Engagement
+# Created by Cline in 2024-03.
+# Copyright (c) 2024 Agora IO. All rights reserved.
+#
+import asyncio
+import time
+import traceback
+from enum import Enum
+from typing import Optional, List, Dict
+
+import boto3
+from ten import (
+    AsyncTenEnv,
+    Cmd,
+    StatusCode,
+    CmdResult,
+    Data,
+)
+from ten_ai_base.config import BaseConfig
+from ten_ai_base.llm import AsyncLLMBaseExtension
+from dataclasses import dataclass
+
+from .utils import (
+    rgb2base64jpeg,
+    filter_images,
+    parse_sentence,
+    get_greeting_text,
+    merge_images
+)
+
+# Constants
+MAX_IMAGE_COUNT = 20
+ONE_BATCH_SEND_COUNT = 6
+VIDEO_FRAME_INTERVAL = 0.5
+
+# Command definitions
+CMD_IN_FLUSH = "flush"
+CMD_IN_ON_USER_JOINED = "on_user_joined"
+CMD_IN_ON_USER_LEFT = "on_user_left"
+CMD_OUT_FLUSH = "flush"
+
+# Data property definitions
+DATA_IN_TEXT_DATA_PROPERTY_IS_FINAL = "is_final"
+DATA_IN_TEXT_DATA_PROPERTY_TEXT = "text"
+DATA_OUT_TEXT_DATA_PROPERTY_TEXT = "text"
+DATA_OUT_TEXT_DATA_PROPERTY_TEXT_END_OF_SEGMENT = "end_of_segment"
+
+class Role(str, Enum):
+    """Role definitions for chat participants."""
+    User = "user"
+    Assistant = "assistant"
+
+@dataclass
+class BedrockLLMConfig(BaseConfig):
+    """Configuration for the Bedrock LLM extension."""
+    region: str = "us-east-1"
+    model_id: str = "us.amazon.nova-lite-v1:0"
+    access_key_id: str = ""
+    secret_access_key: str = ""
+    language: str = "en-US"
+    prompt: str = "You are an intelligent assistant with real-time interaction capabilities. You will be presented with a series of images that represent a video sequence. Describe what you see directly, as if you were observing the scene in real-time. Do not mention that you are looking at images or a video. Instead, narrate the scene and actions as they unfold. Engage in conversation with the user based on this visual input and their questions, keeping your responses concise and clear."
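+    # Sampling parameters forwarded to Bedrock's converse_stream call:
+    # `tokP` is passed as `topP`, and `topK` goes through
+    # additionalModelRequestFields (see _call_nova_model below).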
+    temperature: float = 0.7
+    max_tokens: int = 256
+    tokP: float = 0.5
+    topK: int = 10
+    max_duration: int = 30
+    vendor: str = ""
+    stream_id: int = 0
+    dump: bool = False
+    max_memory_length: int = 10
+    is_memory_enabled: bool = False
+    is_enable_video: bool = False
+    greeting: str = "Hello, I'm here to help you. How can I assist you today?"
+
+    def build_ctx(self) -> dict:
+        """Build context dictionary from configuration."""
+        return {
+            "language": self.language,
+            "model": self.model_id,
+        }
+
+class BedrockLLMExtension(AsyncLLMBaseExtension):
+    """Extension for handling text and video input using AWS Bedrock."""
+
+    def __init__(self, name: str):
+        super().__init__(name)
+        self.config: Optional[BedrockLLMConfig] = None
+        self.stopped: bool = False
+        self.memory: list = []
+        self.users_count: int = 0
+        self.bedrock_client = None
+        self.image_buffers: list = []
+        self.image_queue = asyncio.Queue()
+        self.text_buffer: str = ""
+        self.input_start_time: float = 0
+        self.processing_times = []
+        self.ten_env = None
+        self.ctx = None
+
+    async def on_init(self, ten_env: AsyncTenEnv) -> None:
+        """Initialize the extension."""
+        await super().on_init(ten_env)
+        ten_env.log_info("BedrockLLMExtension initialized")
+
+    async def on_start(self, ten_env: AsyncTenEnv) -> None:
+        """Start the extension and set up required components."""
+        await super().on_start(ten_env)
+        ten_env.log_info("BedrockLLMExtension starting")
+
+        try:
+            self.config = await BedrockLLMConfig.create_async(ten_env=ten_env)
+            ten_env.log_info(f"Configuration: {self.config}")
+
+            if not self.config.access_key_id or not self.config.secret_access_key:
+                ten_env.log_error("AWS credentials (access_key_id and secret_access_key) are required")
+                return
+
+            await self._setup_components(ten_env)
+
+        except Exception as e:
+            traceback.print_exc()
+            ten_env.log_error(f"Failed to initialize: {e}")
+
+    async def _setup_components(self, ten_env: AsyncTenEnv) -> None:
+        """Set up extension components."""
+        self.memory = []
+        self.ctx = self.config.build_ctx()
+        self.ten_env = ten_env
+
+        self.loop = asyncio.get_event_loop()
+        self.loop.create_task(self._on_video(ten_env))
+
+    async def on_stop(self, ten_env: AsyncTenEnv) -> None:
+        """Stop the extension."""
+        await super().on_stop(ten_env)
+        ten_env.log_info("BedrockLLMExtension stopping")
+        self.stopped = True
+
+    async def on_data(self, ten_env: AsyncTenEnv, data) -> None:
+        """Handle incoming data."""
+        ten_env.log_info("on_data receive begin...")
+        data_name = data.get_name()
+        ten_env.log_info(f"on_data name {data_name}")
+
+        try:
+            is_final = data.get_property_bool(DATA_IN_TEXT_DATA_PROPERTY_IS_FINAL)
+            input_text = data.get_property_string(DATA_IN_TEXT_DATA_PROPERTY_TEXT)
+
+            if not is_final:
+                ten_env.log_info("ignore non-final input")
+                return
+
+            if not input_text:
+                ten_env.log_info("ignore empty text")
+                return
+
+            ten_env.log_info(f"OnData input text: [{input_text}]")
+            self.text_buffer = input_text
+            await self._handle_input_truncation("is_final")
+
+        except Exception as err:
+            ten_env.log_info(f"Error processing data: {err}")
+
+    async def on_video_frame(self, _: AsyncTenEnv, video_frame) -> None:
+        """Handle incoming video frames."""
+        if not self.config.is_enable_video:
+            return
+        image_data = video_frame.get_buf()
+        image_width = video_frame.get_width()
+        image_height = video_frame.get_height()
+        await self.image_queue.put([image_data, image_width, image_height])
+
+    async def _on_video(self, ten_env: AsyncTenEnv):
+        """Process video frames from the queue."""
+        while True:
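+            # Per iteration: JPEG-encode the oldest queued frame, keep a
+            # rolling window of at most MAX_IMAGE_COUNT frames, then drain the
+            # queue so only one frame is sampled per VIDEO_FRAME_INTERVAL.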
+            try:
+                [image_data, image_width, image_height] = await self.image_queue.get()
+
+                # ten_env.log_info(f"image_width: {image_width}, image_height: {image_height}, image_size: {len(bytes(image_data)) / 1024 / 1024}MB")
+
+                frame_buffer = rgb2base64jpeg(image_data, image_width, image_height)
+
+                self.image_buffers.append(frame_buffer)
+
+                # ten_env.log_info(f"Processed frame, width: {image_width}, height: {image_height}, frame_buffer_size: {len(frame_buffer) / 1024 / 1024}MB")
+
+                while len(self.image_buffers) > MAX_IMAGE_COUNT:
+                    self.image_buffers.pop(0)
+
+                # Skip remaining frames for the interval
+                while not self.image_queue.empty():
+                    await self.image_queue.get()
+
+                await asyncio.sleep(VIDEO_FRAME_INTERVAL)
+
+            except Exception as e:
+                traceback.print_exc()
+                ten_env.log_error(f"Error processing video frame: {e}")
+
+    async def on_cmd(self, ten_env: AsyncTenEnv, cmd: Cmd) -> None:
+        """Handle incoming commands."""
+        cmd_name = cmd.get_name()
+        ten_env.log_info(f"Command received: {cmd_name}")
+
+        try:
+            if cmd_name == CMD_IN_FLUSH:
+                await ten_env.send_cmd(Cmd.create(CMD_OUT_FLUSH))
+            elif cmd_name == CMD_IN_ON_USER_JOINED:
+                await self._handle_user_joined()
+            elif cmd_name == CMD_IN_ON_USER_LEFT:
+                await self._handle_user_left()
+            else:
+                await super().on_cmd(ten_env, cmd)
+                return
+
+            cmd_result = CmdResult.create(StatusCode.OK)
+            cmd_result.set_property_string("detail", "success")
+            await ten_env.return_result(cmd_result, cmd)
+
+        except Exception as e:
+            traceback.print_exc()
+            ten_env.log_error(f"Error handling command {cmd_name}: {e}")
+            cmd_result = CmdResult.create(StatusCode.ERROR)
+            cmd_result.set_property_string("detail", str(e))
+            await ten_env.return_result(cmd_result, cmd)
+
+    async def _handle_user_left(self) -> None:
+        """Handle user left event."""
+        self.users_count -= 1
+        if self.users_count == 0:
+            self._reset_state()
+
+        if self.users_count < 0:
+            self.users_count = 0
+
+    async def _handle_user_joined(self) -> None:
+        """Handle user joined event."""
+        self.users_count += 1
+        if self.users_count == 1:
+            await self._greeting()
+
+    async def _handle_input_truncation(self, reason: str):
+        """Handle input truncation events."""
+        try:
+            self.ten_env.log_info(f"Input truncated due to: {reason}")
+
+            if self.text_buffer:
+                await self._call_nova_model(self.text_buffer, self.image_buffers)
+
+            self._reset_state()
+
+        except Exception as e:
+            traceback.print_exc()
+            self.ten_env.log_error(f"Error handling input truncation: {e}")
+
+    def _reset_state(self):
+        """Reset internal state."""
+        self.text_buffer = ""
+        self.image_buffers = []
+        self.input_start_time = 0
+
+    async def _initialize_aws_clients(self):
+        """Initialize AWS clients."""
+        try:
+            if not self.bedrock_client:
+                self.bedrock_client = boto3.client(
+                    "bedrock-runtime",
+                    aws_access_key_id=self.config.access_key_id,
+                    aws_secret_access_key=self.config.secret_access_key,
+                    region_name=self.config.region,
+                )
+        except Exception as e:
+            traceback.print_exc()
+            self.ten_env.log_error(f"Error initializing AWS clients: {e}")
+            raise
+
+    async def _greeting(self) -> None:
+        """Send greeting message to the user."""
+        if self.users_count == 1:
+            text = self.config.greeting or get_greeting_text(self.config.language)
+            self.ten_env.log_info(f"send greeting {text}")
+            await self._send_text_data(text, True, Role.Assistant)
+
+    async def _send_text_data(self, text: str, end_of_segment: bool, role: Role):
+        """Send text data to the user."""
+        try:
+            d = Data.create("text_data")
+            d.set_property_string(DATA_OUT_TEXT_DATA_PROPERTY_TEXT, text)
+
d.set_property_bool(DATA_OUT_TEXT_DATA_PROPERTY_TEXT_END_OF_SEGMENT, end_of_segment) + d.set_property_string("role", role) + asyncio.create_task(self.ten_env.send_data(d)) + except Exception as e: + self.ten_env.log_error(f"Error sending text data: {e}") + + async def _call_nova_model(self, input_text: str, image_buffers: List[bytes]) -> None: + """Call Bedrock's Nova model with text and video input.""" + try: + if not self.bedrock_client: + await self._initialize_aws_clients() + + if not input_text: + self.ten_env.log_info("Text input is empty") + return + + contents = [] + + # Process images + if image_buffers: + filtered_buffers = filter_images(image_buffers, ONE_BATCH_SEND_COUNT) + for image_data in filtered_buffers: + contents.append({ + "image": { + "format": 'jpeg', + "source": { + "bytes": image_data + } + } + }) + # Prepare memory + while len(self.memory) > self.config.max_memory_length: + self.memory.pop(0) + while len(self.memory) > 0 and self.memory[0]["role"] == "assistant": + self.memory.pop(0) + while len(self.memory) > 0 and self.memory[-1]["role"] == "user": + self.memory.pop(-1) + + # Prepare request + contents.append({"text": input_text}) + messages = [] + for m in self.memory: + # Convert string content to list format if needed + m_content = m["content"] + if isinstance(m_content, str): + m_content = [{"text": m_content}] + messages.append({ + "role": m["role"], + "content": m_content + }) + messages.append({ + "role": "user", + "content": contents + }) + + inf_params = { + "maxTokens": self.config.max_tokens, + "topP": self.config.tokP, + "temperature": self.config.temperature + } + + additional_config = { + "inferenceConfig": { + "topK": self.config.topK + } + } + + system = [{ + "text": self.config.prompt + }] + + # Make API call + start_time = time.time() + response = self.bedrock_client.converse_stream( + modelId=self.config.model_id, + system=system, + messages=messages, + inferenceConfig=inf_params, + additionalModelRequestFields=additional_config, + ) + full_content = await self._process_stream_response(response, start_time) + # async append memory + async def async_append_memory(): + if not self.config.is_memory_enabled: + return + image = merge_images(image_buffers) + contents = [] + if image: + contents.append({ + "image": { + "format": 'jpeg', + "source": { + "bytes": image + } + } + }) + contents.append({"text": input_text}) + self.memory.append({"role": Role.User, "content": contents}) + self.memory.append({"role": Role.Assistant, "content": [{"text": full_content}]}) + + asyncio.create_task(async_append_memory()) + except Exception as e: + traceback.print_exc() + self.ten_env.log_error(f"Error calling Nova model: {e}") + + async def _process_stream_response(self, response: Dict, start_time: float): + """Process streaming response from Nova model.""" + sentence = "" + full_content = "" + first_sentence_sent = False + + for event in response.get('stream'): + if "contentBlockDelta" in event: + if "text" in event["contentBlockDelta"]["delta"]: + content = event["contentBlockDelta"]["delta"]["text"] + full_content += content + + while True: + sentence, content, sentence_is_final = parse_sentence(sentence, content) + if not sentence or not sentence_is_final: + break + + self.ten_env.log_info(f"Processing sentence: [{sentence}]") + await self._send_text_data(sentence, False, Role.Assistant) + + if not first_sentence_sent: + first_sentence_sent = True + self.ten_env.log_info(f"First sentence latency: {(time.time() - start_time)*1000}ms") + + sentence = "" + + 
elif any(key in event for key in ["internalServerException", "modelStreamErrorException", + "throttlingException", "validationException"]): + self.ten_env.log_error(f"Stream error: {event}") + break + + elif 'metadata' in event: + if 'metrics' in event['metadata']: + self.ten_env.log_info(f"Nova model latency: {event['metadata']['metrics']['latencyMs']}ms") + + # Send final sentence + await self._send_text_data(sentence, True, Role.Assistant) + self.ten_env.log_info(f"Final sentence sent: [{sentence}]") + # Update metrics + self.processing_times.append(time.time() - start_time) + return full_content + + async def on_call_chat_completion(self, async_ten_env, **kargs): + raise NotImplementedError + + async def on_data_chat_completion(self, async_ten_env, **kargs): + raise NotImplementedError + + async def on_tools_update( + self, ten_env: AsyncTenEnv, tool + ) -> None: + """Called when a new tool is registered. Implement this method to process the new tool.""" + ten_env.log_info(f"on tools update {tool}") + # await self._update_session() \ No newline at end of file diff --git a/agents/ten_packages/extension/bedrock_llm_python/manifest.json b/agents/ten_packages/extension/bedrock_llm_python/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..eca39e1f0d873855cb6320461f823bda9b008d0f --- /dev/null +++ b/agents/ten_packages/extension/bedrock_llm_python/manifest.json @@ -0,0 +1,172 @@ +{ + "type": "extension", + "name": "bedrock_llm_python", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "package": { + "include": [ + "manifest.json", + "property.json", + "BUILD.gn", + "**.tent", + "**.py", + "README.md" + ] + }, + "api": { + "property": { + "base_uri": { + "type": "string" + }, + "api_key": { + "type": "string" + }, + "api_version": { + "type": "string" + }, + "model": { + "type": "string" + }, + "language": { + "type": "string" + }, + "prompt": { + "type": "string" + }, + "temperature": { + "type": "float32" + }, + "max_tokens": { + "type": "int32" + }, + "server_vad": { + "type": "bool" + }, + "input_transcript": { + "type": "bool" + }, + "sample_rate": { + "type": "int32" + }, + "stream_id": { + "type": "int32" + }, + "dump": { + "type": "bool" + }, + "greeting": { + "type": "string" + }, + "max_memory_length": { + "type": "int64" + }, + "is_memory_enabled": { + "type": "bool" + }, + "topP": { + "type": "float32" + }, + "topK": { + "type": "int32" + }, + "is_enable_video": { + "type": "bool" + } + }, + "video_frame_in": [ + { + "name": "video_frame", + "property": {} + } + ], + "data_in": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + } + } + } + ], + "data_out": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + } + } + }, + { + "name": "append", + "property": { + "text": { + "type": "string" + } + } + } + ], + "cmd_in": [ + { + "name": "flush" + }, + { + "name": "tool_register", + "property": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "parameters": { + "type": "string" + } + }, + "required": [ + "name", + "description", + "parameters" + ], + "result": { + "property": { + "response": { + "type": "string" + } + } + } + } + ], + "cmd_out": [ + { + "name": "flush" + }, + { + "name": "tool_call", + "property": { + "name": { + "type": "string" + }, + "args": { + "type": "string" + } + }, + "required": [ + "name" + ] + } + ], + "audio_frame_out": [ + { + "name": "pcm_frame" + } + ] + } +} \ 
No newline at end of file diff --git a/agents/ten_packages/extension/bedrock_llm_python/property.json b/agents/ten_packages/extension/bedrock_llm_python/property.json new file mode 100644 index 0000000000000000000000000000000000000000..2eeb2c7bc9a3006d054cb468ea808831946211f6 --- /dev/null +++ b/agents/ten_packages/extension/bedrock_llm_python/property.json @@ -0,0 +1,15 @@ +{ + "region": "us-west-2", + "access_key_id": "${env:AWS_ACCESS_KEY_ID}", + "secret_access_key": "${env:AWS_SECRET_ACCESS_KEY}", + "model": "amazon.nova-pro-v1:0", + "temperature": 0.7, + "max_tokens": 512, + "topP": 0.5, + "topK": 20, + "prompt": "Now you are an intelligent assistant with real-time interaction capabilities. I will provide you with a series of real-time video images. Please understand these images as video frames. Based on the images and the user's input, engage in a conversation with the user, remembering the dialogue content in a concise and clear manner.", + "greeting": "TEN Agent connected. I am Nova. How can I help you today?", + "max_memory_length": 10, + "is_memory_enabled": false, + "is_enable_video": true +} \ No newline at end of file diff --git a/agents/ten_packages/extension/bedrock_llm_python/requirements.txt b/agents/ten_packages/extension/bedrock_llm_python/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..3fc6cfbe927cf0281ec2b5b81993d95b1705d1f4 --- /dev/null +++ b/agents/ten_packages/extension/bedrock_llm_python/requirements.txt @@ -0,0 +1,3 @@ +boto3 +numpy +python-dotenv diff --git a/agents/ten_packages/extension/bedrock_llm_python/utils.py b/agents/ten_packages/extension/bedrock_llm_python/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8bf739aec2f6e073e63b9cf3d22aa73fcf4b04ab --- /dev/null +++ b/agents/ten_packages/extension/bedrock_llm_python/utils.py @@ -0,0 +1,118 @@ +"""Utility functions for BedrockV2V extension.""" +from io import BytesIO +from PIL import Image +from typing import List, Tuple, Any + +def is_punctuation(char: str) -> bool: + """Check if a character is a punctuation mark.""" + return char in [",", ",", ".", "。", "?", "?", "!", "!"] + +def parse_sentence(sentence: str, content: str) -> Tuple[str, str, bool]: + """Parse a sentence and return the complete sentence, remaining content, and completion status.""" + remain = "" + found_punc = False + + for char in content: + if not found_punc: + sentence += char + else: + remain += char + + if not found_punc and is_punctuation(char): + found_punc = True + + return sentence, remain, found_punc + +def rgb2base64jpeg(rgb_data: bytes, width: int, height: int) -> bytes: + """Convert RGB data to JPEG format.""" + # Convert the RGB image to a PIL Image + pil_image = Image.frombytes("RGBA", (width, height), bytes(rgb_data)) + pil_image = pil_image.convert("RGB") + + # Resize the image while maintaining its aspect ratio + pil_image = resize_image_keep_aspect(pil_image, 640) + + # Save the image to a BytesIO object in JPEG format + buffered = BytesIO() + pil_image.save(buffered, format="JPEG") + + return buffered.getvalue() + +def resize_image_keep_aspect(image: Image.Image, max_size: int = 512) -> Image.Image: + """Resize an image while maintaining its aspect ratio.""" + width, height = image.size + + if width <= max_size and height <= max_size: + return image + + aspect_ratio = width / height + + if width > height: + new_width = max_size + new_height = int(max_size / aspect_ratio) + else: + new_height = max_size + new_width = int(max_size * aspect_ratio) + + return image.resize((new_width, new_height)) + +def filter_images(image_array: List[Any], max_images: int = 10) -> List[Any]: + """Filter images to maintain a maximum count while preserving temporal distribution.""" + if len(image_array) <= max_images: + return image_array + + result = [] + skip = len(image_array) // max_images + + for i in range(0, len(image_array), skip): + result.append(image_array[i]) + if len(result) == max_images: + break + + return result + +# merge images into one image with a grid layout +def merge_images(image_array: List[Any], max_images: int = 4, width: int = 512) -> bytes: + """Merge multiple images into one image.""" + if len(image_array) == 0: + return b"" + if len(image_array) > max_images: + # Filter images to maintain a maximum count while preserving temporal distribution + image_array = filter_images(image_array, max_images) + + total_images = len(image_array) + # Calculate the number of rows and columns for the grid + rows = int((total_images - 1) / 2) + 1 + cols = 2 if total_images > 1 else 1 + + # Calculate the size of each image in the grid + image_width = width // cols + image_height = image_width + + # Create a new image to store the grid + grid = Image.new("RGB", (width, image_height * rows)) + + # Paste each image into the grid + for i, image in enumerate(image_array): + row = i // cols + col = i % cols + image = Image.open(BytesIO(image)) + image = resize_image_keep_aspect(image, image_width) + grid.paste(image, (col * image_width, row * image_height)) + + # Save the grid to a BytesIO object in JPEG format + buffered = BytesIO() + grid.save(buffered, format="JPEG") + + return buffered.getvalue() + + +def get_greeting_text(language: str) -> str: + """Get appropriate greeting text based on language.""" + greetings = { + "zh-CN": "你好。", + "ja-JP": "こんにちは", + "ko-KR": "안녕하세요", + "en-US": "Hi, there." + } + return greetings.get(language, "Hi, there.") diff --git a/agents/ten_packages/extension/bingsearch_tool_python/README.md b/agents/ten_packages/extension/bingsearch_tool_python/README.md new file mode 100644 index 0000000000000000000000000000000000000000..581fdf5ef16af6c61a188ccb7002ef7db878a6d0 --- /dev/null +++ b/agents/ten_packages/extension/bingsearch_tool_python/README.md @@ -0,0 +1,29 @@ +# bingsearch_tool_python + + + +## Features + + + +- xxx feature + +## API + +Refer to `api` definition in [manifest.json](manifest.json) and default values in [property.json](property.json). + + + +## Development + +### Build + + + +### Unit test + + + +## Misc + + diff --git a/agents/ten_packages/extension/bingsearch_tool_python/__init__.py b/agents/ten_packages/extension/bingsearch_tool_python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8cd75ddef4ae8e15366d6ed94ee557e6481a4989 --- /dev/null +++ b/agents/ten_packages/extension/bingsearch_tool_python/__init__.py @@ -0,0 +1,8 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved. +# +# +from . import addon diff --git a/agents/ten_packages/extension/bingsearch_tool_python/addon.py b/agents/ten_packages/extension/bingsearch_tool_python/addon.py new file mode 100644 index 0000000000000000000000000000000000000000..4d066229b9c2f3fce6b2d27ee56ed2691a296645 --- /dev/null +++ b/agents/ten_packages/extension/bingsearch_tool_python/addon.py @@ -0,0 +1,21 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved.
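As a quick illustration of how the image helpers above compose: `filter_images` keeps an evenly spaced subset of frames and `merge_images` pastes the survivors onto a two-column grid. A small self-contained sketch with synthetic frames (the flat `utils` import is an assumption about running inside this package):

```python
# Sketch: exercise filter_images/merge_images from utils.py above.
# Frames are synthetic solid-color JPEGs, for illustration only.
from io import BytesIO
from PIL import Image

from utils import filter_images, merge_images  # assumes this package's utils.py

def make_frame(color: tuple) -> bytes:
    buf = BytesIO()
    Image.new("RGB", (320, 240), color).save(buf, format="JPEG")
    return buf.getvalue()

# Ten "video frames"; filter_images keeps an evenly spaced subset of four.
frames = [make_frame((i * 25, 0, 0)) for i in range(10)]
assert len(filter_images(frames, max_images=4)) == 4

# merge_images samples and pastes the survivors onto a 512px-wide grid:
# 4 images -> 2 rows x 2 cols, each cell 256px wide.
grid_jpeg = merge_images(frames, max_images=4, width=512)
print(Image.open(BytesIO(grid_jpeg)).size)  # (512, 512)
```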
+# +# +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + + +@register_addon_as_extension("bingsearch_tool_python") +class BingSearchToolExtensionAddon(Addon): + + def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None: + from .extension import BingSearchToolExtension + ten_env.log_info("BingSearchToolExtensionAddon on_create_instance") + ten_env.on_create_instance_done(BingSearchToolExtension(name), context) diff --git a/agents/ten_packages/extension/bingsearch_tool_python/extension.py b/agents/ten_packages/extension/bingsearch_tool_python/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..14c8aa597e96402cdeaaa91fd3699901ea301b29 --- /dev/null +++ b/agents/ten_packages/extension/bingsearch_tool_python/extension.py @@ -0,0 +1,167 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved. +# +# +import json +import aiohttp +from typing import Any, List + +from ten import ( + Cmd, +) +from ten.async_ten_env import AsyncTenEnv +from ten_ai_base.config import BaseConfig +from ten_ai_base.types import LLMToolMetadata, LLMToolMetadataParameter, LLMToolResult +from ten_ai_base.llm_tool import AsyncLLMToolBaseExtension + +CMD_TOOL_REGISTER = "tool_register" +CMD_TOOL_CALL = "tool_call" +CMD_PROPERTY_NAME = "name" +CMD_PROPERTY_ARGS = "args" + +TOOL_REGISTER_PROPERTY_NAME = "name" +TOOL_REGISTER_PROPERTY_DESCRIPTION = "description" +TOOL_REGISTER_PROPERTY_PARAMETERS = "parameters" +TOOL_CALLBACK = "callback" + +TOOL_NAME = "bing_search" +TOOL_DESCRIPTION = "Use Bing.com to search for the latest information. Call this function if you are not sure about the answer." +TOOL_PARAMETERS = { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "The search query to call Bing Search.", + } + }, + "required": ["query"], +} + +PROPERTY_API_KEY = "api_key" # Required + +DEFAULT_BING_SEARCH_ENDPOINT = "https://api.bing.microsoft.com/v7.0/search" + +# DEFAULT_BING_SEARCH_ENDPOINT is the default endpoint for the Bing Web Search API. +# Currently there are two web-based Bing Search services available on Azure: +# Bing Web Search[1] and Bing Custom Search[2]. Both services provide a wide +# range of search results, but Bing Custom Search additionally requires you to +# provide a custom search instance, `customConfig`. Both services are available +# for BingSearchAPIWrapper. +# History of the Azure Bing Search API: +# Before being listed in Azure Marketplace as a separate service, the Bing Search +# APIs were part of Azure Cognitive Services; each resource had a unique endpoint, +# and the user had to specify that endpoint when making a request. After the +# transition to Azure Marketplace, the endpoint is standardized and the user no +# longer needs to specify it[3]. +# Reference: +# 1. https://learn.microsoft.com/en-us/bing/search-apis/bing-web-search/overview +# 2. https://learn.microsoft.com/en-us/bing/search-apis/bing-custom-search/overview +# 3. https://azure.microsoft.com/en-in/updates/bing-search-apis-will-transition-from-azure-cognitive-services-to-azure-marketplace-on-31-october-2023/ + +class BingSearchToolConfig(BaseConfig): + api_key: str = "" + +class BingSearchToolExtension(AsyncLLMToolBaseExtension): + + def __init__(self, name: str) -> None: + super().__init__(name) + self.session = None + self.config = None + self.k = 10 + + async def on_init(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_debug("on_init") + self.session = aiohttp.ClientSession() + await super().on_init(ten_env) + + async def on_start(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_debug("on_start") + await super().on_start(ten_env) + + self.config = await BingSearchToolConfig.create_async(ten_env=ten_env) + + if not self.config.api_key: + ten_env.log_info("API key is missing, exiting on_start") + return + + async def on_stop(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_debug("on_stop") + + # clean up resources + if self.session and not self.session.closed: + await self.session.close() + self.session = None # Ensure it can't be reused accidentally + + async def on_cmd(self, ten_env: AsyncTenEnv, cmd: Cmd) -> None: + cmd_name = cmd.get_name() + ten_env.log_debug("on_cmd name {}".format(cmd_name)) + + await super().on_cmd(ten_env, cmd) + + def get_tool_metadata(self, ten_env: AsyncTenEnv) -> list[LLMToolMetadata]: + return [ + LLMToolMetadata( + name=TOOL_NAME, + description=TOOL_DESCRIPTION, + parameters=[ + LLMToolMetadataParameter( + name="query", + type="string", + description="The search query to call Bing Search.", + required=True, + ), + ], + ) + ] + + async def run_tool( + self, ten_env: AsyncTenEnv, name: str, args: dict + ) -> LLMToolResult | None: + if name == TOOL_NAME: + result = await self._do_search(ten_env, args) + return {"content": json.dumps(result)} + + async def _do_search(self, ten_env: AsyncTenEnv, args: dict) -> Any: + if "query" not in args: + raise ValueError("Failed to get property") + + query = args["query"] + snippets = [] + results = await self._bing_search_results(ten_env, query, count=self.k) + if len(results) == 0: + return "No good Bing Search Result was found" + + for result in results: + snippets.append(result["snippet"]) + + return snippets + + async def _initialize_session(self, ten_env: AsyncTenEnv): + if self.session is None or self.session.closed: + ten_env.log_debug("Initializing new session") + self.session = aiohttp.ClientSession() + + async def _bing_search_results(self, ten_env: AsyncTenEnv, search_term: str, count: int) -> List[dict]: + await self._initialize_session(ten_env) + headers = {"Ocp-Apim-Subscription-Key": self.config.api_key} + params = { + "q": search_term, + "count": count, + "textDecorations": "true", + "textFormat": "HTML", + } + + # Use the shared session directly; entering it with `async with` would + # close it after the first request and defeat connection reuse. + async with self.session.get( + DEFAULT_BING_SEARCH_ENDPOINT, headers=headers, params=params + ) as response: + response.raise_for_status() + search_results = await response.json() + + if "webPages" in search_results: + return search_results["webPages"]["value"] + return [] diff --git a/agents/ten_packages/extension/bingsearch_tool_python/manifest.json b/agents/ten_packages/extension/bingsearch_tool_python/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..4caef2c1371c29fd8bbc6f27119d3d8685641ecb --- /dev/null +++ b/agents/ten_packages/extension/bingsearch_tool_python/manifest.json @@ -0,0 +1,82 @@ +{ + "type": "extension", + 
"name": "bingsearch_tool_python", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "package": { + "include": [ + "manifest.json", + "property.json", + "BUILD.gn", + "**.tent", + "**.py", + "README.md" + ] + }, + "api": { + "property": { + "api_key": { + "type": "string" + } + }, + "cmd_out": [ + { + "name": "tool_register", + "property": { + "tool": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "parameters": { + "type": "array", + "items": { + "type": "object", + "properties": {} + } + } + }, + "required": [ + "name", + "description", + "parameters" + ] + } + }, + "result": { + "property": { + "response": { + "type": "string" + } + } + } + } + ], + "cmd_in": [ + { + "name": "tool_call", + "property": { + "name": { + "type": "string" + }, + "args": { + "type": "string" + } + }, + "required": [ + "name" + ] + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/bingsearch_tool_python/property.json b/agents/ten_packages/extension/bingsearch_tool_python/property.json new file mode 100644 index 0000000000000000000000000000000000000000..d0cf467d7dd4c47623ca2c3a8085e098c8110214 --- /dev/null +++ b/agents/ten_packages/extension/bingsearch_tool_python/property.json @@ -0,0 +1,3 @@ +{ + "api_key": "${env:BING_API_KEY|}" +} \ No newline at end of file diff --git a/agents/ten_packages/extension/bingsearch_tool_python/requirements.txt b/agents/ten_packages/extension/bingsearch_tool_python/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..ce2357185aba0b35ffd5d50fb0f6500161ec0293 --- /dev/null +++ b/agents/ten_packages/extension/bingsearch_tool_python/requirements.txt @@ -0,0 +1 @@ +aiohttp \ No newline at end of file diff --git a/agents/ten_packages/extension/bytedance_tts/README.md b/agents/ten_packages/extension/bytedance_tts/README.md new file mode 100644 index 0000000000000000000000000000000000000000..944fc8423e92ec587c0f87ca0ae562b5294a70ab --- /dev/null +++ b/agents/ten_packages/extension/bytedance_tts/README.md @@ -0,0 +1,29 @@ +# bytedance_tts + + + +## Features + + + +- xxx feature + +## API + +Refer to `api` definition in [manifest.json] and default values in [property.json](property.json). + + + +## Development + +### Build + + + +### Unit test + + + +## Misc + + diff --git a/agents/ten_packages/extension/bytedance_tts/__init__.py b/agents/ten_packages/extension/bytedance_tts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..72593ab2259f95627bdd500fe3d062984e7f44c6 --- /dev/null +++ b/agents/ten_packages/extension/bytedance_tts/__init__.py @@ -0,0 +1,6 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. +# +from . import addon diff --git a/agents/ten_packages/extension/bytedance_tts/addon.py b/agents/ten_packages/extension/bytedance_tts/addon.py new file mode 100644 index 0000000000000000000000000000000000000000..0ceaed9c8995ae7a2d504cc4ece66d443706c319 --- /dev/null +++ b/agents/ten_packages/extension/bytedance_tts/addon.py @@ -0,0 +1,20 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. 
+# +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + + +@register_addon_as_extension("bytedance_tts") +class BytedanceTTSExtensionAddon(Addon): + + def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None: + from .extension import BytedanceTTSExtension + + ten_env.log_info("BytedanceTTSExtensionAddon on_create_instance") + ten_env.on_create_instance_done(BytedanceTTSExtension(name), context) diff --git a/agents/ten_packages/extension/bytedance_tts/bytedance_tts.py b/agents/ten_packages/extension/bytedance_tts/bytedance_tts.py new file mode 100644 index 0000000000000000000000000000000000000000..237eef7e1aed142147779218b98a2dcb58b81e31 --- /dev/null +++ b/agents/ten_packages/extension/bytedance_tts/bytedance_tts.py @@ -0,0 +1,264 @@ +# +# +# Agora Real Time Engagement +# Created by XinHui Li in 2024. +# Copyright (c) 2024 Agora IO. All rights reserved. +# +# + +from dataclasses import dataclass +from typing import AsyncIterator, Tuple + +from ten_ai_base.config import BaseConfig +from ten import ( + AsyncTenEnv, +) + +import copy +import websockets +import uuid +import json +import gzip +import asyncio +import threading +from datetime import datetime + + +MESSAGE_TYPES = { + 11: "audio-only server response", + 12: "frontend server response", + 15: "error message from server", +} +MESSAGE_TYPE_SPECIFIC_FLAGS = { + 0: "no sequence number", + 1: "sequence number > 0", + 2: "last message from server (seq < 0)", + 3: "sequence number < 0", +} +MESSAGE_SERIALIZATION_METHODS = {0: "no serialization", 1: "JSON", 15: "custom type"} +MESSAGE_COMPRESSIONS = {0: "no compression", 1: "gzip", 15: "custom compression method"} + +LATENCY_SAMPLE_INTERVAL_MS = 5 + + +@dataclass +class TTSConfig(BaseConfig): + # Parameters, refer to: https://www.volcengine.com/docs/6561/79823. + appid: str = "" + token: str = "" + + # Refer to: https://www.volcengine.com/docs/6561/1257544. + voice_type: str = "BV001_streaming" + sample_rate: int = 16000 + api_url: str = "wss://openspeech.bytedance.com/api/v1/tts/ws_binary" + cluster: str = "volcano_tts" + + +class TTSClient: + def __init__(self, config: TTSConfig, ten_env: AsyncTenEnv) -> None: + self.config = config + self.websocket = None + self.ten_env = ten_env + + # Refer to: https://www.volcengine.com/docs/6561/79823. + self.request_template = { + "app": { + "appid": self.config.appid, + "token": "access_token", + "cluster": self.config.cluster, + }, + "user": {"uid": ""}, # Any non-empty string, used for tracing. + "audio": { + "rate": self.config.sample_rate, + "voice_type": self.config.voice_type, + "encoding": "pcm", + "speed_ratio": 1.0, + "volume_ratio": 1.0, + "pitch_ratio": 1.0, + }, + "request": { + "reqid": "", # Must be unique for each request. + "text": "", # Text to be synthesized. + "text_type": "plain", + "operation": "submit", + }, + } + + # version: b0001 (4 bits) + # header size: b0001 (4 bits) + # message type: b0001 (Full client request) (4bits) + # message type specific flags: b0000 (none) (4bits) + # message serialization method: b0001 (JSON) (4 bits) + # message compression: b0001 (gzip) (4bits) + # reserved data: 0x00 (1 byte) + self.default_header = bytearray(b"\x11\x10\x11\x00") + self._cancel = threading.Event() + + # Latency. 
+ self._latest_record_time = None + + def is_cancelled(self) -> bool: + return self._cancel.is_set() + + async def cancel(self) -> None: + self._cancel.set() + + async def connect(self) -> None: + header = {"Authorization": f"Bearer; {self.config.token}"} + self.websocket = await websockets.connect( + self.config.api_url, + extra_headers=header, + ping_interval=None, + close_timeout=1, # Fast close, as the `flush` cmd will close the connection. + ) + self.ten_env.log_info("Websocket connection established.") + + async def close(self) -> None: + if self.websocket is not None: + await self.websocket.close() + self.websocket = None + self.ten_env.log_info("Websocket connection closed.") + else: + self.ten_env.log_info("Websocket is not connected.") + + async def reconnect(self) -> None: + await self.close() + await self.connect() + + def parse_response(self, response: websockets.Data) -> Tuple[bytes, bool]: + protocol_version = response[0] >> 4 + header_size = response[0] & 0x0F + message_type = response[1] >> 4 + message_type_specific_flags = response[1] & 0x0F + serialization_method = response[2] >> 4 + message_compression = response[2] & 0x0F + reserved = response[3] + header_extensions = response[4 : header_size * 4] + payload = response[header_size * 4 :] + self.ten_env.log_debug( + f"Protocol version: {protocol_version:#x} - version {protocol_version}" + ) + self.ten_env.log_debug( + f"Header size: {header_size:#x} - {header_size * 4} bytes" + ) + self.ten_env.log_debug( + f"Message type: {message_type:#x} - {MESSAGE_TYPES[message_type]}" + ) + self.ten_env.log_debug( + f"Message type specific flags: {message_type_specific_flags:#x} - {MESSAGE_TYPE_SPECIFIC_FLAGS[message_type_specific_flags]}" + ) + self.ten_env.log_debug( + f"Message serialization method: {serialization_method:#x} - {MESSAGE_SERIALIZATION_METHODS[serialization_method]}" + ) + self.ten_env.log_debug( + f"Message compression: {message_compression:#x} - {MESSAGE_COMPRESSIONS[message_compression]}" + ) + self.ten_env.log_debug(f"Reserved: {reserved:#04x}") + + if header_size != 1: + self.ten_env.log_debug(f"Header extensions: {header_extensions}") + + if message_type == 0xB: # audio-only server response + if message_type_specific_flags == 0: # no sequence number as ACK + self.ten_env.log_debug("Payload size: 0") + return None, False + else: + sequence_number = int.from_bytes(payload[:4], "big", signed=True) + payload_size = int.from_bytes(payload[4:8], "big", signed=False) + payload = payload[8:] + self.ten_env.log_debug(f"Sequence number: {sequence_number}") + self.ten_env.log_debug(f"Payload size: {payload_size} bytes") + if sequence_number < 0: + return payload, True + else: + return payload, False + elif message_type == 0xF: + code = int.from_bytes(payload[:4], "big", signed=False) + msg_size = int.from_bytes(payload[4:8], "big", signed=False) + error_msg = payload[8:] + if message_compression == 1: + error_msg = gzip.decompress(error_msg) + error_msg = str(error_msg, "utf-8") + self.ten_env.log_error(f"Error message code: {code}") + self.ten_env.log_error(f"Error message size: {msg_size} bytes") + self.ten_env.log_error(f"Error message: {error_msg}") + return None, True + elif message_type == 0xC: + msg_size = int.from_bytes(payload[:4], "big", signed=False) + payload = payload[4:] + if message_compression == 1: + payload = gzip.decompress(payload) + self.ten_env.log_debug(f"Frontend message: {payload}") + # Frontend messages carry no audio and do not end the stream. + return None, False + else: + self.ten_env.log_error("undefined message type!") + return None, True + + def record_latency(self, request_id: str, start: datetime) -> None: + end_time = datetime.now() + + if self._latest_record_time: + sample_interval = datetime.now() - self._latest_record_time + if sample_interval.total_seconds() * 1000 < LATENCY_SAMPLE_INTERVAL_MS: + return + + self._latest_record_time = end_time + latency = int((end_time - start).total_seconds() * 1000) + self.ten_env.log_info(f"Request ({request_id}), ttfb {latency}ms.") + + async def text_to_speech_stream(self, text: str) -> AsyncIterator[bytes]: + ws = self.websocket + if ws is None: + await self.connect() + ws = self.websocket + + start_time = datetime.now() + request_id = str(uuid.uuid4()) + + request = copy.deepcopy(self.request_template) + request["request"]["reqid"] = request_id + request["request"]["text"] = text + request["user"]["uid"] = str(uuid.uuid4()) + + request_bytes = str.encode(json.dumps(request)) + request_bytes = gzip.compress(request_bytes) + full_request = bytearray(self.default_header) + + # payload size(4 bytes) + full_request.extend((len(request_bytes)).to_bytes(4, "big")) + + # payload + full_request.extend(request_bytes) + + try: + await ws.send(full_request) + self.ten_env.log_info(f"Sent: {request}") + + while True: + if self.is_cancelled(): + self.ten_env.log_info(f"Request ({request_id}) has been cancelled.") + + # Current connection should be closed, as the server will not drop the remaining data. + await self.close() + self._cancel.clear() + break + + resp = await ws.recv() + payload, done = self.parse_response(resp) + + if payload: + yield payload + self.record_latency(request_id, start_time) + + if done: + self.ten_env.log_info( + f"Response is completed for request: {request_id}." + ) + break + + except websockets.exceptions.ConnectionClosedError as e: + self.ten_env.log_error( + f"Connection is closed with error: {e}, request: {request_id}." + ) + await self.connect() + except asyncio.TimeoutError: + self.ten_env.log_error("Timeout waiting for response.") diff --git a/agents/ten_packages/extension/bytedance_tts/extension.py b/agents/ten_packages/extension/bytedance_tts/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..482f6a1ae428f047a1db104ede6c0f68a156e221 --- /dev/null +++ b/agents/ten_packages/extension/bytedance_tts/extension.py @@ -0,0 +1,60 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information.
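The nibble-packed header documented above can be decoded mechanically; here is a small sketch that unpacks the client's own `default_header` and checks it against the documented field values.

```python
# Sketch: decode the 4-byte TTS protocol header described above.
# b"\x11\x10\x11\x00" is the client's default_header.
header = b"\x11\x10\x11\x00"

protocol_version = header[0] >> 4   # 0b0001 -> protocol version 1
header_size = header[0] & 0x0F      # 0b0001 -> 1 * 4 = 4 bytes
message_type = header[1] >> 4       # 0b0001 -> full client request
type_flags = header[1] & 0x0F       # 0b0000 -> no type-specific flags
serialization = header[2] >> 4      # 0b0001 -> JSON
compression = header[2] & 0x0F      # 0b0001 -> gzip
reserved = header[3]                # 0x00

assert (protocol_version, header_size, message_type, type_flags) == (1, 1, 1, 0)
assert (serialization, compression, reserved) == (1, 1, 0)
```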
+# +import traceback + +from .bytedance_tts import TTSConfig, TTSClient +from ten import ( + AsyncTenEnv, +) +from ten_ai_base.tts import AsyncTTSBaseExtension + + +class BytedanceTTSExtension(AsyncTTSBaseExtension): + def __init__(self, name: str) -> None: + super().__init__(name) + self.config = None + self.client = None + + async def on_init(self, ten_env: AsyncTenEnv) -> None: + await super().on_init(ten_env) + ten_env.log_debug("on_init") + + async def on_start(self, ten_env: AsyncTenEnv) -> None: + try: + await super().on_start(ten_env) + ten_env.log_debug("on_start") + self.config = await TTSConfig.create_async(ten_env=ten_env) + + if not self.config.appid: + raise ValueError("appid is required") + + if not self.config.token: + raise ValueError("token is required") + + self.client = TTSClient(config=self.config, ten_env=ten_env) + await self.client.connect() + except Exception: + ten_env.log_error(f"on_start failed: {traceback.format_exc()}") + + async def on_stop(self, ten_env: AsyncTenEnv) -> None: + if self.client: + await self.client.close() + + await super().on_stop(ten_env) + ten_env.log_debug("on_stop") + + async def on_deinit(self, ten_env: AsyncTenEnv) -> None: + await super().on_deinit(ten_env) + ten_env.log_debug("on_deinit") + + async def on_request_tts( + self, ten_env: AsyncTenEnv, input_text: str, end_of_segment: bool + ) -> None: + async for audio_data in self.client.text_to_speech_stream(input_text): + await self.send_audio_out(ten_env, audio_data) + + async def on_cancel_tts(self, ten_env: AsyncTenEnv) -> None: + await self.client.cancel() diff --git a/agents/ten_packages/extension/bytedance_tts/manifest.json b/agents/ten_packages/extension/bytedance_tts/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..580bf5da47a4a8ac0d964efec71087305412c179 --- /dev/null +++ b/agents/ten_packages/extension/bytedance_tts/manifest.json @@ -0,0 +1,70 @@ +{ + "type": "extension", + "name": "bytedance_tts", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "package": { + "include": [ + "manifest.json", + "property.json", + "BUILD.gn", + "**.tent", + "**.py", + "README.md", + "tests/**" + ] + }, + "api": { + "property": { + "appid": { + "type": "string" + }, + "token": { + "type": "string" + }, + "voice_type": { + "type": "string" + }, + "sample_rate": { + "type": "int64" + }, + "api_url": { + "type": "string" + }, + "cluster": { + "type": "string" + } + }, + "data_in": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + } + } + } + ], + "cmd_in": [ + { + "name": "flush" + } + ], + "cmd_out": [ + { + "name": "flush" + } + ], + "audio_frame_out": [ + { + "name": "pcm_frame" + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/bytedance_tts/property.json b/agents/ten_packages/extension/bytedance_tts/property.json new file mode 100644 index 0000000000000000000000000000000000000000..8cae48e915a47eec29324e24aa8c2b9a863facce --- /dev/null +++ b/agents/ten_packages/extension/bytedance_tts/property.json @@ -0,0 +1,8 @@ +{ + "appid": "${env:BYTEDANCE_TTS_APPID}", + "token": "${env:BYTEDANCE_TTS_TOKEN}", + "sample_rate": 16000, + "voice_type": "BV001_streaming", + "api_url": "wss://openspeech.bytedance.com/api/v1/tts/ws_binary", + "cluster": "volcano_tts" +} \ No newline at end of file diff --git a/agents/ten_packages/extension/bytedance_tts/requirements.txt b/agents/ten_packages/extension/bytedance_tts/requirements.txt new 
file mode 100644 index 0000000000000000000000000000000000000000..5f29f909892ecc844cfcc5c03df003062c38d381 --- /dev/null +++ b/agents/ten_packages/extension/bytedance_tts/requirements.txt @@ -0,0 +1 @@ +websockets==13.1 \ No newline at end of file diff --git a/agents/ten_packages/extension/cartesia_tts/README.md b/agents/ten_packages/extension/cartesia_tts/README.md new file mode 100644 index 0000000000000000000000000000000000000000..931f0029d3aa0e875ce3f73f20273fca7ae0c364 --- /dev/null +++ b/agents/ten_packages/extension/cartesia_tts/README.md @@ -0,0 +1,29 @@ +# cartesia_tts + + + +## Features + + + +- xxx feature + +## API + +Refer to `api` definition in [manifest.json](manifest.json) and default values in [property.json](property.json). + + + +## Development + +### Build + + + +### Unit test + + + +## Misc + + diff --git a/agents/ten_packages/extension/cartesia_tts/__init__.py b/agents/ten_packages/extension/cartesia_tts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..72593ab2259f95627bdd500fe3d062984e7f44c6 --- /dev/null +++ b/agents/ten_packages/extension/cartesia_tts/__init__.py @@ -0,0 +1,6 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. +# +from . import addon diff --git a/agents/ten_packages/extension/cartesia_tts/addon.py b/agents/ten_packages/extension/cartesia_tts/addon.py new file mode 100644 index 0000000000000000000000000000000000000000..55d6343584824b6b952bf3d115689accb2059cc4 --- /dev/null +++ b/agents/ten_packages/extension/cartesia_tts/addon.py @@ -0,0 +1,19 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. +# +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + + +@register_addon_as_extension("cartesia_tts") +class CartesiaTTSExtensionAddon(Addon): + + def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None: + from .extension import CartesiaTTSExtension + ten_env.log_info("CartesiaTTSExtensionAddon on_create_instance") + ten_env.on_create_instance_done(CartesiaTTSExtension(name), context) diff --git a/agents/ten_packages/extension/cartesia_tts/cartesia_tts.py b/agents/ten_packages/extension/cartesia_tts/cartesia_tts.py new file mode 100644 index 0000000000000000000000000000000000000000..4cc79b493003e4352909a440205507fb77738d5c --- /dev/null +++ b/agents/ten_packages/extension/cartesia_tts/cartesia_tts.py @@ -0,0 +1,42 @@ +# +# +# Agora Real Time Engagement +# Created by XinHui Li in 2024. +# Copyright (c) 2024 Agora IO. All rights reserved.
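Before moving on to Cartesia, a recap of the request framing used by `text_to_speech_stream` above: a full client request is the 4-byte header, a big-endian 4-byte payload length, then the gzipped JSON payload. A standalone sketch with a round-trip check:

```python
# Sketch: frame a full client request the same way the ByteDance client does:
# default header + 4-byte big-endian payload size + gzipped JSON payload.
import gzip
import json

DEFAULT_HEADER = bytearray(b"\x11\x10\x11\x00")

def frame_request(request: dict) -> bytes:
    payload = gzip.compress(json.dumps(request).encode("utf-8"))
    frame = bytearray(DEFAULT_HEADER)
    frame.extend(len(payload).to_bytes(4, "big"))  # payload size
    frame.extend(payload)                          # payload body
    return bytes(frame)

# Round-trip check on the payload portion.
framed = frame_request({"request": {"reqid": "demo", "text": "hello"}})
size = int.from_bytes(framed[4:8], "big")
decoded = json.loads(gzip.decompress(framed[8 : 8 + size]))
assert decoded["request"]["text"] == "hello"
```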
+# +# + +from dataclasses import dataclass +from typing import AsyncIterator +from cartesia import AsyncCartesia + +from ten_ai_base.config import BaseConfig + + +@dataclass +class CartesiaTTSConfig(BaseConfig): + api_key: str = "" + language: str = "en" + model_id: str = "sonic-english" + request_timeout_seconds: int = 10 + sample_rate: int = 16000 + voice_id: str = "f9836c6e-a0bd-460e-9d3c-f7299fa60f94" + +class CartesiaTTS: + def __init__(self, config: CartesiaTTSConfig) -> None: + self.config = config + self.client = AsyncCartesia(api_key=config.api_key, timeout=config.request_timeout_seconds) + + def text_to_speech_stream(self, text: str) -> AsyncIterator[bytes]: + return self.client.tts.sse( + language=self.config.language, + model_id=self.config.model_id, + output_format={ + "container": "raw", + "encoding": "pcm_s16le", + "sample_rate": self.config.sample_rate, + }, + stream=True, + transcript=text, + voice_id=self.config.voice_id, + ) \ No newline at end of file diff --git a/agents/ten_packages/extension/cartesia_tts/extension.py b/agents/ten_packages/extension/cartesia_tts/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..03a36b981b62603dcf6176344bc7a64986d6c3e5 --- /dev/null +++ b/agents/ten_packages/extension/cartesia_tts/extension.py @@ -0,0 +1,55 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. +# +import traceback + +from .cartesia_tts import CartesiaTTS, CartesiaTTSConfig +from ten import ( + AsyncTenEnv, +) +from ten_ai_base.tts import AsyncTTSBaseExtension + + +class CartesiaTTSExtension(AsyncTTSBaseExtension): + def __init__(self, name: str) -> None: + super().__init__(name) + self.config = None + self.client = None + + async def on_init(self, ten_env: AsyncTenEnv) -> None: + await super().on_init(ten_env) + ten_env.log_debug("on_init") + + async def on_start(self, ten_env: AsyncTenEnv) -> None: + try: + await super().on_start(ten_env) + ten_env.log_debug("on_start") + self.config = await CartesiaTTSConfig.create_async(ten_env=ten_env) + + if not self.config.api_key: + raise ValueError("api_key is required") + + self.client = CartesiaTTS(self.config) + except Exception: + ten_env.log_error(f"on_start failed: {traceback.format_exc()}") + + async def on_stop(self, ten_env: AsyncTenEnv) -> None: + await super().on_stop(ten_env) + ten_env.log_debug("on_stop") + + async def on_deinit(self, ten_env: AsyncTenEnv) -> None: + await super().on_deinit(ten_env) + ten_env.log_debug("on_deinit") + + async def on_request_tts( + self, ten_env: AsyncTenEnv, input_text: str, end_of_segment: bool + ) -> None: + audio_stream = await self.client.text_to_speech_stream(input_text) + + async for audio_data in audio_stream: + await self.send_audio_out(ten_env, audio_data["audio"]) + + async def on_cancel_tts(self, ten_env: AsyncTenEnv) -> None: + return await super().on_cancel_tts(ten_env) diff --git a/agents/ten_packages/extension/cartesia_tts/manifest.json b/agents/ten_packages/extension/cartesia_tts/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..43ae8ba9ef666b8c101774c937bb5bdeb27809bc --- /dev/null +++ b/agents/ten_packages/extension/cartesia_tts/manifest.json @@ -0,0 +1,67 @@ +{ + "type": "extension", + "name": "cartesia_tts", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "package": { + "include": [ + "manifest.json", + 
"property.json", + "BUILD.gn", + "**.tent", + "**.py", + "README.md", + "tests/**" + ] + }, + "api": { + "property": { + "api_key": { + "type": "string" + }, + "language": { + "type": "string" + }, + "model_id": { + "type": "string" + }, + "sample_rate": { + "type": "int64" + }, + "voice_id": { + "type": "string" + } + }, + "data_in": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + } + } + } + ], + "cmd_in": [ + { + "name": "flush" + } + ], + "cmd_out": [ + { + "name": "flush" + } + ], + "audio_frame_out": [ + { + "name": "pcm_frame" + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/cartesia_tts/property.json b/agents/ten_packages/extension/cartesia_tts/property.json new file mode 100644 index 0000000000000000000000000000000000000000..8650c298d9159c147aff747359d1955472105e1c --- /dev/null +++ b/agents/ten_packages/extension/cartesia_tts/property.json @@ -0,0 +1,7 @@ +{ + "api_key": "${env:CARTESIA_API_KEY}", + "language": "en", + "model_id": "sonic-english", + "sample_rate": 16000, + "voice_id": "f9836c6e-a0bd-460e-9d3c-f7299fa60f94" +} \ No newline at end of file diff --git a/agents/ten_packages/extension/cartesia_tts/requirements.txt b/agents/ten_packages/extension/cartesia_tts/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..59c3d54e0cc8e6786630bfbedd282753d1bbfb6e --- /dev/null +++ b/agents/ten_packages/extension/cartesia_tts/requirements.txt @@ -0,0 +1 @@ +cartesia \ No newline at end of file diff --git a/agents/ten_packages/extension/cartesia_tts/tests/bin/start b/agents/ten_packages/extension/cartesia_tts/tests/bin/start new file mode 100644 index 0000000000000000000000000000000000000000..04d784ea179c32ded5fc50565fb28b4ae0585c6b --- /dev/null +++ b/agents/ten_packages/extension/cartesia_tts/tests/bin/start @@ -0,0 +1,21 @@ +#!/bin/bash + +set -e + +cd "$(dirname "${BASH_SOURCE[0]}")/../.." + +export PYTHONPATH=.ten/app:.ten/app/ten_packages/system/ten_runtime_python/lib:.ten/app/ten_packages/system/ten_runtime_python/interface:.ten/app/ten_packages/system/ten_ai_base/interface:$PYTHONPATH + +# If the Python app imports some modules that are compiled with a different +# version of libstdc++ (ex: PyTorch), the Python app may encounter confusing +# errors. To solve this problem, we can preload the correct version of +# libstdc++. +# +# export LD_PRELOAD=/lib/x86_64-linux-gnu/libstdc++.so.6 +# +# Another solution is to make sure the module 'ten_runtime_python' is imported +# _after_ the module that requires another version of libstdc++ is imported. +# +# Refer to https://github.com/pytorch/pytorch/issues/102360?from_wecom=1#issuecomment-1708989096 + +pytest tests/ "$@" \ No newline at end of file diff --git a/agents/ten_packages/extension/cartesia_tts/tests/conftest.py b/agents/ten_packages/extension/cartesia_tts/tests/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..9a2175e36e06ea1b6b40e07c5cf1e134ee1aec17 --- /dev/null +++ b/agents/ten_packages/extension/cartesia_tts/tests/conftest.py @@ -0,0 +1,36 @@ +# +# Copyright © 2025 Agora +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0, with certain conditions. +# Refer to the "LICENSE" file in the root directory for more information. 
+# +import pytest +import sys +import os +from ten import ( + unregister_all_addons_and_cleanup, +) + + +@pytest.fixture(scope="session", autouse=True) +def global_setup_and_teardown(): + # Set the environment variable. + os.environ["TEN_DISABLE_ADDON_UNREGISTER_AFTER_APP_CLOSE"] = "true" + + # Verify the environment variable is correctly set. + if ( + "TEN_DISABLE_ADDON_UNREGISTER_AFTER_APP_CLOSE" not in os.environ + or os.environ["TEN_DISABLE_ADDON_UNREGISTER_AFTER_APP_CLOSE"] != "true" + ): + print( + "Failed to set TEN_DISABLE_ADDON_UNREGISTER_AFTER_APP_CLOSE", + file=sys.stderr, + ) + sys.exit(1) + + # Yield control to the test; after the test execution is complete, continue + # with the teardown process. + yield + + # Teardown part. + unregister_all_addons_and_cleanup() \ No newline at end of file diff --git a/agents/ten_packages/extension/cartesia_tts/tests/test_basic.py b/agents/ten_packages/extension/cartesia_tts/tests/test_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..a8978b391c5c38029c375b08e8a54bb6cd94a6d0 --- /dev/null +++ b/agents/ten_packages/extension/cartesia_tts/tests/test_basic.py @@ -0,0 +1,35 @@ +# +# Copyright © 2024 Agora +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0, with certain conditions. +# Refer to the "LICENSE" file in the root directory for more information. +# +from pathlib import Path +from ten import ExtensionTester, TenEnvTester, Cmd, CmdResult, StatusCode + + +class ExtensionTesterBasic(ExtensionTester): + def check_hello(self, ten_env: TenEnvTester, result: CmdResult): + status_code = result.get_status_code() + print("receive hello_world, status:" + str(status_code)) + + if status_code == StatusCode.OK: + ten_env.stop_test() + + def on_start(self, ten_env: TenEnvTester) -> None: + new_cmd = Cmd.create("hello_world") + + print("send hello_world") + ten_env.send_cmd( + new_cmd, + lambda ten_env, result, _: self.check_hello(ten_env, result), + ) + + print("tester on_start_done") + ten_env.on_start_done() + + +def test_basic(): + tester = ExtensionTesterBasic() + tester.set_test_mode_single("cartesia_tts") + tester.run() diff --git a/agents/ten_packages/extension/computer_tool_python/README.md b/agents/ten_packages/extension/computer_tool_python/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f82873b84d89f2e7467d26170b37bd77ce7069e3 --- /dev/null +++ b/agents/ten_packages/extension/computer_tool_python/README.md @@ -0,0 +1,22 @@ +# computer_tool_python + +This is a tool demo for computer use. + +## Features + +- Open the Application +- Analyze the code through screen sharing +- Generate code +- Save the content to the notebook + +## API + +Refer to `api` definition in [manifest.json](manifest.json) and default values in [property.json](property.json). + +### Out: + +- `tool_register`: automatically registers the tool with the LLM + +### In: + +- `tool_call`: synchronous command that invokes a computer-use action \ No newline at end of file diff --git a/agents/ten_packages/extension/computer_tool_python/__init__.py b/agents/ten_packages/extension/computer_tool_python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..72593ab2259f95627bdd500fe3d062984e7f44c6 --- /dev/null +++ b/agents/ten_packages/extension/computer_tool_python/__init__.py @@ -0,0 +1,6 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. +# +from . import addon diff --git a/agents/ten_packages/extension/computer_tool_python/addon.py b/agents/ten_packages/extension/computer_tool_python/addon.py new file mode 100644 index 0000000000000000000000000000000000000000..5e6ef817a01b3c04d1becaf138bfd50bd3118db1 --- /dev/null +++ b/agents/ten_packages/extension/computer_tool_python/addon.py @@ -0,0 +1,19 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. +# +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) +from .extension import ComputerToolExtension + + +@register_addon_as_extension("computer_tool_python") +class ComputerToolExtensionAddon(Addon): + + def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None: + ten_env.log_info("ComputerToolExtensionAddon on_create_instance") + ten_env.on_create_instance_done(ComputerToolExtension(name), context) diff --git a/agents/ten_packages/extension/computer_tool_python/extension.py b/agents/ten_packages/extension/computer_tool_python/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..02375bf7507053f2da762aee279bb5a80ca14db9 --- /dev/null +++ b/agents/ten_packages/extension/computer_tool_python/extension.py @@ -0,0 +1,196 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. +# +import json +from pydantic import BaseModel +from base64 import b64encode +from io import BytesIO +from typing import Any, Dict +from ten_ai_base.const import CONTENT_DATA_OUT_NAME, DATA_OUT_PROPERTY_END_OF_SEGMENT, DATA_OUT_PROPERTY_TEXT +from ten_ai_base.llm_tool import AsyncLLMToolBaseExtension +from ten_ai_base.types import LLMToolMetadata, LLMToolMetadataParameter, LLMToolResult, LLMToolResultLLMResult +from .openai import OpenAIChatGPT, OpenAIChatGPTConfig + +from PIL import Image +from ten import ( + AsyncTenEnv, + AudioFrame, + VideoFrame, + Data +) + +OPEN_WEBSITE_TOOL_NAME = "open_website" +OPEN_WEBSITE_TOOL_DESCRIPTION = "Open a website with given site name" + +class WebsiteEvent(BaseModel): + website_name: str + website_url: str + +def rgb2base64jpeg(rgb_data, width, height): + # Convert the RGB image to a PIL Image + pil_image = Image.frombytes("RGBA", (width, height), bytes(rgb_data)) + pil_image = pil_image.convert("RGB") + + # Resize the image while maintaining its aspect ratio + pil_image = resize_image_keep_aspect(pil_image, 1080) + + # Save the image to a BytesIO object in JPEG format + buffered = BytesIO() + pil_image.save(buffered, format="JPEG") + + # Get the byte data of the JPEG image + jpeg_image_data = buffered.getvalue() + + # Convert the JPEG byte data to a Base64 encoded string + base64_encoded_image = b64encode(jpeg_image_data).decode("utf-8") + + # Create the data URL + mime_type = "image/jpeg" + base64_url = f"data:{mime_type};base64,{base64_encoded_image}" + return base64_url + + +def resize_image_keep_aspect(image, max_size=512): + """ + Resize an image while maintaining its aspect ratio, ensuring the larger dimension is max_size. + If both dimensions are smaller than max_size, the image is not resized. 
+ + :param image: A PIL Image object + :param max_size: The maximum size for the larger dimension (width or height) + :return: A PIL Image object (resized or original) + """ + # Get current width and height + width, height = image.size + + # If both dimensions are already smaller than max_size, return the original image + if width <= max_size and height <= max_size: + return image + + # Calculate the aspect ratio + aspect_ratio = width / height + + # Determine the new dimensions + if width > height: + new_width = max_size + new_height = int(max_size / aspect_ratio) + else: + new_height = max_size + new_width = int(max_size * aspect_ratio) + + # Resize the image with the new dimensions + resized_image = image.resize((new_width, new_height)) + + return resized_image + +class ComputerToolExtension(AsyncLLMToolBaseExtension): + + def __init__(self, name: str) -> None: + super().__init__(name) + self.openai_chatgpt = None + self.config = None + self.loop = None + self.memory = [] + self.max_memory_length = 10 + self.image_data = None + self.image_width = 0 + self.image_height = 0 + + async def on_init(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_debug("on_init") + await super().on_init(ten_env) + + async def on_start(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_debug("on_start") + await super().on_start(ten_env) + + # Prepare configuration + self.config = await OpenAIChatGPTConfig.create_async(ten_env=ten_env) + + # Mandatory properties + if not self.config.api_key: + ten_env.log_info("API key is missing, exiting on_start") + return + + self.openai_chatgpt = OpenAIChatGPT(ten_env, self.config) + + async def on_stop(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_debug("on_stop") + await super().on_stop(ten_env) + + async def on_deinit(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_debug("on_deinit") + await super().on_deinit(ten_env) + + async def on_audio_frame(self, ten_env: AsyncTenEnv, audio_frame: AudioFrame) -> None: + audio_frame_name = audio_frame.get_name() + ten_env.log_debug("on_audio_frame name {}".format(audio_frame_name)) + + async def on_video_frame(self, ten_env: AsyncTenEnv, video_frame: VideoFrame) -> None: + video_frame_name = video_frame.get_name() + ten_env.log_debug("on_video_frame name {}".format(video_frame_name)) + + self.image_data = video_frame.get_buf() + self.image_width = video_frame.get_width() + self.image_height = video_frame.get_height() + + def get_tool_metadata(self, _: AsyncTenEnv) -> list[LLMToolMetadata]: + return [ + LLMToolMetadata( + name=OPEN_WEBSITE_TOOL_NAME, + description=OPEN_WEBSITE_TOOL_DESCRIPTION, + parameters=[ + LLMToolMetadataParameter( + name="name", + type="string", + description="The name of the website to open", + required=True, + ), + LLMToolMetadataParameter( + name="url", + type="string", + description="The url of the given website, get based on name", + required=True, + ), + ] + ), + ] + + async def run_tool(self, ten_env: AsyncTenEnv, name: str, args: dict) -> LLMToolResult: + if name == OPEN_WEBSITE_TOOL_NAME: + site_name = args.get("name") + site_url = args.get("url") + ten_env.log_info(f"open site {site_name} {site_url}") + result = await self._open_website(site_name, site_url, ten_env) + return LLMToolResultLLMResult( + type="llmresult", + content=json.dumps(result), + ) + + async def _open_website(self, site_name: str, site_url: str, ten_env: AsyncTenEnv) -> Any: + await self._send_data(ten_env, "browse_website", {"name": site_name, "url": site_url}) + return {"result": "success"} + + async def 
_send_data(self, ten_env: AsyncTenEnv, action: str, data: Dict[str, Any]): + try: + action_data = json.dumps({ + "type": "action", + "data": { + "action": action, + "data": data + } + }) + + output_data = Data.create(CONTENT_DATA_OUT_NAME) + output_data.set_property_string( + DATA_OUT_PROPERTY_TEXT, + action_data + ) + output_data.set_property_bool( + DATA_OUT_PROPERTY_END_OF_SEGMENT, True + ) + await ten_env.send_data(output_data) + except Exception as err: + ten_env.log_warn(f"send data error {err}") \ No newline at end of file diff --git a/agents/ten_packages/extension/computer_tool_python/manifest.json b/agents/ten_packages/extension/computer_tool_python/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..7f8c95e33d6175a5fdfcb946dfb3bd0d9dd17d94 --- /dev/null +++ b/agents/ten_packages/extension/computer_tool_python/manifest.json @@ -0,0 +1,87 @@ +{ + "type": "extension", + "name": "computer_tool_python", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "package": { + "include": [ + "manifest.json", + "property.json", + "BUILD.gn", + "**.tent", + "**.py", + "README.md", + "tests/**" + ] + }, + "api": { + "property": { + "api_key": { + "type": "string" + }, + "frequency_penalty": { + "type": "float64" + }, + "presence_penalty": { + "type": "float64" + }, + "temperature": { + "type": "float64" + }, + "top_p": { + "type": "float64" + }, + "model": { + "type": "string" + }, + "max_tokens": { + "type": "int64" + }, + "base_url": { + "type": "string" + }, + "prompt": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "max_memory_length": { + "type": "int64" + }, + "vendor": { + "type": "string" + }, + "azure_endpoint": { + "type": "string" + }, + "azure_api_version": { + "type": "string" + } + }, + "cmd_out": [ + { + "name": "tool_register", + "property": {} + } + ], + "cmd_in": [ + { + "name": "tool_call", + "property": {} + } + ], + "video_frame_in": [ + { + "name": "video_frame", + "property": {} + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/computer_tool_python/openai.py b/agents/ten_packages/extension/computer_tool_python/openai.py new file mode 100644 index 0000000000000000000000000000000000000000..652e484ff9372767d8f04b6a154fd6bb6e37faf5 --- /dev/null +++ b/agents/ten_packages/extension/computer_tool_python/openai.py @@ -0,0 +1,136 @@ +import random +import requests +from openai import AsyncOpenAI +from ten_ai_base.config import BaseConfig +from dataclasses import dataclass +from ten.async_ten_env import AsyncTenEnv + +@dataclass +class OpenAIChatGPTConfig(BaseConfig): + api_key: str = "" + base_url: str = "https://api.openai.com/v1" + model: str = "gpt-4o" # Adjust this to match the equivalent of `openai.GPT4o` in the Python library + prompt: str = "You are a voice assistant who talks in a conversational way and can chat with me like my friends. I will speak to you in English or Chinese, and you will answer in the corrected and improved version of my text with the language I use. Don’t talk like a robot, instead I would like you to talk like a real human with emotions. I will use your answer for text-to-speech, so don’t return me any meaningless characters. I want you to be helpful, when I’m asking you for advice, give me precise, practical and useful advice instead of being vague. When giving me a list of options, express the options in a narrative way instead of bullet points." 
+ frequency_penalty: float = 0.9 + presence_penalty: float = 0.9 + top_p: float = 1.0 + temperature: float = 0.1 + max_tokens: int = 512 + seed: int = random.randint(0, 10000) + proxy_url: str = "" + max_memory_length: int = 10 + vendor: str = "openai" + azure_endpoint: str = "" + azure_api_version: str = "" + + @classmethod + def default_config(cls): + return cls( + base_url="https://api.openai.com/v1", + api_key="", + model="gpt-4o", # Adjust this to match the equivalent of `openai.GPT4o` in the Python library + prompt="You are a voice assistant who talks in a conversational way and can chat with me like my friends. I will speak to you in English or Chinese, and you will answer in the corrected and improved version of my text with the language I use. Don’t talk like a robot, instead I would like you to talk like a real human with emotions. I will use your answer for text-to-speech, so don’t return me any meaningless characters. I want you to be helpful, when I’m asking you for advice, give me precise, practical and useful advice instead of being vague. When giving me a list of options, express the options in a narrative way instead of bullet points.", + frequency_penalty=0.9, + presence_penalty=0.9, + top_p=1.0, + temperature=0.1, + max_tokens=512, + seed=random.randint(0, 10000), + proxy_url="" + ) + + +class OpenAIChatGPT: + client = None + def __init__(self, ten_env:AsyncTenEnv, config: OpenAIChatGPTConfig): + self.config = config + ten_env.log_info(f"apikey {config.api_key}, base_url {config.base_url}") + self.client = AsyncOpenAI( + api_key=config.api_key, + base_url=config.base_url + ) + self.session = requests.Session() + if config.proxy_url: + proxies = { + "http": config.proxy_url, + "https": config.proxy_url, + } + self.session.proxies.update(proxies) + self.client.session = self.session + + async def get_chat_completions_structured(self, messages, response_format): + req = { + "model":"gpt-4o-2024-08-06", + "messages": [ + { + "role": "system", + "content": self.config.prompt, + }, + *messages, + ], + "temperature": self.config.temperature, + "top_p": self.config.top_p, + "presence_penalty": self.config.presence_penalty, + "frequency_penalty": self.config.frequency_penalty, + "max_tokens": self.config.max_tokens, + "seed": self.config.seed, + "response_format": response_format, + } + + try: + completion = await self.client.beta.chat.completions.parse(**req) + response = completion.choices[0].message + if response.parsed: + return response.parsed + elif response.refusal: + # handle refusal + raise RuntimeError(f"Refusal: {response.refusal}") + except Exception as e: + raise RuntimeError(f"CreateChatCompletionStructured failed, err: {e}") from e + + async def get_chat_completions_stream(self, messages, tools = None, listener = None): + req = { + "model": self.config.model, + "messages": [ + { + "role": "system", + "content": self.config.prompt, + }, + *messages, + ], + "tools": tools, + "temperature": self.config.temperature, + "top_p": self.config.top_p, + "presence_penalty": self.config.presence_penalty, + "frequency_penalty": self.config.frequency_penalty, + "max_tokens": self.config.max_tokens, + "seed": self.config.seed, + "stream": True, + } + + try: + response = await self.client.chat.completions.create(**req) + except Exception as e: + raise RuntimeError(f"CreateChatCompletionStream failed, err: {e}") from e + + full_content = "" + + async for chat_completion in response: + choice = chat_completion.choices[0] + delta = choice.delta + content = delta.content if delta and 
delta.content else "" + # Emit content update event (fire-and-forget) + if listener and content: + listener.emit('content_update', content) + + full_content += content + # Check for tool calls + if delta.tool_calls: + for tool_call in delta.tool_calls: + # Emit tool call event (fire-and-forget) + if listener: + listener.emit('tool_call', tool_call) + + # Emit content finished event after the loop completes + if listener: + listener.emit('content_finished', full_content) \ No newline at end of file diff --git a/agents/ten_packages/extension/computer_tool_python/property.json b/agents/ten_packages/extension/computer_tool_python/property.json new file mode 100644 index 0000000000000000000000000000000000000000..ccab6ed3a55e17772efa2c03dde6d1f999e2993a --- /dev/null +++ b/agents/ten_packages/extension/computer_tool_python/property.json @@ -0,0 +1,10 @@ +{ + "base_url": "", + "api_key": "${env:OPENAI_API_KEY}", + "frequency_penalty": 0.9, + "model": "gpt-4o", + "max_tokens": 512, + "prompt": "", + "proxy_url": "${env:OPENAI_PROXY_URL}", + "max_memory_length": 10 +} \ No newline at end of file diff --git a/agents/ten_packages/extension/computer_tool_python/requirements.txt b/agents/ten_packages/extension/computer_tool_python/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..44836d6b059c4c98c9b2dd9acac8dff608bb920a --- /dev/null +++ b/agents/ten_packages/extension/computer_tool_python/requirements.txt @@ -0,0 +1,3 @@ +openai +requests[socks] +pydantic \ No newline at end of file diff --git a/agents/ten_packages/extension/cosy_tts_python/README.md b/agents/ten_packages/extension/cosy_tts_python/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2f0cd08f34bc2a4e6173ca2c1a4c6054541299fc --- /dev/null +++ b/agents/ten_packages/extension/cosy_tts_python/README.md @@ -0,0 +1,29 @@ +# cosy_tts_python + + + +## Features + + + +- xxx feature + +## API + +Refer to `api` definition in [manifest.json](manifest.json) and default values in [property.json](property.json). + + + +## Development + +### Build + + + +### Unit test + + + +## Misc + + diff --git a/agents/ten_packages/extension/cosy_tts_python/__init__.py b/agents/ten_packages/extension/cosy_tts_python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..72593ab2259f95627bdd500fe3d062984e7f44c6 --- /dev/null +++ b/agents/ten_packages/extension/cosy_tts_python/__init__.py @@ -0,0 +1,6 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. +# +from . import addon diff --git a/agents/ten_packages/extension/cosy_tts_python/addon.py b/agents/ten_packages/extension/cosy_tts_python/addon.py new file mode 100644 index 0000000000000000000000000000000000000000..ad4d8df01f2f6097588b48aa41a6aa2796517a57 --- /dev/null +++ b/agents/ten_packages/extension/cosy_tts_python/addon.py @@ -0,0 +1,19 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information.
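A note on the `listener` argument used by `get_chat_completions_stream` above: the client only ever calls `listener.emit(event_name, payload)`, so any object with that method works. The sketch below wires a hypothetical print-based listener to the three emitted events; `PrintListener`, `StubEnv`, and the placeholder key are assumptions for illustration, not part of the extension.

```python
# Sketch: a minimal listener for get_chat_completions_stream above.
# PrintListener and StubEnv are illustrative glue, not extension code.
import asyncio

from .openai import OpenAIChatGPT, OpenAIChatGPTConfig  # this extension's module

class PrintListener:
    def emit(self, event: str, payload) -> None:
        if event == "content_update":
            print(payload, end="", flush=True)  # incremental tokens
        elif event == "tool_call":
            print(f"\n[tool call] {payload}")   # streamed tool-call delta
        elif event == "content_finished":
            print(f"\n[done] {len(payload)} chars")

class StubEnv:
    """Stands in for AsyncTenEnv; the client only calls log_info here."""
    def log_info(self, msg: str) -> None:
        print(msg)

async def main() -> None:
    config = OpenAIChatGPTConfig.default_config()
    config.api_key = "sk-..."  # placeholder
    client = OpenAIChatGPT(StubEnv(), config)
    await client.get_chat_completions_stream(
        [{"role": "user", "content": "Open example.com"}],
        listener=PrintListener(),
    )

asyncio.run(main())
```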
+# +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + + +@register_addon_as_extension("cosy_tts_python") +class CosyTTSExtensionAddon(Addon): + + def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None: + from .extension import CosyTTSExtension + ten_env.log_info("CosyTTSExtensionAddon on_create_instance") + ten_env.on_create_instance_done(CosyTTSExtension(name), context) diff --git a/agents/ten_packages/extension/cosy_tts_python/cosy_tts.py b/agents/ten_packages/extension/cosy_tts_python/cosy_tts.py new file mode 100644 index 0000000000000000000000000000000000000000..3fc5391a4319bf48760df4e8f40aa0f8dc1bcdae --- /dev/null +++ b/agents/ten_packages/extension/cosy_tts_python/cosy_tts.py @@ -0,0 +1,110 @@ +import asyncio +from dataclasses import dataclass + +from websocket import WebSocketConnectionClosedException + +from ten.async_ten_env import AsyncTenEnv +from ten_ai_base.config import BaseConfig +import dashscope +from dashscope.audio.tts_v2 import SpeechSynthesizer, AudioFormat, ResultCallback + + +@dataclass +class CosyTTSConfig(BaseConfig): + api_key: str = "" + voice: str = "longxiaochun" + model: str = "cosyvoice-v1" + sample_rate: int = 16000 + + +class AsyncIteratorCallback(ResultCallback): + def __init__(self, ten_env: AsyncTenEnv, queue: asyncio.Queue) -> None: + self.closed = False + self.ten_env = ten_env + self.loop = asyncio.get_event_loop() + self.queue = queue + + def close(self): + self.closed = True + + def on_open(self): + self.ten_env.log_info("websocket is open.") + + def on_complete(self): + self.ten_env.log_info("speech synthesis task complete successfully.") + + def on_error(self, message: str): + self.ten_env.log_error(f"speech synthesis task failed, {message}") + + def on_close(self): + self.ten_env.log_info("websocket is closed.") + self.close() + + def on_event(self, message: str) -> None: + self.ten_env.log_debug(f"received event: {message}") + + def on_data(self, data: bytes) -> None: + if self.closed: + self.ten_env.log_warn( + f"received data: {len(data)} bytes but connection was closed" + ) + return + self.ten_env.log_debug(f"received data: {len(data)} bytes") + asyncio.run_coroutine_threadsafe(self.queue.put(data), self.loop) + + +class CosyTTS: + def __init__(self, config: CosyTTSConfig) -> None: + self.config = config + self.synthesizer = None # Initially no synthesizer + self.queue = asyncio.Queue() + dashscope.api_key = config.api_key + + def _create_synthesizer( + self, ten_env: AsyncTenEnv, callback: AsyncIteratorCallback + ): + if self.synthesizer: + self.synthesizer = None + + ten_env.log_info("Creating new synthesizer") + self.synthesizer = SpeechSynthesizer( + model=self.config.model, + voice=self.config.voice, + format=AudioFormat.PCM_16000HZ_MONO_16BIT, + callback=callback, + ) + + async def get_audio_bytes(self) -> bytes: + return await self.queue.get() + + def text_to_speech_stream( + self, ten_env: AsyncTenEnv, text: str, end_of_segment: bool + ) -> None: + try: + callback = AsyncIteratorCallback(ten_env, self.queue) + + if not self.synthesizer or end_of_segment: + self._create_synthesizer(ten_env, callback) + + self.synthesizer.streaming_call(text) + + if end_of_segment: + ten_env.log_info("Streaming complete") + self.synthesizer.streaming_complete() + self.synthesizer = None + except WebSocketConnectionClosedException as e: + ten_env.log_error(f"WebSocket connection closed, {e}") + self.synthesizer = None + except Exception as e: + ten_env.log_error(f"Error streaming text, {e}") + self.synthesizer 
+    def text_to_speech_stream(
+        self, ten_env: AsyncTenEnv, text: str, end_of_segment: bool
+    ) -> None:
+        try:
+            callback = AsyncIteratorCallback(ten_env, self.queue)
+
+            if not self.synthesizer or end_of_segment:
+                self._create_synthesizer(ten_env, callback)
+
+            self.synthesizer.streaming_call(text)
+
+            if end_of_segment:
+                ten_env.log_info("Streaming complete")
+                self.synthesizer.streaming_complete()
+                self.synthesizer = None
+        except WebSocketConnectionClosedException as e:
+            ten_env.log_error(f"WebSocket connection closed, {e}")
+            self.synthesizer = None
+        except Exception as e:
+            ten_env.log_error(f"Error streaming text, {e}")
+            self.synthesizer = None
+
+    def cancel(self, ten_env: AsyncTenEnv) -> None:
+        if self.synthesizer:
+            try:
+                self.synthesizer.streaming_cancel()
+            except WebSocketConnectionClosedException as e:
+                ten_env.log_error(f"WebSocket connection closed, {e}")
+            except Exception as e:
+                ten_env.log_error(f"Error cancelling streaming, {e}")
+            self.synthesizer = None
diff --git a/agents/ten_packages/extension/cosy_tts_python/extension.py b/agents/ten_packages/extension/cosy_tts_python/extension.py
new file mode 100644
index 0000000000000000000000000000000000000000..982476cfd16f094e07bc3a02c328e1059a9c762f
--- /dev/null
+++ b/agents/ten_packages/extension/cosy_tts_python/extension.py
@@ -0,0 +1,58 @@
+#
+# This file is part of TEN Framework, an open source project.
+# Licensed under the Apache License, Version 2.0.
+# See the LICENSE file for more information.
+#
+import asyncio
+from .cosy_tts import CosyTTS, CosyTTSConfig
+from ten import (
+    AsyncTenEnv,
+)
+from ten_ai_base.tts import AsyncTTSBaseExtension
+
+
+class CosyTTSExtension(AsyncTTSBaseExtension):
+    def __init__(self, name: str) -> None:
+        super().__init__(name)
+        self.client = None
+        self.config = None
+
+    async def on_init(self, ten_env: AsyncTenEnv) -> None:
+        await super().on_init(ten_env)
+        ten_env.log_debug("on_init")
+
+    async def on_start(self, ten_env: AsyncTenEnv) -> None:
+        await super().on_start(ten_env)
+        ten_env.log_debug("on_start")
+
+        self.config = await CosyTTSConfig.create_async(ten_env=ten_env)
+        self.client = CosyTTS(self.config)
+
+        asyncio.create_task(self._process_audio_data(ten_env))
+
+    async def on_stop(self, ten_env: AsyncTenEnv) -> None:
+        await super().on_stop(ten_env)
+        ten_env.log_debug("on_stop")
+
+        await self.queue.put(None)
+
+    async def on_deinit(self, ten_env: AsyncTenEnv) -> None:
+        await super().on_deinit(ten_env)
+        ten_env.log_debug("on_deinit")
+
+    async def _process_audio_data(self, ten_env: AsyncTenEnv) -> None:
+        while True:
+            audio_data = await self.client.get_audio_bytes()
+
+            if audio_data is None:
+                break
+
+            await self.send_audio_out(ten_env, audio_data)
+
+    async def on_request_tts(
+        self, ten_env: AsyncTenEnv, input_text: str, end_of_segment: bool
+    ) -> None:
+        self.client.text_to_speech_stream(ten_env, input_text, end_of_segment)
+
+    async def on_cancel_tts(self, ten_env: AsyncTenEnv) -> None:
+        self.client.cancel(ten_env)
diff --git a/agents/ten_packages/extension/cosy_tts_python/manifest.json b/agents/ten_packages/extension/cosy_tts_python/manifest.json
new file mode 100644
index 0000000000000000000000000000000000000000..06da42cdc1176799823a6c091751de5d25067831
--- /dev/null
+++ b/agents/ten_packages/extension/cosy_tts_python/manifest.json
@@ -0,0 +1,64 @@
+{
+  "type": "extension",
+  "name": "cosy_tts_python",
+  "version": "0.1.0",
+  "dependencies": [
+    {
+      "type": "system",
+      "name": "ten_runtime_python",
+      "version": "0.8"
+    }
+  ],
+  "package": {
+    "include": [
+      "manifest.json",
+      "property.json",
+      "BUILD.gn",
+      "**.tent",
+      "**.py",
+      "README.md",
+      "tests/**"
+    ]
+  },
+  "api": {
+    "property": {
+      "api_key": {
+        "type": "string"
+      },
+      "voice": {
+        "type": "string"
+      },
+      "model": {
+        "type": "string"
+      },
+      "sample_rate": {
+        "type": "int64"
+      }
+    },
+    "data_in": [
+      {
+        "name": "text_data",
+        "property": {
+          "text": {
+            "type": "string"
+          }
+        }
+      }
+    ],
+    "cmd_in": [
+      {
+        "name": "flush"
+      }
+    ],
+    "cmd_out": [
+      {
+        "name": "flush"
+      }
+    ],
+    "audio_frame_out": [
+      {
+        "name": "pcm_frame"
+      }
+    ]
+  }
+}
\ No newline at end of file
diff --git a/agents/ten_packages/extension/cosy_tts_python/property.json b/agents/ten_packages/extension/cosy_tts_python/property.json
new file mode 100644
index 0000000000000000000000000000000000000000..db3baa0a398b7ed6f4688465c8a817e5af4fb7b6
--- /dev/null
+++ b/agents/ten_packages/extension/cosy_tts_python/property.json
@@ -0,0 +1,6 @@
+{
+  "api_key": "${env:QWEN_API_KEY}",
+  "model": "cosyvoice-v1",
+  "voice": "longxiaochun",
+  "sample_rate": 16000
+}
\ No newline at end of file
diff --git a/agents/ten_packages/extension/cosy_tts_python/requirements.txt b/agents/ten_packages/extension/cosy_tts_python/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5899464f479117dceab904f0dbe7a4f7459cf02e
--- /dev/null
+++ b/agents/ten_packages/extension/cosy_tts_python/requirements.txt
@@ -0,0 +1 @@
+dashscope
\ No newline at end of file
diff --git a/agents/ten_packages/extension/cosy_tts_python/tests/bin/start b/agents/ten_packages/extension/cosy_tts_python/tests/bin/start
new file mode 100644
index 0000000000000000000000000000000000000000..04d784ea179c32ded5fc50565fb28b4ae0585c6b
--- /dev/null
+++ b/agents/ten_packages/extension/cosy_tts_python/tests/bin/start
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+set -e
+
+cd "$(dirname "${BASH_SOURCE[0]}")/../.."
+
+export PYTHONPATH=.ten/app:.ten/app/ten_packages/system/ten_runtime_python/lib:.ten/app/ten_packages/system/ten_runtime_python/interface:.ten/app/ten_packages/system/ten_ai_base/interface:$PYTHONPATH
+
+# If the Python app imports some modules that are compiled with a different
+# version of libstdc++ (ex: PyTorch), the Python app may encounter confusing
+# errors. To solve this problem, we can preload the correct version of
+# libstdc++.
+#
+# export LD_PRELOAD=/lib/x86_64-linux-gnu/libstdc++.so.6
+#
+# Another solution is to make sure the module 'ten_runtime_python' is imported
+# _after_ the module that requires another version of libstdc++ is imported.
+#
+# Refer to https://github.com/pytorch/pytorch/issues/102360?from_wecom=1#issuecomment-1708989096
+
+pytest tests/ "$@"
\ No newline at end of file
diff --git a/agents/ten_packages/extension/cosy_tts_python/tests/conftest.py b/agents/ten_packages/extension/cosy_tts_python/tests/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a2175e36e06ea1b6b40e07c5cf1e134ee1aec17
--- /dev/null
+++ b/agents/ten_packages/extension/cosy_tts_python/tests/conftest.py
@@ -0,0 +1,36 @@
+#
+# Copyright © 2025 Agora
+# This file is part of TEN Framework, an open source project.
+# Licensed under the Apache License, Version 2.0, with certain conditions.
+# Refer to the "LICENSE" file in the root directory for more information.
+#
+import pytest
+import sys
+import os
+from ten import (
+    unregister_all_addons_and_cleanup,
+)
+
+
+@pytest.fixture(scope="session", autouse=True)
+def global_setup_and_teardown():
+    # Set the environment variable.
+    os.environ["TEN_DISABLE_ADDON_UNREGISTER_AFTER_APP_CLOSE"] = "true"
+
+    # Verify the environment variable is correctly set.
+    if (
+        "TEN_DISABLE_ADDON_UNREGISTER_AFTER_APP_CLOSE" not in os.environ
+        or os.environ["TEN_DISABLE_ADDON_UNREGISTER_AFTER_APP_CLOSE"] != "true"
+    ):
+        print(
+            "Failed to set TEN_DISABLE_ADDON_UNREGISTER_AFTER_APP_CLOSE",
+            file=sys.stderr,
+        )
+        sys.exit(1)
+
+    # Yield control to the test; after the test execution is complete, continue
+    # with the teardown process.
+    yield
+
+    # Teardown part.
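+    # Unregister addons once per session: the environment variable set above
+    # disables the per-app-close unregistration, so this teardown is the
+    # single cleanup point shared by all tests in the session.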
+    unregister_all_addons_and_cleanup()
\ No newline at end of file
diff --git a/agents/ten_packages/extension/cosy_tts_python/tests/test_basic.py b/agents/ten_packages/extension/cosy_tts_python/tests/test_basic.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b0800572347755b2f38ca84b5b04880bb09b4e2
--- /dev/null
+++ b/agents/ten_packages/extension/cosy_tts_python/tests/test_basic.py
@@ -0,0 +1,35 @@
+#
+# Copyright © 2024 Agora
+# This file is part of TEN Framework, an open source project.
+# Licensed under the Apache License, Version 2.0, with certain conditions.
+# Refer to the "LICENSE" file in the root directory for more information.
+#
+from pathlib import Path
+from ten import ExtensionTester, TenEnvTester, Cmd, CmdResult, StatusCode
+
+
+class ExtensionTesterBasic(ExtensionTester):
+    def check_hello(self, ten_env: TenEnvTester, result: CmdResult):
+        status_code = result.get_status_code()
+        print("receive hello_world, status: " + str(status_code))
+
+        if status_code == StatusCode.OK:
+            ten_env.stop_test()
+
+    def on_start(self, ten_env: TenEnvTester) -> None:
+        new_cmd = Cmd.create("hello_world")
+
+        print("send hello_world")
+        ten_env.send_cmd(
+            new_cmd,
+            lambda ten_env, result, _: self.check_hello(ten_env, result),
+        )
+
+        print("tester on_start_done")
+        ten_env.on_start_done()
+
+
+def test_basic():
+    tester = ExtensionTesterBasic()
+    tester.set_test_mode_single("cosy_tts_python")
+    tester.run()
diff --git a/agents/ten_packages/extension/coze_python_async/README.md b/agents/ten_packages/extension/coze_python_async/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..16e3fd211f6d50878c68cb36297e097335dbbce8
--- /dev/null
+++ b/agents/ten_packages/extension/coze_python_async/README.md
@@ -0,0 +1,37 @@
+# coze_python_async
+
+This is a Python extension for the Coze service. The schema of the Coze service is attached in `schema.yml`.
+
+An example OpenAI wrapper is also attached in `examples/openai_wrapper.py`.
+
+## Features
+
+The extension keeps chat history, up to `max_history` entries.
+
+- `base_url` (required): the URL of the Coze service.
+- `token` (required): a Bearer token used for default auth.
+
+The extension supports `flush`, which closes the existing HTTP session.
+
+## API
+
+Refer to the `api` definition in [manifest.json](manifest.json) and default values in [property.json](property.json).
+
+- In:
+  - `text_data` [data]: the ASR result
+  - `flush` [cmd]: the flush signal
+- Out:
+  - `flush` [cmd]: the flush signal
+
+## Examples
+
+You can run the example with the following command; the wrapper service listens on port 8000 by default.
+
+```
+> export API_TOKEN="xxx" && export OPENAI_API_KEY="xxx" && python3 openai_wrapper.py
+
+INFO:     Started server process [162886]
+INFO:     Waiting for application startup.
+INFO:     Application startup complete.
+INFO:     Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
+```
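+
+For reference, the extension itself talks to the Coze v3 streaming endpoint. Below is a sketch of the request it issues, mirroring the payload built in `_stream_chat` in `extension.py`; the message shape follows `cozepy`'s `Message.build_user_question_text` and is illustrative, not normative:
+
+```
+curl -N "$BASE_URL/v3/chat" \
+  -H "Authorization: Bearer $TOKEN" \
+  -H "Content-Type: application/json" \
+  -d '{
+        "bot_id": "<bot id>",
+        "user_id": "TenAgent",
+        "stream": true,
+        "auto_save_history": true,
+        "additional_messages": [
+          {"role": "user", "content": "hello", "content_type": "text"}
+        ]
+      }'
+```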
diff --git a/agents/ten_packages/extension/coze_python_async/__init__.py b/agents/ten_packages/extension/coze_python_async/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c22fdd7cd08a97dfe876dad2d4bb86e7e30f0e15
--- /dev/null
+++ b/agents/ten_packages/extension/coze_python_async/__init__.py
@@ -0,0 +1,7 @@
+#
+# This file is part of TEN Framework, an open source project.
+# Licensed under the Apache License, Version 2.0.
+# See the LICENSE file for more information.
+#
+from . import addon
+
diff --git a/agents/ten_packages/extension/coze_python_async/addon.py b/agents/ten_packages/extension/coze_python_async/addon.py
new file mode 100644
index 0000000000000000000000000000000000000000..364777f7168f996032f3e5f2414acda1c5b389af
--- /dev/null
+++ b/agents/ten_packages/extension/coze_python_async/addon.py
@@ -0,0 +1,19 @@
+#
+# This file is part of TEN Framework, an open source project.
+# Licensed under the Apache License, Version 2.0.
+# See the LICENSE file for more information.
+#
+from ten import (
+    Addon,
+    register_addon_as_extension,
+    TenEnv,
+)
+
+
+@register_addon_as_extension("coze_python_async")
+class AsyncCozeExtensionAddon(Addon):
+
+    def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None:
+        from .extension import AsyncCozeExtension
+        ten_env.log_info("AsyncCozeExtensionAddon on_create_instance")
+        ten_env.on_create_instance_done(AsyncCozeExtension(name), context)
diff --git a/agents/ten_packages/extension/coze_python_async/extension.py b/agents/ten_packages/extension/coze_python_async/extension.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6b3b731086d23d6852957193f44b62a75f745e5
--- /dev/null
+++ b/agents/ten_packages/extension/coze_python_async/extension.py
@@ -0,0 +1,373 @@
+#
+# This file is part of TEN Framework, an open source project.
+# Licensed under the Apache License, Version 2.0.
+# See the LICENSE file for more information.
+#
+import asyncio
+import traceback
+import aiohttp
+import json
+import copy
+
+from typing import List, Any, AsyncGenerator
+from dataclasses import dataclass
+
+from cozepy import ChatEventType, Message, TokenAuth, AsyncCoze, ChatEvent, Chat
+
+from ten import (
+    AudioFrame,
+    VideoFrame,
+    AsyncTenEnv,
+    Cmd,
+    StatusCode,
+    CmdResult,
+    Data,
+)
+
+from ten_ai_base.config import BaseConfig
+from ten_ai_base.chat_memory import ChatMemory
+from ten_ai_base.types import (
+    LLMChatCompletionUserMessageParam,
+    LLMCallCompletionArgs,
+    LLMDataCompletionArgs,
+    LLMToolMetadata,
+)
+from ten_ai_base.llm import (
+    AsyncLLMBaseExtension,
+)
+
+CMD_IN_FLUSH = "flush"
+CMD_IN_ON_USER_JOINED = "on_user_joined"
+CMD_IN_ON_USER_LEFT = "on_user_left"
+CMD_OUT_FLUSH = "flush"
+CMD_OUT_TOOL_CALL = "tool_call"
+
+DATA_IN_TEXT_DATA_PROPERTY_IS_FINAL = "is_final"
+DATA_IN_TEXT_DATA_PROPERTY_TEXT = "text"
+
+DATA_OUT_TEXT_DATA_PROPERTY_TEXT = "text"
+DATA_OUT_TEXT_DATA_PROPERTY_END_OF_SEGMENT = "end_of_segment"
+
+CMD_PROPERTY_RESULT = "tool_result"
+
+
+def is_punctuation(char):
+    if char in ["，", ",", ".", "。", "?", "？", "!", "！"]:
+        return True
+    return False
+
+
+def parse_sentences(sentence_fragment, content):
+    sentences = []
+    current_sentence = sentence_fragment
+    for char in content:
+        current_sentence += char
+        if is_punctuation(char):
+            stripped_sentence = current_sentence
+            if any(c.isalnum() for c in stripped_sentence):
+                sentences.append(stripped_sentence)
+            current_sentence = ""
+
+    remain = current_sentence
+    return sentences, remain
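+
+
+# A worked example of the streaming flow above: feeding two deltas through
+# parse_sentences yields completed sentences plus the carried-over fragment:
+#   parse_sentences("", "Hello! How")    -> (["Hello!"], " How")
+#   parse_sentences(" How", " are you?") -> ([" How are you?"], "")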
+
+
+@dataclass
+class CozeConfig(BaseConfig):
+    # Global Coze endpoint by default; property.json overrides this with
+    # https://api.coze.cn for the CN deployment.
+    base_url: str = "https://api.coze.com"
+    bot_id: str = ""
+    token: str = ""
+    user_id: str = "TenAgent"
+    greeting: str = ""
+    max_history: int = 32
+
+
+class AsyncCozeExtension(AsyncLLMBaseExtension):
+    config: CozeConfig = None
+    sentence_fragment: str = ""
+    ten_env: AsyncTenEnv = None
+    loop: asyncio.AbstractEventLoop = None
+    stopped: bool = False
+    users_count = 0
+    memory: ChatMemory = None
+
+    acoze: AsyncCoze = None
+    # conversation: str = ""
+
+    async def on_init(self, ten_env: AsyncTenEnv) -> None:
+        await super().on_init(ten_env)
+        ten_env.log_debug("on_init")
+
+    async def on_start(self, ten_env: AsyncTenEnv) -> None:
+        await super().on_start(ten_env)
+        ten_env.log_debug("on_start")
+
+        self.loop = asyncio.get_event_loop()
+
+        self.config = await CozeConfig.create_async(ten_env=ten_env)
+        ten_env.log_info(f"config: {self.config}")
+
+        if not self.config.bot_id or not self.config.token:
+            ten_env.log_error("Missing required configuration")
+            return
+
+        self.memory = ChatMemory(self.config.max_history)
+        try:
+            self.acoze = AsyncCoze(
+                auth=TokenAuth(token=self.config.token), base_url=self.config.base_url
+            )
+
+            # self.conversation = await self.acoze.conversations.create(messages = [
+            #     Message.build_user_question_text(self.config.prompt)
+            # ] if self.config.prompt else [])
+
+        except Exception as e:
+            ten_env.log_error(f"Failed to create Coze client, {e}")
+
+        self.ten_env = ten_env
+
+    async def on_stop(self, ten_env: AsyncTenEnv) -> None:
+        await super().on_stop(ten_env)
+        ten_env.log_debug("on_stop")
+
+        self.stopped = True
+
+    async def on_deinit(self, ten_env: AsyncTenEnv) -> None:
+        await super().on_deinit(ten_env)
+        ten_env.log_debug("on_deinit")
+
+    async def on_cmd(self, ten_env: AsyncTenEnv, cmd: Cmd) -> None:
+        cmd_name = cmd.get_name()
+        ten_env.log_debug("on_cmd name {}".format(cmd_name))
+
+        status = StatusCode.OK
+        detail = "success"
+
+        if cmd_name == CMD_IN_FLUSH:
+            await self.flush_input_items(ten_env)
+            await ten_env.send_cmd(Cmd.create(CMD_OUT_FLUSH))
+            ten_env.log_info("on flush")
+        elif cmd_name == CMD_IN_ON_USER_JOINED:
+            self.users_count += 1
+            # Send greeting when first user joined
+            if self.config.greeting and self.users_count == 1:
+                self.send_text_output(ten_env, self.config.greeting, True)
+        elif cmd_name == CMD_IN_ON_USER_LEFT:
+            self.users_count -= 1
+        else:
+            await super().on_cmd(ten_env, cmd)
+            return
+
+        cmd_result = CmdResult.create(status)
+        cmd_result.set_property_string("detail", detail)
+        await ten_env.return_result(cmd_result, cmd)
+
+    async def on_call_chat_completion(
+        self, ten_env: AsyncTenEnv, **kargs: LLMCallCompletionArgs
+    ) -> Any:
+        raise RuntimeError("Not implemented")
+
+    async def on_data_chat_completion(
+        self, ten_env: AsyncTenEnv, **kargs: LLMDataCompletionArgs
+    ) -> None:
+        if not self.acoze:
+            await self._send_text(
+                "Coze is not connected. Please check your configuration.", True
+            )
+            return
+
+        input_messages: LLMChatCompletionUserMessageParam = kargs.get("messages", [])
+        messages = copy.copy(self.memory.get())
+        if not input_messages:
+            ten_env.log_warn("No message in data")
+        else:
+            messages.extend(input_messages)
+            for i in input_messages:
+                self.memory.put(i)
+
+        total_output = ""
+        sentence_fragment = ""
+        calls = {}
+
+        sentences = []
+        self.ten_env.log_info(f"messages: {messages}")
+        response = self._stream_chat(messages=messages)
+        async for message in response:
+            self.ten_env.log_info(f"content: {message}")
+            try:
+                if message.event == ChatEventType.CONVERSATION_MESSAGE_DELTA:
+                    total_output += message.message.content
+                    sentences, sentence_fragment = parse_sentences(
+                        sentence_fragment, message.message.content
+                    )
+                    for s in sentences:
+                        await self._send_text(s, False)
+                elif message.event == ChatEventType.CONVERSATION_MESSAGE_COMPLETED:
+                    if sentence_fragment:
+                        await self._send_text(sentence_fragment, True)
+                    else:
+                        await self._send_text("", True)
+                elif message.event == ChatEventType.CONVERSATION_CHAT_FAILED:
+                    last_error = message.chat.last_error
+                    if last_error and last_error.code == 4011:
+                        await self._send_text(
+                            "The Coze token has been depleted. Please check your token usage.",
+                            True,
+                        )
+                    else:
+                        await self._send_text(last_error.msg, True)
+            except Exception as e:
+                self.ten_env.log_error(f"Failed to parse response: {message} {e}")
+                traceback.print_exc()
+
+        self.memory.put({"role": "assistant", "content": total_output})
+        self.ten_env.log_info(f"total_output: {total_output} {calls}")
+
+    async def on_tools_update(
+        self, ten_env: AsyncTenEnv, tool: LLMToolMetadata
+    ) -> None:
+        # Implement the logic for tool updates
+        return await super().on_tools_update(ten_env, tool)
+
+    async def on_data(self, ten_env: AsyncTenEnv, data: Data) -> None:
+        data_name = data.get_name()
+        ten_env.log_info("on_data name {}".format(data_name))
+
+        is_final = False
+        input_text = ""
+        try:
+            is_final = data.get_property_bool(DATA_IN_TEXT_DATA_PROPERTY_IS_FINAL)
+        except Exception as err:
+            ten_env.log_info(
+                f"GetProperty optional {DATA_IN_TEXT_DATA_PROPERTY_IS_FINAL} failed, err: {err}"
+            )
+
+        try:
+            input_text = data.get_property_string(DATA_IN_TEXT_DATA_PROPERTY_TEXT)
+        except Exception as err:
+            ten_env.log_info(
+                f"GetProperty optional {DATA_IN_TEXT_DATA_PROPERTY_TEXT} failed, err: {err}"
+            )
+
+        if not is_final:
+            ten_env.log_info("ignore non-final input")
+            return
+        if not input_text:
+            ten_env.log_info("ignore empty text")
+            return
+
+        ten_env.log_info(f"OnData input text: [{input_text}]")
+
+        # Start an asynchronous task for handling chat completion
+        message = LLMChatCompletionUserMessageParam(role="user", content=input_text)
+        await self.queue_input_item(False, messages=[message])
+
+    async def on_audio_frame(
+        self, ten_env: AsyncTenEnv, audio_frame: AudioFrame
+    ) -> None:
+        pass
+
+    async def on_video_frame(
+        self, ten_env: AsyncTenEnv, video_frame: VideoFrame
+    ) -> None:
+        pass
+
+    async def _send_text(self, text: str, end_of_segment: bool) -> None:
+        data = Data.create("text_data")
+        data.set_property_string(DATA_OUT_TEXT_DATA_PROPERTY_TEXT, text)
+        data.set_property_bool(
+            DATA_OUT_TEXT_DATA_PROPERTY_END_OF_SEGMENT, end_of_segment
+        )
+        asyncio.create_task(self.ten_env.send_data(data))
+
+    async def _stream_chat(
+        self, messages: List[Any]
+    ) -> AsyncGenerator[ChatEvent, None]:
+        additionals = []
+        for m in messages:
+            if m["role"] == "user":
+                additionals.append(
+                    Message.build_user_question_text(m["content"]).model_dump()
+                )
+            elif m["role"] == "assistant":
+                additionals.append(
+                    Message.build_assistant_answer(m["content"]).model_dump()
+                )
+
+        def chat_stream_handler(event: str, event_data: Any) -> ChatEvent:
+            if event == ChatEventType.DONE:
+                raise StopAsyncIteration
+            elif event == ChatEventType.ERROR:
+                raise RuntimeError(f"error event: {event_data}")
+            elif event in [
+                ChatEventType.CONVERSATION_MESSAGE_DELTA,
+                ChatEventType.CONVERSATION_MESSAGE_COMPLETED,
+            ]:
+                return ChatEvent(
+                    event=event, message=Message.model_validate_json(event_data)
+                )
+            elif event in [
+                ChatEventType.CONVERSATION_CHAT_CREATED,
+                ChatEventType.CONVERSATION_CHAT_IN_PROGRESS,
+                ChatEventType.CONVERSATION_CHAT_COMPLETED,
+                ChatEventType.CONVERSATION_CHAT_FAILED,
+                ChatEventType.CONVERSATION_CHAT_REQUIRES_ACTION,
+            ]:
+                return ChatEvent(event=event, chat=Chat.model_validate_json(event_data))
+            else:
+                raise ValueError(f"invalid chat.event: {event}, {event_data}")
+
+        async with aiohttp.ClientSession() as session:
+            try:
+                url = f"{self.config.base_url}/v3/chat"
+                headers = {
+                    "Authorization": f"Bearer {self.config.token}",
+                }
+                params = {
+                    "bot_id": self.config.bot_id,
+                    "user_id": self.config.user_id,
+                    "additional_messages": additionals,
+                    "stream": True,
+                    "auto_save_history": True,
+                    # "conversation_id": self.conversation.id
+                }
+                event = ""
+                async with session.post(url, json=params, headers=headers) as response:
+                    async for line in response.content:
+                        if line:
+                            try:
+                                self.ten_env.log_info(f"line: {line}")
+                                decoded_line = line.decode("utf-8").strip()
+                                if decoded_line:
+                                    if decoded_line.startswith("data:"):
+                                        data = decoded_line[5:].strip()
+                                        yield chat_stream_handler(
+                                            event=event, event_data=data.strip()
+                                        )
+                                    elif decoded_line.startswith("event:"):
+                                        event = decoded_line[6:]
+                                        self.ten_env.log_info(f"event: {event}")
+                                        if event == "done":
+                                            break
+                                    else:
+                                        result = json.loads(decoded_line)
+                                        code = result.get("code", 0)
+                                        if code == 4000:
+                                            await self._send_text(
+                                                "Coze bot is not published.", True
+                                            )
+                                        else:
+                                            self.ten_env.log_error(
+                                                f"Failed to stream chat: {result['code']}"
+                                            )
+                                            await self._send_text(
+                                                "Coze bot is not connected. Please check your configuration.",
+                                                True,
+                                            )
+                            except Exception as e:
+                                self.ten_env.log_error(f"Failed to stream chat: {e}")
+            except Exception as e:
+                traceback.print_exc()
+                self.ten_env.log_error(f"Failed to stream chat: {e}")
+            finally:
+                await session.close()
diff --git a/agents/ten_packages/extension/coze_python_async/manifest.json b/agents/ten_packages/extension/coze_python_async/manifest.json
new file mode 100644
index 0000000000000000000000000000000000000000..47c5b3ac4a3bcfd38d84e455f84675abf9e0a0d2
--- /dev/null
+++ b/agents/ten_packages/extension/coze_python_async/manifest.json
@@ -0,0 +1,75 @@
+{
+  "type": "extension",
+  "name": "coze_python_async",
+  "version": "0.1.0",
+  "dependencies": [
+    {
+      "type": "system",
+      "name": "ten_runtime_python",
+      "version": "0.8"
+    }
+  ],
+  "package": {
+    "include": [
+      "manifest.json",
+      "property.json",
+      "BUILD.gn",
+      "**.tent",
+      "**.py",
+      "README.md",
+      "tests/**"
+    ]
+  },
+  "api": {
+    "property": {
+      "base_url": {
+        "type": "string"
+      },
+      "bot_id": {
+        "type": "string"
+      },
+      "token": {
+        "type": "string"
+      },
+      "user_id": {
+        "type": "string"
+      },
+      "prompt": {
+        "type": "string"
+      },
+      "greeting": {
+        "type": "string"
+      }
+    },
+    "data_in": [
+      {
+        "name": "text_data",
+        "property": {
+          "text": {
+            "type": "string"
+          }
+        }
+      }
+    ],
+    "data_out": [
+      {
+        "name": "text_data",
+        "property": {
+          "text": {
+            "type": "string"
+          }
+        }
+      }
+    ],
+    "cmd_in": [
+      {
+        "name": "flush"
+      }
+    ],
+    "cmd_out": [
+      {
+        "name": "flush"
+      }
+    ]
+  }
+}
\ No newline at end of file
diff --git a/agents/ten_packages/extension/coze_python_async/property.json b/agents/ten_packages/extension/coze_python_async/property.json
new file mode 100644
index 0000000000000000000000000000000000000000..a285733bff4a394c1abdb132308ff082dab61e9f
--- /dev/null
+++ b/agents/ten_packages/extension/coze_python_async/property.json
@@ -0,0 +1,7 @@
+{
+  "token": "${env:COZE_TOKEN}",
+  "bot_id": "${env:COZE_BOT_ID}",
+  "base_url": "https://api.coze.cn",
+  "prompt": "",
+  "greeting": "TEN Agent connected with Coze. How can I help you today?"
+}
\ No newline at end of file
diff --git a/agents/ten_packages/extension/coze_python_async/requirements.txt b/agents/ten_packages/extension/coze_python_async/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..be48f5e7d3e3ea2e78d08b8108a03562773b51c0
--- /dev/null
+++ b/agents/ten_packages/extension/coze_python_async/requirements.txt
@@ -0,0 +1 @@
+cozepy==0.6.2
\ No newline at end of file
diff --git a/agents/ten_packages/extension/deepgram_asr_python/__init__.py b/agents/ten_packages/extension/deepgram_asr_python/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3c731cdd58a08acbf5c417ff0e4e5fea0afcb0f
--- /dev/null
+++ b/agents/ten_packages/extension/deepgram_asr_python/__init__.py
@@ -0,0 +1 @@
+from . import addon
diff --git a/agents/ten_packages/extension/deepgram_asr_python/addon.py b/agents/ten_packages/extension/deepgram_asr_python/addon.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8e6f467639ffecc0d999c7d194191a403f46c73
--- /dev/null
+++ b/agents/ten_packages/extension/deepgram_asr_python/addon.py
@@ -0,0 +1,12 @@
+from ten import (
+    Addon,
+    register_addon_as_extension,
+    TenEnv,
+)
+
+@register_addon_as_extension("deepgram_asr_python")
+class DeepgramASRExtensionAddon(Addon):
+    def on_create_instance(self, ten: TenEnv, addon_name: str, context) -> None:
+        from .extension import DeepgramASRExtension
+        ten.log_info("on_create_instance")
+        ten.on_create_instance_done(DeepgramASRExtension(addon_name), context)
diff --git a/agents/ten_packages/extension/deepgram_asr_python/config.py b/agents/ten_packages/extension/deepgram_asr_python/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..6fa5f16fce929b4e3fb38252e588f9bd5302f4cc
--- /dev/null
+++ b/agents/ten_packages/extension/deepgram_asr_python/config.py
@@ -0,0 +1,26 @@
+from typing import Union
+
+class DeepgramConfig:
+    def __init__(self,
+                 api_key: str,
+                 language: str,
+                 model: str,
+                 sample_rate: Union[str, int]):
+        self.api_key = api_key
+        self.language = language
+        self.model = model
+        self.sample_rate = int(sample_rate)
+
+        self.channels = 1
+        self.encoding = 'linear16'
+        self.interim_results = True
+        self.punctuate = True
+
+    @classmethod
+    def default_config(cls):
+        return cls(
+            api_key="",
+            language="en-US",
+            model="nova-2",
+            sample_rate=16000
+        )
\ No newline at end of file
diff --git a/agents/ten_packages/extension/deepgram_asr_python/extension.py b/agents/ten_packages/extension/deepgram_asr_python/extension.py
new file mode 100644
index 0000000000000000000000000000000000000000..48e516d5620641f7b98c14cf99435737b83a21c7
--- /dev/null
+++ b/agents/ten_packages/extension/deepgram_asr_python/extension.py
@@ -0,0 +1,178 @@
+from ten import (
+    AsyncExtension,
+    AsyncTenEnv,
+    Cmd,
+    Data,
+    AudioFrame,
+    StatusCode,
+    CmdResult,
+)
+
+import asyncio
+
+from deepgram import (
+    AsyncListenWebSocketClient,
+    DeepgramClientOptions,
+    LiveTranscriptionEvents,
+    LiveOptions,
+)
+from dataclasses import dataclass
+
+from ten_ai_base.config import BaseConfig
+
+DATA_OUT_TEXT_DATA_PROPERTY_TEXT = "text"
+DATA_OUT_TEXT_DATA_PROPERTY_IS_FINAL = "is_final"
+DATA_OUT_TEXT_DATA_PROPERTY_STREAM_ID = "stream_id"
+DATA_OUT_TEXT_DATA_PROPERTY_END_OF_SEGMENT = "end_of_segment"
+
+
+@dataclass
+class DeepgramASRConfig(BaseConfig):
+    api_key: str = ""
+    language: str = "en-US"
+    model: str = "nova-2"
+    sample_rate: int = 16000
+
+    channels: int = 1
+    encoding: str = "linear16"
+    interim_results: bool = True
+    punctuate: bool = True
+
+
+class DeepgramASRExtension(AsyncExtension):
+    def __init__(self, name: str):
+        super().__init__(name)
+
+        self.stopped = False
+        self.connected = False
+        self.client: AsyncListenWebSocketClient = None
+        self.config: DeepgramASRConfig = None
+        self.ten_env: AsyncTenEnv = None
+        self.loop = None
+        self.stream_id = -1
+
+    async def on_init(self, ten_env: AsyncTenEnv) -> None:
+        ten_env.log_info("DeepgramASRExtension on_init")
+
+    async def on_start(self, ten_env: AsyncTenEnv) -> None:
+        ten_env.log_info("on_start")
+        self.loop = asyncio.get_event_loop()
+        self.ten_env = ten_env
+
+        self.config = await DeepgramASRConfig.create_async(ten_env=ten_env)
+        ten_env.log_info(f"config: {self.config}")
+
+        if not self.config.api_key:
+            ten_env.log_error("missing required property: api_key")
+            return
+
+        self.loop.create_task(self._start_listen())
+
+        ten_env.log_info("starting async_deepgram_wrapper thread")
+
+    async def on_audio_frame(self, _: AsyncTenEnv, frame: AudioFrame) -> None:
+        frame_buf = frame.get_buf()
+
+        if not frame_buf:
+            self.ten_env.log_warn("send_frame: empty pcm_frame detected.")
+            return
+
+        if not self.connected:
+            self.ten_env.log_debug("send_frame: deepgram not connected.")
+            return
+
+        self.stream_id = frame.get_property_int("stream_id")
+        if self.client:
+            await self.client.send(frame_buf)
+
+    async def on_stop(self, ten_env: AsyncTenEnv) -> None:
+        ten_env.log_info("on_stop")
+
+        self.stopped = True
+
+        if self.client:
+            await self.client.finish()
+
+    async def on_cmd(self, ten_env: AsyncTenEnv, cmd: Cmd) -> None:
+        cmd_json = cmd.to_json()
+        ten_env.log_info(f"on_cmd json: {cmd_json}")
+
+        cmd_result = CmdResult.create(StatusCode.OK)
+        cmd_result.set_property_string("detail", "success")
+        await ten_env.return_result(cmd_result, cmd)
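+
+    # _start_listen (re)establishes the Deepgram websocket. On an unexpected
+    # close, or when the initial start fails, it sleeps 200 ms and schedules
+    # itself again on the extension's event loop.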
+    async def _start_listen(self) -> None:
+        self.ten_env.log_info("start and listen deepgram")
+
+        self.client = AsyncListenWebSocketClient(
+            config=DeepgramClientOptions(
+                api_key=self.config.api_key, options={"keepalive": "true"}
+            )
+        )
+
+        async def on_open(_, event):
+            self.ten_env.log_info(f"deepgram event callback on_open: {event}")
+            self.connected = True
+
+        async def on_close(_, event):
+            self.ten_env.log_info(f"deepgram event callback on_close: {event}")
+            self.connected = False
+            if not self.stopped:
+                self.ten_env.log_warn(
+                    "Deepgram connection closed unexpectedly. Reconnecting..."
+                )
+                await asyncio.sleep(0.2)
+                self.loop.create_task(self._start_listen())
+
+        async def on_message(_, result):
+            sentence = result.channel.alternatives[0].transcript
+
+            if len(sentence) == 0:
+                return
+
+            is_final = result.is_final
+            self.ten_env.log_info(
+                f"deepgram got sentence: [{sentence}], is_final: {is_final}, stream_id: {self.stream_id}"
+            )
+
+            await self._send_text(
+                text=sentence, is_final=is_final, stream_id=self.stream_id
+            )
+
+        async def on_error(_, error):
+            self.ten_env.log_error(f"deepgram event callback on_error: {error}")
+
+        self.client.on(LiveTranscriptionEvents.Open, on_open)
+        self.client.on(LiveTranscriptionEvents.Close, on_close)
+        self.client.on(LiveTranscriptionEvents.Transcript, on_message)
+        self.client.on(LiveTranscriptionEvents.Error, on_error)
+
+        options = LiveOptions(
+            language=self.config.language,
+            model=self.config.model,
+            sample_rate=self.config.sample_rate,
+            channels=self.config.channels,
+            encoding=self.config.encoding,
+            interim_results=self.config.interim_results,
+            punctuate=self.config.punctuate,
+        )
+
+        self.ten_env.log_info(f"deepgram options: {options}")
+        # connect to websocket
+        result = await self.client.start(options)
+        if not result:
+            self.ten_env.log_error("failed to connect to deepgram")
+            await asyncio.sleep(0.2)
+            self.loop.create_task(self._start_listen())
+        else:
+            self.ten_env.log_info("successfully connected to deepgram")
+
+    async def _send_text(self, text: str, is_final: bool, stream_id: int) -> None:
+        stable_data = Data.create("text_data")
+        stable_data.set_property_bool(DATA_OUT_TEXT_DATA_PROPERTY_IS_FINAL, is_final)
+        stable_data.set_property_string(DATA_OUT_TEXT_DATA_PROPERTY_TEXT, text)
+        stable_data.set_property_int(DATA_OUT_TEXT_DATA_PROPERTY_STREAM_ID, stream_id)
+        stable_data.set_property_bool(
+            DATA_OUT_TEXT_DATA_PROPERTY_END_OF_SEGMENT, is_final
+        )
+        asyncio.create_task(self.ten_env.send_data(stable_data))
diff --git a/agents/ten_packages/extension/deepgram_asr_python/manifest.json b/agents/ten_packages/extension/deepgram_asr_python/manifest.json
new file mode 100644
index 0000000000000000000000000000000000000000..a09d0077c0732963977f4a463a44d01d2ec30d65
--- /dev/null
+++ b/agents/ten_packages/extension/deepgram_asr_python/manifest.json
@@ -0,0 +1,88 @@
+{
+  "type": "extension",
+  "name": "deepgram_asr_python",
+  "version": "0.1.0",
+  "dependencies": [
+    {
+      "type": "system",
+      "name": "ten_runtime_python",
+      "version": "0.8"
+    }
+  ],
+  "api": {
+    "property": {
+      "api_key": {
+        "type": "string"
+      },
+      "model": {
+        "type": "string"
+      },
+      "language": {
+        "type": "string"
+      },
+      "sample_rate": {
+        "type": "int64"
+      }
+    },
+    "audio_frame_in": [
+      {
+        "name": "pcm_frame",
+        "property": {}
+      }
+    ],
+    "cmd_in": [
+      {
+        "name": "on_user_joined",
+        "property": {
+          "user_id": {
+            "type": "string"
+          }
+        }
+      },
+      {
+        "name": "on_user_left",
+        "property": {
+          "user_id": {
+            "type": "string"
+          }
+        }
+      },
+      {
+        "name": "on_connection_failure",
+        "property": {
+          "error": {
+            "type": "string"
+          }
+        }
+      }
+    ],
+    "data_out": [
+      {
+        "name": "text_data",
+        "property": {
+          "time": {
+            "type": "int64"
+          },
+          "duration_ms": {
+            "type": "int64"
+          },
+          "language": {
+            "type": "string"
+          },
+          "text": {
+            "type": "string"
+          },
+          "is_final": {
+            "type": "bool"
+          },
+          "stream_id": {
+            "type": "uint32"
+          },
+          "end_of_segment": {
+            "type": "bool"
+          }
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
diff --git a/agents/ten_packages/extension/deepgram_asr_python/property.json b/agents/ten_packages/extension/deepgram_asr_python/property.json
new file mode 100644
index 0000000000000000000000000000000000000000..9bdc266716277b73a47408500a30f055009cfee3
--- /dev/null
+++ b/agents/ten_packages/extension/deepgram_asr_python/property.json
@@ -0,0 +1,6 @@
+{
+  "api_key": "${env:DEEPGRAM_API_KEY}",
+  "language": "en-US",
+  "model": "nova-2",
+  "sample_rate": 16000
+}
\ No newline at end of file
diff --git a/agents/ten_packages/extension/deepgram_asr_python/requirements.txt b/agents/ten_packages/extension/deepgram_asr_python/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a8264cab3744126b94be1f9632f6ee5dc1e5fca0
--- /dev/null
+++ b/agents/ten_packages/extension/deepgram_asr_python/requirements.txt
@@ -0,0 +1,2 @@
+deepgram-sdk==3.7.5
+websockets==13.1
\ No newline at end of file
diff --git a/agents/ten_packages/extension/dify_python/README.md b/agents/ten_packages/extension/dify_python/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..72ec4a330a979247d78bbfcd745eebc11e53044a
--- /dev/null
+++ b/agents/ten_packages/extension/dify_python/README.md
@@ -0,0 +1,29 @@
+# dify_python
+
+A Python extension that connects TEN Agent to a Dify application.
+
+## Features
+
+- Streams answers from the Dify `/chat-messages` API (`response_mode: streaming`) and splits them into sentences for downstream TTS
+- Keeps the Dify `conversation_id` across turns
+- Sends the configurable `greeting` when the first user joins, and `failure_info` when the service cannot be reached
+
+## API
+
+Refer to the `api` definition in [manifest.json](manifest.json) and default values in [property.json](property.json).
diff --git a/agents/ten_packages/extension/dify_python/__init__.py b/agents/ten_packages/extension/dify_python/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..72593ab2259f95627bdd500fe3d062984e7f44c6
--- /dev/null
+++ b/agents/ten_packages/extension/dify_python/__init__.py
@@ -0,0 +1,6 @@
+#
+# This file is part of TEN Framework, an open source project.
+# Licensed under the Apache License, Version 2.0.
+# See the LICENSE file for more information.
+#
+from . import addon
diff --git a/agents/ten_packages/extension/dify_python/addon.py b/agents/ten_packages/extension/dify_python/addon.py
new file mode 100644
index 0000000000000000000000000000000000000000..fec493b7442c72fcacaa4886eb92f5d2b81203a4
--- /dev/null
+++ b/agents/ten_packages/extension/dify_python/addon.py
@@ -0,0 +1,19 @@
+#
+# This file is part of TEN Framework, an open source project.
+# Licensed under the Apache License, Version 2.0.
+# See the LICENSE file for more information.
+#
+from ten import (
+    Addon,
+    register_addon_as_extension,
+    TenEnv,
+)
+from .extension import DifyExtension
+
+
+@register_addon_as_extension("dify_python")
+class DifyExtensionAddon(Addon):
+
+    def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None:
+        ten_env.log_info("DifyExtensionAddon on_create_instance")
+        ten_env.on_create_instance_done(DifyExtension(name), context)
diff --git a/agents/ten_packages/extension/dify_python/extension.py b/agents/ten_packages/extension/dify_python/extension.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c3c60c0ab110381c69f519c8a94feff52952f4f
--- /dev/null
+++ b/agents/ten_packages/extension/dify_python/extension.py
@@ -0,0 +1,293 @@
+#
+# This file is part of TEN Framework, an open source project.
+# Licensed under the Apache License, Version 2.0.
+# See the LICENSE file for more information.
+#
+import asyncio
+import json
+import time
+import traceback
+from dataclasses import dataclass
+from typing import AsyncGenerator
+
+import aiohttp
+from ten import AsyncTenEnv, AudioFrame, Cmd, CmdResult, Data, StatusCode, VideoFrame
+from ten_ai_base.config import BaseConfig
+from ten_ai_base.types import LLMChatCompletionUserMessageParam, LLMDataCompletionArgs
+from ten_ai_base.llm import (
+    AsyncLLMBaseExtension,
+)
+
+CMD_IN_FLUSH = "flush"
+CMD_IN_ON_USER_JOINED = "on_user_joined"
+CMD_IN_ON_USER_LEFT = "on_user_left"
+CMD_OUT_FLUSH = "flush"
+CMD_OUT_TOOL_CALL = "tool_call"
+
+DATA_IN_TEXT_DATA_PROPERTY_IS_FINAL = "is_final"
+DATA_IN_TEXT_DATA_PROPERTY_TEXT = "text"
+
+DATA_OUT_TEXT_DATA_PROPERTY_TEXT = "text"
+DATA_OUT_TEXT_DATA_PROPERTY_END_OF_SEGMENT = "end_of_segment"
+
+CMD_PROPERTY_RESULT = "tool_result"
+
+
+def is_punctuation(char):
+    if char in ["，", ",", ".", "。", "?", "？", "!", "！"]:
+        return True
+    return False
+
+
+def parse_sentences(sentence_fragment, content):
+    sentences = []
+    current_sentence = sentence_fragment
+    for char in content:
+        current_sentence += char
+        if is_punctuation(char):
+            stripped_sentence = current_sentence
+            if any(c.isalnum() for c in stripped_sentence):
+                sentences.append(stripped_sentence)
+            current_sentence = ""
+
+    remain = current_sentence
+    return sentences, remain
+
+
+@dataclass
+class DifyConfig(BaseConfig):
+    base_url: str = "https://api.dify.ai/v1"
+    api_key: str = ""
+    user_id: str = "TenAgent"
+    greeting: str = ""
+    failure_info: str = ""
+    max_history: int = 32
+
+
+class DifyExtension(AsyncLLMBaseExtension):
+    config: DifyConfig = None
+    ten_env: AsyncTenEnv = None
+    loop: asyncio.AbstractEventLoop = None
+    stopped: bool = False
+    users_count = 0
+    conversational_id = ""
+
+    async def on_init(self, ten_env: AsyncTenEnv) -> None:
+        await super().on_init(ten_env)
+        ten_env.log_debug("on_init")
+
+    async def on_start(self, ten_env: AsyncTenEnv) -> None:
+        await super().on_start(ten_env)
+        ten_env.log_debug("on_start")
+        self.loop = asyncio.get_event_loop()
+
+        self.config = await DifyConfig.create_async(ten_env=ten_env)
+        ten_env.log_info(f"config: {self.config}")
+
+        if not self.config.api_key:
+            ten_env.log_error("Missing required configuration")
+            return
+
+        self.ten_env = ten_env
+
+    async def on_stop(self, ten_env: AsyncTenEnv) -> None:
+        await super().on_stop(ten_env)
+        ten_env.log_debug("on_stop")
+
+        self.stopped = True
+
+    async def on_deinit(self, ten_env: AsyncTenEnv) -> None:
+        await super().on_deinit(ten_env)
+        ten_env.log_debug("on_deinit")
+
+    async def on_cmd(self, ten_env: AsyncTenEnv, cmd: Cmd) -> None:
+        cmd_name = cmd.get_name()
+        ten_env.log_debug("on_cmd name {}".format(cmd_name))
+
+        status = StatusCode.OK
+        detail = "success"
+
+        if cmd_name == CMD_IN_FLUSH:
+            await self.flush_input_items(ten_env)
+            await ten_env.send_cmd(Cmd.create(CMD_OUT_FLUSH))
+            ten_env.log_info("on flush")
+        elif cmd_name == CMD_IN_ON_USER_JOINED:
+            self.users_count += 1
+            # Send greeting when first user joined
+            if self.config.greeting and self.users_count == 1:
+                self.send_text_output(ten_env, self.config.greeting, True)
+        elif cmd_name == CMD_IN_ON_USER_LEFT:
+            self.users_count -= 1
+        else:
+            await super().on_cmd(ten_env, cmd)
+            return
+
+        cmd_result = CmdResult.create(status)
+        cmd_result.set_property_string("detail", detail)
+        await ten_env.return_result(cmd_result, cmd)
+
+    async def on_data(self, ten_env: AsyncTenEnv, data: Data) -> None:
+        data_name = data.get_name()
+        ten_env.log_info("on_data name {}".format(data_name))
+
+        is_final = False
+        input_text = ""
+        try:
+            is_final = data.get_property_bool(DATA_IN_TEXT_DATA_PROPERTY_IS_FINAL)
+        except Exception as err:
+            ten_env.log_info(
+                f"GetProperty optional {DATA_IN_TEXT_DATA_PROPERTY_IS_FINAL} failed, err: {err}"
+            )
+
+        try:
+            input_text = data.get_property_string(DATA_IN_TEXT_DATA_PROPERTY_TEXT)
+        except Exception as err:
+            ten_env.log_info(
+                f"GetProperty optional {DATA_IN_TEXT_DATA_PROPERTY_TEXT} failed, err: {err}"
+            )
+
+        if not is_final:
+            ten_env.log_info("ignore non-final input")
+            return
+        if not input_text:
+            ten_env.log_info("ignore empty text")
+            return
+
+        ten_env.log_info(f"OnData input text: [{input_text}]")
+
+        # Start an asynchronous task for handling chat completion
+        message = LLMChatCompletionUserMessageParam(role="user", content=input_text)
+        await self.queue_input_item(False, messages=[message])
+
+    async def on_audio_frame(
+        self, ten_env: AsyncTenEnv, audio_frame: AudioFrame
+    ) -> None:
+        pass
+
+    async def on_video_frame(
+        self, ten_env: AsyncTenEnv, video_frame: VideoFrame
+    ) -> None:
+        pass
+
+    async def on_call_chat_completion(self, async_ten_env, **kargs):
+        raise NotImplementedError
+
+    async def on_tools_update(self, async_ten_env, tool):
+        raise NotImplementedError
+
+    async def on_data_chat_completion(
+        self, ten_env: AsyncTenEnv, **kargs: LLMDataCompletionArgs
+    ) -> None:
+        input_messages: LLMChatCompletionUserMessageParam = kargs.get("messages", [])
+        if not input_messages:
+            ten_env.log_warn("No message in data")
+
+        total_output = ""
+        sentence_fragment = ""
+        calls = {}
+
+        sentences = []
+        self.ten_env.log_info(f"messages: {input_messages}")
+        response = self._stream_chat(query=input_messages[0]["content"])
+        async for message in response:
+            # self.ten_env.log_info(f"content: {message}")
+            message_type = message.get("event")
+            if message_type == "message" or message_type == "agent_message":
+                if not self.conversational_id and message.get("conversation_id"):
+                    self.conversational_id = message["conversation_id"]
+                    ten_env.log_info(f"conversation_id: {self.conversational_id}")
+
+                total_output += message.get("answer", "")
+                sentences, sentence_fragment = parse_sentences(
+                    sentence_fragment, message.get("answer", "")
+                )
+                for s in sentences:
+                    await self._send_text(s, False)
+            elif message_type == "message_end":
+                metadata = message.get("metadata", {})
+                ten_env.log_info(f"metadata: {metadata}")
+            elif message_type == "error":
+                err_message = message.get("message", {})
+                ten_env.log_error(f"error: {err_message}")
+                await self._send_text(err_message, True)
+
+        await self._send_text(sentence_fragment, True)
+        self.ten_env.log_info(f"total_output: {total_output} {calls}")
+
+    async def _stream_chat(self, query: str) -> AsyncGenerator[dict, None]:
+        async with aiohttp.ClientSession() as session:
+            try:
+                payload = {
+                    "inputs": {},
+                    "query": query,
+                    "response_mode": "streaming",
+                }
+                if self.conversational_id:
+                    payload["conversation_id"] = self.conversational_id
+                if self.config.user_id:
+                    payload["user"] = self.config.user_id
+                self.ten_env.log_info(f"payload before sending: {json.dumps(payload)}")
+                headers = {
+                    "Authorization": f"Bearer {self.config.api_key}",
+                    "Content-Type": "application/json",
+                }
+                url = f"{self.config.base_url}/chat-messages"
+                start_time = time.time()
+                async with session.post(url, json=payload, headers=headers) as response:
+                    if response.status != 200:
+                        r = await response.json()
+                        self.ten_env.log_error(
+                            f"Received unexpected status {r} from the server."
+                        )
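+                        # Fall back to the operator-configured failure_info
+                        # message instead of surfacing the raw API error.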
+                        if self.config.failure_info:
+                            await self._send_text(self.config.failure_info, True)
+                        return
+                    end_time = time.time()
+                    self.ten_env.log_info(f"connect time {end_time - start_time} s")
+
+                    # data: {"event": "message", "task_id": "900bbd43-dc0b-4383-a372-aa6e6c414227", "id": "663c5084-a254-4040-8ad3-51f2a3c1a77c", "answer": "Hi", "created_at": 1705398420}\n\n
+                    async for line in response.content:
+                        if line:
+                            l = line.decode("utf-8").strip()
+                            if l.startswith("data:"):
+                                content = l[5:].strip()
+                                if content == "[DONE]":
+                                    break
+                                self.ten_env.log_debug(f"content: {content}")
+                                yield json.loads(content)
+            except Exception as e:
+                traceback.print_exc()
+                self.ten_env.log_error(f"Failed to handle {e}")
+            finally:
+                await session.close()
+                session = None
+
+    async def _send_text(self, text: str, end_of_segment: bool) -> None:
+        data = Data.create("text_data")
+        data.set_property_string(DATA_OUT_TEXT_DATA_PROPERTY_TEXT, text)
+        data.set_property_bool(
+            DATA_OUT_TEXT_DATA_PROPERTY_END_OF_SEGMENT, end_of_segment
+        )
+        asyncio.create_task(self.ten_env.send_data(data))
diff --git a/agents/ten_packages/extension/dify_python/manifest.json b/agents/ten_packages/extension/dify_python/manifest.json
new file mode 100644
index 0000000000000000000000000000000000000000..ff7f1501f4cf74b22f5eb089cd87177e6166a01f
--- /dev/null
+++ b/agents/ten_packages/extension/dify_python/manifest.json
@@ -0,0 +1,42 @@
+{
+  "type": "extension",
+  "name": "dify_python",
+  "version": "0.1.0",
+  "dependencies": [
+    {
+      "type": "system",
+      "name": "ten_runtime_python",
+      "version": "0.8"
+    }
+  ],
+  "package": {
+    "include": [
+      "manifest.json",
+      "property.json",
+      "BUILD.gn",
+      "**.tent",
+      "**.py",
+      "README.md",
+      "tests/**"
+    ]
+  },
+  "api": {
+    "property": {
+      "user_id": {
+        "type": "string"
+      },
+      "api_key": {
+        "type": "string"
+      },
+      "base_url": {
+        "type": "string"
+      },
+      "greeting": {
+        "type": "string"
+      },
+      "failure_info": {
+        "type": "string"
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/agents/ten_packages/extension/dify_python/property.json b/agents/ten_packages/extension/dify_python/property.json
new file mode 100644
index 0000000000000000000000000000000000000000..889afd276c6c67d43b6b5e29e847832d2cb7c703
--- /dev/null
+++ b/agents/ten_packages/extension/dify_python/property.json
@@ -0,0 +1,7 @@
+{
+  "user_id": "User",
+  "api_key": "${env:DIFY_API_KEY}",
+  "base_url": "https://api.dify.ai/v1",
+  "greeting": "TEN Agent connected with Dify. How can I help you today?",
+  "failure_info": "Sorry, I am unable to process your request at the moment. Please check your dify configuration."
+}
\ No newline at end of file
diff --git a/agents/ten_packages/extension/elevenlabs_tts/elevenlabs_tts.go b/agents/ten_packages/extension/elevenlabs_tts/elevenlabs_tts.go
new file mode 100644
index 0000000000000000000000000000000000000000..4d712e4f33d5dce4347e764b65ff9cd7531bed0b
--- /dev/null
+++ b/agents/ten_packages/extension/elevenlabs_tts/elevenlabs_tts.go
@@ -0,0 +1,82 @@
+/**
+ *
+ * Agora Real Time Engagement
+ * Created by XinHui Li in 2024-07.
+ * Copyright (c) 2024 Agora IO. All rights reserved.
+ *
+ */
+// Note that this is just an example extension written in the Go programming
+// language, so the package name does not match the containing directory
+// name; this is not common practice in Go.
+package extension
+
+import (
+    "context"
+    "fmt"
+    "io"
+    "time"
+
+    elevenlabs "github.com/haguro/elevenlabs-go"
+)
+
+type elevenlabsTTS struct {
+    client *elevenlabs.Client
+    config elevenlabsTTSConfig
+}
+
+type elevenlabsTTSConfig struct {
+    ApiKey                   string
+    ModelId                  string
+    OptimizeStreamingLatency int
+    RequestTimeoutSeconds    int
+    SimilarityBoost          float32
+    SpeakerBoost             bool
+    Stability                float32
+    Style                    float32
+    VoiceId                  string
+}
+
+func defaultElevenlabsTTSConfig() elevenlabsTTSConfig {
+    return elevenlabsTTSConfig{
+        ApiKey:                   "",
+        ModelId:                  "eleven_multilingual_v2",
+        OptimizeStreamingLatency: 0,
+        RequestTimeoutSeconds:    30,
+        SimilarityBoost:          0.75,
+        SpeakerBoost:             false,
+        Stability:                0.5,
+        Style:                    0.0,
+        VoiceId:                  "pNInz6obpgDQGcFmaJgB",
+    }
+}
+
+func newElevenlabsTTS(config elevenlabsTTSConfig) (*elevenlabsTTS, error) {
+    return &elevenlabsTTS{
+        config: config,
+        client: elevenlabs.NewClient(context.Background(), config.ApiKey, time.Duration(config.RequestTimeoutSeconds)*time.Second),
+    }, nil
+}
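+
+// textToSpeechStream synthesizes text and writes the audio to streamWriter as
+// raw PCM ("pcm_16000": 16 kHz, mono, 16-bit), which matches the pcm_frame
+// format this extension emits downstream.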
+func (e *elevenlabsTTS) textToSpeechStream(streamWriter io.Writer, text string) (err error) {
+    req := elevenlabs.TextToSpeechRequest{
+        Text:    text,
+        ModelID: e.config.ModelId,
+        VoiceSettings: &elevenlabs.VoiceSettings{
+            SimilarityBoost: e.config.SimilarityBoost,
+            SpeakerBoost:    e.config.SpeakerBoost,
+            Stability:       e.config.Stability,
+            Style:           e.config.Style,
+        },
+    }
+    queries := []elevenlabs.QueryFunc{
+        elevenlabs.LatencyOptimizations(e.config.OptimizeStreamingLatency),
+        elevenlabs.OutputFormat("pcm_16000"),
+    }
+
+    err = e.client.TextToSpeechStream(streamWriter, e.config.VoiceId, req, queries...)
+    if err != nil {
+        return fmt.Errorf("TextToSpeechStream failed, err: %v", err)
+    }
+
+    return nil
+}
diff --git a/agents/ten_packages/extension/elevenlabs_tts/elevenlabs_tts_extension.go b/agents/ten_packages/extension/elevenlabs_tts/elevenlabs_tts_extension.go
new file mode 100644
index 0000000000000000000000000000000000000000..a1085c07ab3f1fc15580cc5eff940f8abf51ab99
--- /dev/null
+++ b/agents/ten_packages/extension/elevenlabs_tts/elevenlabs_tts_extension.go
@@ -0,0 +1,339 @@
+/**
+ *
+ * Agora Real Time Engagement
+ * Created by XinHui Li in 2024-07.
+ * Copyright (c) 2024 Agora IO. All rights reserved.
+ *
+ */
+// Note that this is just an example extension written in the Go programming
+// language, so the package name does not match the containing directory
+// name; this is not common practice in Go.
+package extension
+
+import (
+    "fmt"
+    "io"
+    "sync"
+    "sync/atomic"
+    "time"
+
+    "ten_framework/ten"
+)
+
+const (
+    cmdInFlush                 = "flush"
+    cmdOutFlush                = "flush"
+    dataInTextDataPropertyText = "text"
+
+    propertyApiKey                   = "api_key"                    // Required
+    propertyModelId                  = "model_id"                   // Optional
+    propertyOptimizeStreamingLatency = "optimize_streaming_latency" // Optional
+    propertyRequestTimeoutSeconds    = "request_timeout_seconds"    // Optional
+    propertySimilarityBoost          = "similarity_boost"           // Optional
+    propertySpeakerBoost             = "speaker_boost"              // Optional
+    propertyStability                = "stability"                  // Optional
+    propertyStyle                    = "style"                      // Optional
+    propertyVoiceId                  = "voice_id"                   // Optional
+)
+
+const (
+    textChanMax = 1024
+)
+
+var (
+    outdateTs atomic.Int64
+    textChan  chan *message
+    wg        sync.WaitGroup
+)
+
+type elevenlabsTTSExtension struct {
+    ten.DefaultExtension
+    elevenlabsTTS *elevenlabsTTS
+}
+
+type message struct {
+    text       string
+    receivedTs int64
+}
+
+func newElevenlabsTTSExtension(name string) ten.Extension {
+    return &elevenlabsTTSExtension{}
+}
+
+// OnStart will be called when the extension is starting; properties can be
+// read here to initialize and start the extension.
+// Currently supported properties:
+// - api_key (required)
+// - model_id
+// - optimize_streaming_latency
+// - request_timeout_seconds
+// - similarity_boost
+// - speaker_boost
+// - stability
+// - style
+// - voice_id
+func (e *elevenlabsTTSExtension) OnStart(ten ten.TenEnv) {
+    ten.LogInfo("OnStart")
+
+    // prepare configuration
+    elevenlabsTTSConfig := defaultElevenlabsTTSConfig()
+
+    if apiKey, err := ten.GetPropertyString(propertyApiKey); err != nil {
+        ten.LogError(fmt.Sprintf("GetProperty required %s failed, err: %v", propertyApiKey, err))
+        return
+    } else {
+        elevenlabsTTSConfig.ApiKey = apiKey
+    }
+
+    if modelId, err := ten.GetPropertyString(propertyModelId); err != nil {
+        ten.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", propertyModelId, err))
+    } else {
+        if len(modelId) > 0 {
+            elevenlabsTTSConfig.ModelId = modelId
+        }
+    }
+
+    if optimizeStreamingLatency, err := ten.GetPropertyInt64(propertyOptimizeStreamingLatency); err != nil {
+        ten.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", propertyOptimizeStreamingLatency, err))
+    } else {
+        if optimizeStreamingLatency > 0 {
+            elevenlabsTTSConfig.OptimizeStreamingLatency = int(optimizeStreamingLatency)
+        }
+    }
+
+    if requestTimeoutSeconds, err := ten.GetPropertyInt64(propertyRequestTimeoutSeconds); err != nil {
+        ten.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", propertyRequestTimeoutSeconds, err))
+    } else {
+        if requestTimeoutSeconds > 0 {
+            elevenlabsTTSConfig.RequestTimeoutSeconds = int(requestTimeoutSeconds)
+        }
+    }
+
+    if similarityBoost, err := ten.GetPropertyFloat64(propertySimilarityBoost); err != nil {
+        ten.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", propertySimilarityBoost, err))
+    } else {
+        elevenlabsTTSConfig.SimilarityBoost = float32(similarityBoost)
+    }
+
+    if speakerBoost, err := ten.GetPropertyBool(propertySpeakerBoost); err != nil {
+        ten.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", propertySpeakerBoost, err))
+    } else {
+        elevenlabsTTSConfig.SpeakerBoost = speakerBoost
+    }
+
+    if stability, err := ten.GetPropertyFloat64(propertyStability); err != nil {
+        ten.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", propertyStability, err))
+    } else {
+        elevenlabsTTSConfig.Stability = float32(stability)
+    }
+
+    if style, err := ten.GetPropertyFloat64(propertyStyle); err != nil {
+        ten.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", propertyStyle, err))
+    } else {
+        elevenlabsTTSConfig.Style = float32(style)
+    }
+
+    if voiceId, err := ten.GetPropertyString(propertyVoiceId); err != nil {
+        ten.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", propertyVoiceId, err))
+    } else {
+        if len(voiceId) > 0 {
+            elevenlabsTTSConfig.VoiceId = voiceId
+        }
+    }
+
+    // create elevenlabsTTS instance
+    elevenlabsTTS, err := newElevenlabsTTS(elevenlabsTTSConfig)
+    if err != nil {
+        ten.LogError(fmt.Sprintf("newElevenlabsTTS failed, err: %v", err))
+        return
+    }
+
+    ten.LogInfo(fmt.Sprintf("newElevenlabsTTS succeeded with ModelId: %s, VoiceId: %s",
+        elevenlabsTTSConfig.ModelId, elevenlabsTTSConfig.VoiceId))
+
+    // set elevenlabsTTS instance
+    e.elevenlabsTTS = elevenlabsTTS
+
+    // create pcm instance
+    pcm := newPcm(defaultPcmConfig())
+    pcmFrameSize := pcm.getPcmFrameSize()
+
+    // init chan
+    textChan = make(chan *message, textChanMax)
+
+    go func() {
+        ten.LogInfo("process textChan")
+
+        for msg := range textChan {
+            if msg.receivedTs < outdateTs.Load() { // Check whether to interrupt
+                ten.LogInfo(fmt.Sprintf("textChan interrupt and flushing for input text: [%s], receivedTs: %d, outdateTs: %d",
+                    msg.text, msg.receivedTs, outdateTs.Load()))
+                continue
+            }
+
+            wg.Add(1)
+            ten.LogInfo(fmt.Sprintf("textChan text: [%s]", msg.text))
+
+            r, w := io.Pipe()
+            startTime := time.Now()
+
+            go func() {
+                defer wg.Done()
+                defer w.Close()
+
+                ten.LogInfo(fmt.Sprintf("textToSpeechStream text: [%s]", msg.text))
+
+                // Use a locally scoped error here so the two goroutines do not
+                // share the err variable captured from OnStart.
+                if err := e.elevenlabsTTS.textToSpeechStream(w, msg.text); err != nil {
+                    ten.LogError(fmt.Sprintf("textToSpeechStream failed, err: %v", err))
+                    return
+                }
+            }()
+
+            ten.LogInfo(fmt.Sprintf("read pcm stream, text:[%s], pcmFrameSize:%d", msg.text, pcmFrameSize))
+
+            var (
+                firstFrameLatency int64
+                n                 int
+                pcmFrameRead      int
+                readBytes         int
+                sentFrames        int
+                err               error
+            )
+            buf := pcm.newBuf()
+
+            // read pcm stream
+            for {
+                if msg.receivedTs < outdateTs.Load() { // Check whether to interrupt
+                    ten.LogInfo(fmt.Sprintf("read pcm stream interrupt and flushing for input text: [%s], receivedTs: %d, outdateTs: %d",
+                        msg.text, msg.receivedTs, outdateTs.Load()))
+                    break
+                }
+
+                n, err = r.Read(buf[pcmFrameRead:])
+                readBytes += n
+                pcmFrameRead += n
+
+                if err != nil {
+                    if err == io.EOF {
+                        ten.LogInfo("read pcm stream EOF")
+                        break
+                    }
+
+                    ten.LogError(fmt.Sprintf("read pcm stream failed, err: %v", err))
+                    break
+                }
+
+                if pcmFrameRead != pcmFrameSize {
+                    ten.LogDebug(fmt.Sprintf("the number of bytes read is [%d] inconsistent with pcm frame size", pcmFrameRead))
+                    continue
+                }
+
+                pcm.send(ten, buf)
+                // clear buf
+                buf = pcm.newBuf()
+                pcmFrameRead = 0
+                sentFrames++
+
+                if firstFrameLatency == 0 {
+                    firstFrameLatency = time.Since(startTime).Milliseconds()
+                    ten.LogInfo(fmt.Sprintf("first frame available for text: [%s], receivedTs: %d, firstFrameLatency: %dms", msg.text, msg.receivedTs, firstFrameLatency))
+                }
+
+                ten.LogDebug(fmt.Sprintf("sending pcm data, text: [%s]", msg.text))
+            }
+
+            if pcmFrameRead > 0 {
+                pcm.send(ten, buf)
+                sentFrames++
+                ten.LogInfo(fmt.Sprintf("sending pcm remain data, text: [%s], pcmFrameRead: %d", msg.text, pcmFrameRead))
+            }
+
+            r.Close()
+            ten.LogInfo(fmt.Sprintf("send pcm data finished, text: [%s], receivedTs: %d, readBytes: %d, sentFrames: %d, firstFrameLatency: %dms, finishLatency: %dms",
+                msg.text, msg.receivedTs, readBytes, sentFrames, firstFrameLatency, time.Since(startTime).Milliseconds()))
+        }
+    }()
+
+    ten.OnStartDone()
+}
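+
+// Flushing works by timestamp comparison: OnCmd("flush") stores the current
+// time in outdateTs, and both the textChan consumer above and the PCM read
+// loop drop any message whose receivedTs is older than outdateTs.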
time.Since(startTime).Milliseconds())) + } + }() + + ten.OnStartDone() +} + +// OnCmd receives cmd from ten graph. +// current supported cmd: +// - name: flush +// example: +// {"name": "flush"} +func (e *elevenlabsTTSExtension) OnCmd( + tenEnv ten.TenEnv, + cmd ten.Cmd, +) { + cmdName, err := cmd.GetName() + if err != nil { + tenEnv.LogError(fmt.Sprintf("OnCmd get name failed, err: %v", err)) + cmdResult, _ := ten.NewCmdResult(ten.StatusCodeError) + tenEnv.ReturnResult(cmdResult, cmd, nil) + return + } + + tenEnv.LogInfo(fmt.Sprintf("OnCmd %s", cmdName)) + + switch cmdName { + case cmdInFlush: + outdateTs.Store(time.Now().UnixMicro()) + + // send out + outCmd, err := ten.NewCmd(cmdOutFlush) + if err != nil { + tenEnv.LogError(fmt.Sprintf("new cmd %s failed, err: %v", cmdOutFlush, err)) + cmdResult, _ := ten.NewCmdResult(ten.StatusCodeError) + tenEnv.ReturnResult(cmdResult, cmd, nil) + return + } + + if err := tenEnv.SendCmd(outCmd, nil); err != nil { + tenEnv.LogError(fmt.Sprintf("send cmd %s failed, err: %v", cmdOutFlush, err)) + cmdResult, _ := ten.NewCmdResult(ten.StatusCodeError) + tenEnv.ReturnResult(cmdResult, cmd, nil) + return + } else { + tenEnv.LogInfo(fmt.Sprintf("cmd %s sent", cmdOutFlush)) + } + } + + cmdResult, _ := ten.NewCmdResult(ten.StatusCodeOk) + tenEnv.ReturnResult(cmdResult, cmd, nil) +} + +// OnData receives data from ten graph. +// current supported data: +// - name: text_data +// example: +// {"name": "text_data", "properties": {"text": "hello"}} +func (e *elevenlabsTTSExtension) OnData( + tenEnv ten.TenEnv, + data ten.Data, +) { + text, err := data.GetPropertyString(dataInTextDataPropertyText) + if err != nil { + tenEnv.LogWarn(fmt.Sprintf("OnData GetProperty %s failed, err: %v", dataInTextDataPropertyText, err)) + return + } + + if len(text) == 0 { + tenEnv.LogDebug("OnData text is empty, ignored") + return + } + + tenEnv.LogInfo(fmt.Sprintf("OnData input text: [%s]", text)) + + go func() { + textChan <- &message{text: text, receivedTs: time.Now().UnixMicro()} + }() +} + +func init() { + // Register addon + ten.RegisterAddonAsExtension( + "elevenlabs_tts", + ten.NewDefaultExtensionAddon(newElevenlabsTTSExtension), + ) +} diff --git a/agents/ten_packages/extension/elevenlabs_tts/go.mod b/agents/ten_packages/extension/elevenlabs_tts/go.mod new file mode 100644 index 0000000000000000000000000000000000000000..04de2ed54824277ae0a0eacbe444ff82a4792af3 --- /dev/null +++ b/agents/ten_packages/extension/elevenlabs_tts/go.mod @@ -0,0 +1,10 @@ +module elevenlabs_tts + +go 1.20 + +replace ten_framework => ../../system/ten_runtime_go/interface + +require ( + github.com/haguro/elevenlabs-go v0.2.4 + ten_framework v0.0.0-00010101000000-000000000000 +) diff --git a/agents/ten_packages/extension/elevenlabs_tts/go.sum b/agents/ten_packages/extension/elevenlabs_tts/go.sum new file mode 100644 index 0000000000000000000000000000000000000000..6c1feddc6352011b483b0b738bff2c4019bca8d0 --- /dev/null +++ b/agents/ten_packages/extension/elevenlabs_tts/go.sum @@ -0,0 +1,2 @@ +github.com/haguro/elevenlabs-go v0.2.4 h1:Z1a/I+b5fAtGSfrhEj97dYG1EbV9uRzSfvz5n5+ud34= +github.com/haguro/elevenlabs-go v0.2.4/go.mod h1:j15h9w2BpgxlIGWXmCKWPPDaTo2QAO83zFy5J+pFCt8= diff --git a/agents/ten_packages/extension/elevenlabs_tts/manifest.json b/agents/ten_packages/extension/elevenlabs_tts/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..47e868b1a5d83b58e6ccb19f1592eebdc7be74b8 --- /dev/null +++ b/agents/ten_packages/extension/elevenlabs_tts/manifest.json @@ -0,0 +1,68 @@ +{ + 
"type": "extension", + "name": "elevenlabs_tts", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_go", + "version": "0.8" + } + ], + "api": { + "property": { + "api_key": { + "type": "string" + }, + "model_id": { + "type": "string" + }, + "request_timeout_seconds": { + "type": "int64" + }, + "similarity_boost": { + "type": "float64" + }, + "speaker_boost": { + "type": "bool" + }, + "stability": { + "type": "float64" + }, + "style": { + "type": "float64" + }, + "optimize_streaming_latency": { + "type": "int64" + }, + "voice_id": { + "type": "string" + } + }, + "data_in": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + } + } + } + ], + "cmd_in": [ + { + "name": "flush" + } + ], + "cmd_out": [ + { + "name": "flush" + } + ], + "audio_frame_out": [ + { + "name": "pcm_frame" + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/elevenlabs_tts/pcm.go b/agents/ten_packages/extension/elevenlabs_tts/pcm.go new file mode 100644 index 0000000000000000000000000000000000000000..a13433c00afb55f3b864a0c025c01cc8f887a640 --- /dev/null +++ b/agents/ten_packages/extension/elevenlabs_tts/pcm.go @@ -0,0 +1,103 @@ +/** + * + * Agora Real Time Engagement + * Created by XinHui Li in 2024-07. + * Copyright (c) 2024 Agora IO. All rights reserved. + * + */ +// Note that this is just an example extension written in the GO programming +// language, so the package name does not equal to the containing directory +// name. However, it is not common in Go. +package extension + +import ( + "fmt" + + "ten_framework/ten" +) + +type pcm struct { + config *pcmConfig +} + +type pcmConfig struct { + BytesPerSample int32 + Channel int32 + ChannelLayout uint64 + Name string + SampleRate int32 + SamplesPerChannel int32 + Timestamp int64 +} + +func defaultPcmConfig() *pcmConfig { + return &pcmConfig{ + BytesPerSample: 2, + Channel: 1, + ChannelLayout: 1, + Name: "pcm_frame", + SampleRate: 16000, + SamplesPerChannel: 16000 / 100, + Timestamp: 0, + } +} + +func newPcm(config *pcmConfig) *pcm { + return &pcm{ + config: config, + } +} + +func (p *pcm) getPcmFrame(tenEnv ten.TenEnv, buf []byte) (pcmFrame ten.AudioFrame, err error) { + pcmFrame, err = ten.NewAudioFrame(p.config.Name) + if err != nil { + tenEnv.LogError(fmt.Sprintf("NewPcmFrame failed, err: %v", err)) + return + } + + // set pcm frame + pcmFrame.SetBytesPerSample(p.config.BytesPerSample) + pcmFrame.SetSampleRate(p.config.SampleRate) + pcmFrame.SetChannelLayout(p.config.ChannelLayout) + pcmFrame.SetNumberOfChannels(p.config.Channel) + pcmFrame.SetTimestamp(p.config.Timestamp) + pcmFrame.SetDataFmt(ten.AudioFrameDataFmtInterleave) + pcmFrame.SetSamplesPerChannel(p.config.SamplesPerChannel) + pcmFrame.AllocBuf(p.getPcmFrameSize()) + + borrowedBuf, err := pcmFrame.LockBuf() + if err != nil { + tenEnv.LogError(fmt.Sprintf("LockBuf failed, err: %v", err)) + return + } + + // copy data + copy(borrowedBuf, buf) + + pcmFrame.UnlockBuf(&borrowedBuf) + return +} + +func (p *pcm) getPcmFrameSize() int { + return int(p.config.SamplesPerChannel * p.config.Channel * p.config.BytesPerSample) +} + +func (p *pcm) newBuf() []byte { + return make([]byte, p.getPcmFrameSize()) +} + +func (p *pcm) send(tenEnv ten.TenEnv, buf []byte) (err error) { + pcmFrame, err := p.getPcmFrame(tenEnv, buf) + if err != nil { + tenEnv.LogError(fmt.Sprintf("getPcmFrame failed, err: %v", err)) + return + } + + // send pcm + if err = tenEnv.SendAudioFrame(pcmFrame, nil); err != nil { + 
tenEnv.LogError(fmt.Sprintf("SendPcmFrame failed, err: %v", err)) + return + } + + return +} diff --git a/agents/ten_packages/extension/elevenlabs_tts/property.json b/agents/ten_packages/extension/elevenlabs_tts/property.json new file mode 100644 index 0000000000000000000000000000000000000000..a17ebff8c694d79d6fab55c37a50b23cc6e2a0cb --- /dev/null +++ b/agents/ten_packages/extension/elevenlabs_tts/property.json @@ -0,0 +1,10 @@ +{ + "api_key": "${env:ELEVENLABS_TTS_KEY}", + "model_id": "eleven_multilingual_v2", + "optimize_streaming_latency": 0, + "request_timeout_seconds": 30, + "similarity_boost": 0.75, + "speaker_boost": false, + "stability": 0.5, + "voice_id": "pNInz6obpgDQGcFmaJgB" +} \ No newline at end of file diff --git a/agents/ten_packages/extension/elevenlabs_tts_python/README.md b/agents/ten_packages/extension/elevenlabs_tts_python/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e6032c06ad25bed6916cf9491b5f15c4244dd784 --- /dev/null +++ b/agents/ten_packages/extension/elevenlabs_tts_python/README.md @@ -0,0 +1,29 @@ +# elevenlabs_tts_python + + + +## Features + + + +- xxx feature + +## API + +Refer to `api` definition in [manifest.json] and default values in [property.json](property.json). + + + +## Development + +### Build + + + +### Unit test + + + +## Misc + + diff --git a/agents/ten_packages/extension/elevenlabs_tts_python/__init__.py b/agents/ten_packages/extension/elevenlabs_tts_python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..72593ab2259f95627bdd500fe3d062984e7f44c6 --- /dev/null +++ b/agents/ten_packages/extension/elevenlabs_tts_python/__init__.py @@ -0,0 +1,6 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. +# +from . import addon diff --git a/agents/ten_packages/extension/elevenlabs_tts_python/addon.py b/agents/ten_packages/extension/elevenlabs_tts_python/addon.py new file mode 100644 index 0000000000000000000000000000000000000000..af96068d7432834c748aecbd47701a4aac382915 --- /dev/null +++ b/agents/ten_packages/extension/elevenlabs_tts_python/addon.py @@ -0,0 +1,19 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. +# +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + + +@register_addon_as_extension("elevenlabs_tts_python") +class ElevenLabsTTSExtensionAddon(Addon): + + def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None: + from .extension import ElevenLabsTTSExtension + ten_env.log_info("ElevenLabsTTSExtensionAddon on_create_instance") + ten_env.on_create_instance_done(ElevenLabsTTSExtension(name), context) diff --git a/agents/ten_packages/extension/elevenlabs_tts_python/elevenlabs_tts.py b/agents/ten_packages/extension/elevenlabs_tts_python/elevenlabs_tts.py new file mode 100644 index 0000000000000000000000000000000000000000..afe30a131eab784a66efa55b2a18c75249399c8f --- /dev/null +++ b/agents/ten_packages/extension/elevenlabs_tts_python/elevenlabs_tts.py @@ -0,0 +1,57 @@ +# +# +# Agora Real Time Engagement +# Created by XinHui Li in 2024-07. +# Copyright (c) 2024 Agora IO. All rights reserved. 
+# +# + +from dataclasses import dataclass +from typing import AsyncIterator +from ten_ai_base.config import BaseConfig + + +@dataclass +class ElevenLabsTTSConfig(BaseConfig): + api_key: str = "" + model_id: str = "eleven_multilingual_v2" + optimize_streaming_latency: int = 0 + similarity_boost: float = 0.75 + speaker_boost: bool = False + stability: float = 0.5 + request_timeout_seconds: int = 10 + style: float = 0.0 + voice_id: str = "pNInz6obpgDQGcFmaJgB" + + +class ElevenLabsTTS: + def __init__(self, config: ElevenLabsTTSConfig) -> None: + self.config = config + self.client = None + + def text_to_speech_stream(self, text: str) -> AsyncIterator[bytes]: + # to avoid circular import issue when using openai with 11labs + from elevenlabs.client import AsyncElevenLabs + from elevenlabs import Voice, VoiceSettings + + if not self.client: + self.client = AsyncElevenLabs( + api_key=self.config.api_key, timeout=self.config.request_timeout_seconds + ) + + return self.client.generate( + text=text, + model=self.config.model_id, + optimize_streaming_latency=self.config.optimize_streaming_latency, + output_format="pcm_16000", + stream=True, + voice=Voice( + voice_id=self.config.voice_id, + settings=VoiceSettings( + stability=self.config.stability, + similarity_boost=self.config.similarity_boost, + style=self.config.style, + speaker_boost=self.config.speaker_boost, + ), + ), + ) diff --git a/agents/ten_packages/extension/elevenlabs_tts_python/extension.py b/agents/ten_packages/extension/elevenlabs_tts_python/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..9afcb38661817934d2a2eb35bed9f9e798736758 --- /dev/null +++ b/agents/ten_packages/extension/elevenlabs_tts_python/extension.py @@ -0,0 +1,55 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. 
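+# +# Rough usage sketch of the wrapper this extension drives (handle_pcm is a +# hypothetical callback; assumes a valid ElevenLabs api_key in the config): +# +# config = ElevenLabsTTSConfig(api_key="...") +# tts = ElevenLabsTTS(config) +# async for chunk in await tts.text_to_speech_stream("hello"): +# handle_pcm(chunk) # 16 kHz, 16-bit mono PCM ("pcm_16000")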
+# +import traceback +from .elevenlabs_tts import ElevenLabsTTS, ElevenLabsTTSConfig +from ten import ( + AsyncTenEnv, +) +from ten_ai_base.tts import AsyncTTSBaseExtension + + +class ElevenLabsTTSExtension(AsyncTTSBaseExtension): + def __init__(self, name: str) -> None: + super().__init__(name) + self.config = None + self.client = None + + async def on_init(self, ten_env: AsyncTenEnv) -> None: + await super().on_init(ten_env) + ten_env.log_debug("on_init") + + async def on_start(self, ten_env: AsyncTenEnv) -> None: + try: + await super().on_start(ten_env) + ten_env.log_debug("on_start") + self.config = await ElevenLabsTTSConfig.create_async(ten_env=ten_env) + + if not self.config.api_key: + raise ValueError("api_key is required") + + self.client = ElevenLabsTTS(self.config) + except Exception: + ten_env.log_error(f"on_start failed: {traceback.format_exc()}") + + async def on_stop(self, ten_env: AsyncTenEnv) -> None: + await super().on_stop(ten_env) + ten_env.log_debug("on_stop") + + async def on_deinit(self, ten_env: AsyncTenEnv) -> None: + await super().on_deinit(ten_env) + ten_env.log_debug("on_deinit") + + async def on_request_tts( + self, ten_env: AsyncTenEnv, input_text: str, end_of_segment: bool + ) -> None: + audio_stream = await self.client.text_to_speech_stream(input_text) + ten_env.log_info(f"on_request_tts: {input_text}") + async for audio_data in audio_stream: + await self.send_audio_out(ten_env, audio_data) + ten_env.log_info(f"on_request_tts: {input_text} done") + + async def on_cancel_tts(self, ten_env: AsyncTenEnv) -> None: + return await super().on_cancel_tts(ten_env) diff --git a/agents/ten_packages/extension/elevenlabs_tts_python/manifest.json b/agents/ten_packages/extension/elevenlabs_tts_python/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..b50298b4b2768fe0245e3da9a6424d677f8a1cd1 --- /dev/null +++ b/agents/ten_packages/extension/elevenlabs_tts_python/manifest.json @@ -0,0 +1,79 @@ +{ + "type": "extension", + "name": "elevenlabs_tts_python", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "package": { + "include": [ + "manifest.json", + "property.json", + "BUILD.gn", + "**.tent", + "**.py", + "README.md", + "tests/**" + ] + }, + "api": { + "property": { + "api_key": { + "type": "string" + }, + "model_id": { + "type": "string" + }, + "request_timeout_seconds": { + "type": "int64" + }, + "similarity_boost": { + "type": "float64" + }, + "speaker_boost": { + "type": "bool" + }, + "stability": { + "type": "float64" + }, + "style": { + "type": "float64" + }, + "optimize_streaming_latency": { + "type": "int64" + }, + "voice_id": { + "type": "string" + } + }, + "data_in": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + } + } + } + ], + "cmd_in": [ + { + "name": "flush" + } + ], + "cmd_out": [ + { + "name": "flush" + } + ], + "audio_frame_out": [ + { + "name": "pcm_frame" + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/elevenlabs_tts_python/property.json b/agents/ten_packages/extension/elevenlabs_tts_python/property.json new file mode 100644 index 0000000000000000000000000000000000000000..2f2e583da5af75d50e8607b322d5d56daeee937b --- /dev/null +++ b/agents/ten_packages/extension/elevenlabs_tts_python/property.json @@ -0,0 +1,12 @@ +{ + "api_key": "${env:ELEVENLABS_TTS_KEY}", + "model_id": "eleven_multilingual_v2", + "optimize_streaming_latency": 0, + "request_timeout_seconds": 30, + 
"similarity_boost": 0.75, + "speaker_boost": false, + "stability": 0.5, + "voice_id": "pNInz6obpgDQGcFmaJgB", + "prompt": "", + "base_url": "" +} \ No newline at end of file diff --git a/agents/ten_packages/extension/elevenlabs_tts_python/requirements.txt b/agents/ten_packages/extension/elevenlabs_tts_python/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..baecca8fc97023047d332cb72e22e57f3ec867de --- /dev/null +++ b/agents/ten_packages/extension/elevenlabs_tts_python/requirements.txt @@ -0,0 +1 @@ +elevenlabs>=1.50.0 \ No newline at end of file diff --git a/agents/ten_packages/extension/elevenlabs_tts_python/tests/bin/start b/agents/ten_packages/extension/elevenlabs_tts_python/tests/bin/start new file mode 100644 index 0000000000000000000000000000000000000000..04d784ea179c32ded5fc50565fb28b4ae0585c6b --- /dev/null +++ b/agents/ten_packages/extension/elevenlabs_tts_python/tests/bin/start @@ -0,0 +1,21 @@ +#!/bin/bash + +set -e + +cd "$(dirname "${BASH_SOURCE[0]}")/../.." + +export PYTHONPATH=.ten/app:.ten/app/ten_packages/system/ten_runtime_python/lib:.ten/app/ten_packages/system/ten_runtime_python/interface:.ten/app/ten_packages/system/ten_ai_base/interface:$PYTHONPATH + +# If the Python app imports some modules that are compiled with a different +# version of libstdc++ (ex: PyTorch), the Python app may encounter confusing +# errors. To solve this problem, we can preload the correct version of +# libstdc++. +# +# export LD_PRELOAD=/lib/x86_64-linux-gnu/libstdc++.so.6 +# +# Another solution is to make sure the module 'ten_runtime_python' is imported +# _after_ the module that requires another version of libstdc++ is imported. +# +# Refer to https://github.com/pytorch/pytorch/issues/102360?from_wecom=1#issuecomment-1708989096 + +pytest tests/ "$@" \ No newline at end of file diff --git a/agents/ten_packages/extension/elevenlabs_tts_python/tests/conftest.py b/agents/ten_packages/extension/elevenlabs_tts_python/tests/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..9a2175e36e06ea1b6b40e07c5cf1e134ee1aec17 --- /dev/null +++ b/agents/ten_packages/extension/elevenlabs_tts_python/tests/conftest.py @@ -0,0 +1,36 @@ +# +# Copyright © 2025 Agora +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0, with certain conditions. +# Refer to the "LICENSE" file in the root directory for more information. +# +import pytest +import sys +import os +from ten import ( + unregister_all_addons_and_cleanup, +) + + +@pytest.fixture(scope="session", autouse=True) +def global_setup_and_teardown(): + # Set the environment variable. + os.environ["TEN_DISABLE_ADDON_UNREGISTER_AFTER_APP_CLOSE"] = "true" + + # Verify the environment variable is correctly set. + if ( + "TEN_DISABLE_ADDON_UNREGISTER_AFTER_APP_CLOSE" not in os.environ + or os.environ["TEN_DISABLE_ADDON_UNREGISTER_AFTER_APP_CLOSE"] != "true" + ): + print( + "Failed to set TEN_DISABLE_ADDON_UNREGISTER_AFTER_APP_CLOSE", + file=sys.stderr, + ) + sys.exit(1) + + # Yield control to the test; after the test execution is complete, continue + # with the teardown process. + yield + + # Teardown part. 
+ unregister_all_addons_and_cleanup() \ No newline at end of file diff --git a/agents/ten_packages/extension/elevenlabs_tts_python/tests/test_basic.py b/agents/ten_packages/extension/elevenlabs_tts_python/tests/test_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..b3ad3d440e2065808da4b8ef47bc6afd652e4f96 --- /dev/null +++ b/agents/ten_packages/extension/elevenlabs_tts_python/tests/test_basic.py @@ -0,0 +1,36 @@ +# +# Copyright © 2024 Agora +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0, with certain conditions. +# Refer to the "LICENSE" file in the root directory for more information. +# +from pathlib import Path +from ten import ExtensionTester, TenEnvTester, Cmd, CmdResult, StatusCode + + +class ExtensionTesterBasic(ExtensionTester): + def check_hello(self, ten_env: TenEnvTester, result: CmdResult): + statusCode = result.get_status_code() + print("receive hello_world, status:" + str(statusCode)) + + if statusCode == StatusCode.OK: + # TODO: move stop_test() to where the test passes + ten_env.stop_test() + + def on_start(self, ten_env: TenEnvTester) -> None: + new_cmd = Cmd.create("hello_world") + + print("send hello_world") + ten_env.send_cmd( + new_cmd, + lambda ten_env, result,_: self.check_hello(ten_env, result), + ) + + print("tester on_start_done") + ten_env.on_start_done() + + +def test_basic(): + tester = ExtensionTesterBasic() + tester.set_test_mode_single("elevenlabs_tts_python") + tester.run() diff --git a/agents/ten_packages/extension/fashionai/README.md b/agents/ten_packages/extension/fashionai/README.md new file mode 100644 index 0000000000000000000000000000000000000000..5af79baca1527c0ae4ac98af9a5fa59ebe544fc4 --- /dev/null +++ b/agents/ten_packages/extension/fashionai/README.md @@ -0,0 +1,29 @@ +# fashionai + + + +## Features + + + +- xxx feature + +## API + +Refer to `api` definition in [manifest.json] and default values in [property.json](property.json). + + + +## Development + +### Build + + + +### Unit test + + + +## Misc + + diff --git a/agents/ten_packages/extension/fashionai/__init__.py b/agents/ten_packages/extension/fashionai/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..645dc801215620e030a431687f48186f711fbf8b --- /dev/null +++ b/agents/ten_packages/extension/fashionai/__init__.py @@ -0,0 +1,8 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved. 
+# +# +from .src import addon diff --git a/agents/ten_packages/extension/fashionai/manifest.json b/agents/ten_packages/extension/fashionai/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..eadb2cc25756ba618ebc1e36d959be6740d84d6d --- /dev/null +++ b/agents/ten_packages/extension/fashionai/manifest.json @@ -0,0 +1,63 @@ +{ + "type": "extension", + "name": "fashionai", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "package": { + "include": [ + "manifest.json", + "property.json", + "BUILD.gn", + "**.tent", + "**.py", + "src/**.tent", + "src/**.py", + "README.md" + ] + }, + "api": { + "property": { + "app_id": { + "type": "string" + }, + "token": { + "type": "string" + }, + "channel": { + "type": "string" + }, + "stream_id": { + "type": "uint32" + }, + "service_id": { + "type": "string" + } + }, + "data_in": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + } + } + } + ], + "cmd_in": [ + { + "name": "flush" + } + ], + "cmd_out": [ + { + "name": "flush" + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/fashionai/property.json b/agents/ten_packages/extension/fashionai/property.json new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/agents/ten_packages/extension/fashionai/property.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/agents/ten_packages/extension/fashionai/requirements.txt b/agents/ten_packages/extension/fashionai/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..7a38911714aee65e0c35d5503dfe48769e36470d --- /dev/null +++ b/agents/ten_packages/extension/fashionai/requirements.txt @@ -0,0 +1 @@ +websockets \ No newline at end of file diff --git a/agents/ten_packages/extension/fashionai/src/__init__.py b/agents/ten_packages/extension/fashionai/src/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/agents/ten_packages/extension/fashionai/src/addon.py b/agents/ten_packages/extension/fashionai/src/addon.py new file mode 100644 index 0000000000000000000000000000000000000000..2200059e1e4837e67f533dd3f37caf59b05eafe6 --- /dev/null +++ b/agents/ten_packages/extension/fashionai/src/addon.py @@ -0,0 +1,21 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved. +# +# +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + + +@register_addon_as_extension("fashionai") +class FashionAIExtensionAddon(Addon): + + def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None: + from .extension import FashionAIExtension + ten_env.log_info("FashionAIExtensionAddon on_create_instance") + ten_env.on_create_instance_done(FashionAIExtension(name), context) diff --git a/agents/ten_packages/extension/fashionai/src/extension.py b/agents/ten_packages/extension/fashionai/src/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..90b05804da4a071a7d9da3937a8953a57e252f05 --- /dev/null +++ b/agents/ten_packages/extension/fashionai/src/extension.py @@ -0,0 +1,144 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved. 
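+# +# Queues text received via on_data and streams it to the FashionAI avatar service through src/fashionai_client.py.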
+# +# +import traceback +from ten import ( + AudioFrame, + VideoFrame, + AsyncTenEnv, + Cmd, + StatusCode, + CmdResult, + Data, +) +from ten.async_extension import AsyncExtension + +import asyncio +from .fashionai_client import FashionAIClient +from datetime import datetime + + +class FashionAIExtension(AsyncExtension): + + def __init__(self, name: str): + super().__init__(name) + + self.app_id = "" + self.token = "" + self.channel = "" + self.stream_id = 0 + self.service_id = "agora" + self.stopped = False + self.queue = None + self.client = None + self.outdate_ts = datetime.now() + + async def on_init(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_info( + "FASHION_AI on_init *********************************************************" + ) + self.stopped = False + self.queue = asyncio.Queue(maxsize=3000) + + async def on_start(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_info( + "FASHION_AI on_start *********************************************************" + ) + + try: + self.app_id = await ten_env.get_property_string("app_id") + self.token = await ten_env.get_property_string("token") + self.channel = await ten_env.get_property_string("channel") + self.stream_id = str(await ten_env.get_property_int("stream_id")) + self.service_id = await ten_env.get_property_string("service_id") + + ten_env.log_info( + f"FASHION_AI on_start: app_id = {self.app_id}, token = {self.token}, channel = {self.channel}, stream_id = {self.stream_id}, service_id = {self.service_id}" + ) + except Exception as e: + ten_env.log_warn(f"get_property err: {e}") + + if len(self.token) > 0: + self.app_id = self.token + self.client = FashionAIClient( + ten_env, + "wss://ingress.service.fasionai.com/websocket/node5/agoramultimodel2", + self.service_id, + ) + asyncio.create_task(self.process_input_text(ten_env)) + await self.init_fashionai(self.app_id, self.channel, self.stream_id) + + async def on_stop(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_info("FASHION_AI on_stop") + self.stopped = True + await self.queue.put(None) + + async def on_deinit(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_info("FASHION_AI on_deinit") + + async def on_cmd(self, ten_env: AsyncTenEnv, cmd: Cmd) -> None: + cmd_name = cmd.get_name() + ten_env.log_info(f"FASHION_AI on_cmd name {cmd_name}") + + if cmd_name == "flush": + self.outdate_ts = datetime.now() + try: + await self.client.send_interrupt() + + except Exception: + ten_env.log_warn(f"flush err: {traceback.format_exc()}") + + cmd_out = Cmd.create("flush") + await ten_env.send_cmd(cmd_out) + # ten_env.send_cmd(cmd_out, lambda ten, result: ten_env.log_info("send_cmd flush done")) + else: + ten_env.log_info(f"unknown cmd {cmd_name}") + + ten_env.log_info("FASHION_AI on_cmd done") + cmd_result = CmdResult.create(StatusCode.OK) + await ten_env.return_result(cmd_result, cmd) + + async def on_data(self, ten_env: AsyncTenEnv, data: Data) -> None: + input_text = data.get_property_string("text") + if len(input_text) == 0: + ten_env.log_info("FASHION_AI ignore empty text") + return + + ten_env.log_info(f"FASHION_AI on data {input_text}") + try: + await self.queue.put(input_text) + except asyncio.TimeoutError: + ten_env.log_warn(f"FASHION_AI put inputText={input_text} queue timed out") + except Exception as e: + ten_env.log_warn(f"FASHION_AI put inputText={input_text} queue err: {e}") + ten_env.log_info(f"FASHION_AI send_inputText {input_text}") + + async def on_audio_frame(self, _: AsyncTenEnv, audio_frame: AudioFrame) -> None: + pass + + async def on_video_frame(self, _: 
AsyncTenEnv, video_frame: VideoFrame) -> None: + pass + + async def init_fashionai(self, app_id, channel, stream_id): + await self.client.connect() + await self.client.stream_start(app_id, channel, stream_id) + await self.client.render_start() + + async def process_input_text(self, ten_env: AsyncTenEnv): + while True: + input_text = await self.queue.get() + if input_text is None: + ten_env.log_info("Stopping async_polly_handler...") + break + + ten_env.log_info(f"async_polly_handler: loop fashion ai polly.{input_text}") + + if len(input_text) > 0: + try: + await self.client.send_inputText(input_text) + except Exception as e: + ten_env.log_error(str(e)) diff --git a/agents/ten_packages/extension/fashionai/src/fashionai_client.py b/agents/ten_packages/extension/fashionai/src/fashionai_client.py new file mode 100644 index 0000000000000000000000000000000000000000..0b2e36e327f83e762f226936288bec66a4526499 --- /dev/null +++ b/agents/ten_packages/extension/fashionai/src/fashionai_client.py @@ -0,0 +1,121 @@ +import json +import ssl +import uuid + +import websockets +import asyncio + + +class FashionAIClient: + def __init__(self, ten_env, uri, service_id): + self.uri = uri + self.websocket = None + self.service_id = service_id + self.cancelled = False + self.ten_env = ten_env + + async def connect(self): + # pylint: disable=protected-access + ssl_context = ssl._create_unverified_context() + self.websocket = await websockets.connect(self.uri, ssl=ssl_context) + asyncio.create_task( + self.listen() + ) # Start listening immediately after connection + + async def listen(self): + """Continuously listen for incoming messages.""" + if self.websocket is not None: + try: + async for message in self.websocket: + self.ten_env.log_info(f"FASHION_AI Received: {message}") + # await self.handle_message(message) + except websockets.exceptions.ConnectionClosedError as e: + self.ten_env.log_info(f"FASHION_AI Connection closed with error: {e}") + await self.reconnect() + + async def stream_start(self, app_id, channel, stream_id): + await self.send_message( + { + "request_id": str(uuid.uuid4()), + "service_id": self.service_id, + "token": app_id, + "channel_id": channel, + "user_id": stream_id, + "signal": "STREAM_START", + } + ) + + async def stream_stop(self): + await self.send_message( + { + "request_id": str(uuid.uuid4()), + "service_id": self.service_id, + "signal": "STREAM_STOP", + } + ) + + async def render_start(self): + await self.send_message( + { + "request_id": str(uuid.uuid4()), + "service_id": self.service_id, + "signal": "RENDER_START", + } + ) + self.cancelled = False + + async def send_inputText(self, inputText): + if self.cancelled: + await self.render_start() + await self.send_message( + { + "request_id": str(uuid.uuid4()), + "service_id": self.service_id, + "signal": "RENDER_CONTENT", + "text": inputText, + } + ) + + async def send_interrupt(self): + await self.send_message( + { + "service_id": self.service_id, + "signal": "RENDER_CANCEL", + } + ) + self.cancelled = True + + async def send_message(self, message): + if self.websocket is not None: + try: + await self.websocket.send(json.dumps(message)) + self.ten_env.log_info(f"FASHION_AI Sent: {message}") + # response = await asyncio.wait_for(self.websocket.recv(), timeout=2) + # self.ten_env.log_info(f"FASHION_AI Received: {response}") + except websockets.exceptions.ConnectionClosedError as e: + self.ten_env.log_info(f"FASHION_AI Connection closed with error: {e}") + await self.reconnect() + except asyncio.TimeoutError: + 
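# Only reachable if the commented-out recv/wait_for above is re-enabled. +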
self.ten_env.log_info("FASHION_AI Timeout waiting for response") + else: + self.ten_env.log_info("FASHION_AI WebSocket is not connected.") + + async def close(self): + if self.websocket is not None: + await self.websocket.close() + self.ten_env.log_info("FASHION_AI WebSocket connection closed.") + else: + self.ten_env.log_info("FASHION_AI WebSocket is not connected.") + + async def reconnect(self): + self.ten_env.log_info("FASHION_AI Reconnecting...") + await self.close() + await self.connect() + + async def heartbeat(self, interval): + while True: + await asyncio.sleep(interval) + try: + await self.send_inputText("ping") + except websockets.exceptions.ConnectionClosedError: + break diff --git a/agents/ten_packages/extension/file_chunker/__init__.py b/agents/ten_packages/extension/file_chunker/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ee1b1d399c82e881ecd59f9c99cf18a0c767f81d --- /dev/null +++ b/agents/ten_packages/extension/file_chunker/__init__.py @@ -0,0 +1 @@ +from . import file_chunker_addon diff --git a/agents/ten_packages/extension/file_chunker/file_chunker_addon.py b/agents/ten_packages/extension/file_chunker/file_chunker_addon.py new file mode 100644 index 0000000000000000000000000000000000000000..7828f1a3439bea82a115cb0b20790a1d9f148ac9 --- /dev/null +++ b/agents/ten_packages/extension/file_chunker/file_chunker_addon.py @@ -0,0 +1,13 @@ +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + + +@register_addon_as_extension("file_chunker") +class FileChunkerExtensionAddon(Addon): + def on_create_instance(self, ten: TenEnv, addon_name: str, context) -> None: + from .file_chunker_extension import FileChunkerExtension + ten.log_info("on_create_instance") + ten.on_create_instance_done(FileChunkerExtension(addon_name), context) diff --git a/agents/ten_packages/extension/file_chunker/file_chunker_extension.py b/agents/ten_packages/extension/file_chunker/file_chunker_extension.py new file mode 100644 index 0000000000000000000000000000000000000000..aaf4072814f3abb773417a9dc54db70b9de2ad9d --- /dev/null +++ b/agents/ten_packages/extension/file_chunker/file_chunker_extension.py @@ -0,0 +1,223 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-05. +# Copyright (c) 2024 Agora IO. All rights reserved. 
+# +# +from ten import ( + Extension, + TenEnv, + Cmd, + StatusCode, + CmdResult, +) +from typing import List, Any +import json +from datetime import datetime +import uuid, math +import queue, threading + +CMD_FILE_CHUNK = "file_chunk" +UPSERT_VECTOR_CMD = "upsert_vector" +FILE_CHUNKED_CMD = "file_chunked" + +CHUNK_SIZE = 200 +CHUNK_OVERLAP = 20 +BATCH_SIZE = 5 + + +def batch(nodes, size): + batch_texts = [] + for n in nodes: + batch_texts.append(n.text) + if len(batch_texts) == size: + yield batch_texts[:] + batch_texts.clear() + if batch_texts: + yield batch_texts + + +class FileChunkerExtension(Extension): + def __init__(self, name: str): + super().__init__(name) + + self.counters = {} + self.expected = {} + self.new_collection_name = "" + self.file_chunked_event = threading.Event() + + self.thread = None + self.queue = queue.Queue() + self.stop = False + + def generate_collection_name(self) -> str: + """ + follow rules: ^[a-z]+[a-z0-9_]* + """ + + return "coll_" + uuid.uuid1().hex.lower() + + def split(self, ten: TenEnv, path: str) -> List[Any]: + # lazy import packages which requires long time to load + from llama_index.core import SimpleDirectoryReader + from llama_index.core.node_parser import SentenceSplitter + + # load pdf file by path + documents = SimpleDirectoryReader( + input_files=[path], filename_as_id=True + ).load_data() + + # split pdf file into chunks + splitter = SentenceSplitter( + chunk_size=CHUNK_SIZE, + chunk_overlap=CHUNK_OVERLAP, + ) + nodes = splitter.get_nodes_from_documents(documents) + ten.log_info(f"file {path} pages count {documents}, chunking count {nodes}") + return nodes + + def create_collection(self, ten: TenEnv, collection_name: str, wait: bool): + cmd_out = Cmd.create("create_collection") + cmd_out.set_property_string("collection_name", collection_name) + + wait_event = threading.Event() + ten.send_cmd( + cmd_out, + lambda ten, result, _: wait_event.set(), + ) + if wait: + wait_event.wait() + + def embedding(self, ten: TenEnv, path: str, texts: List[str]): + ten.log_info( + f"generate embeddings for the file: {path}, with batch size: {len(texts)}" + ) + + cmd_out = Cmd.create("embed_batch") + cmd_out.set_property_from_json("inputs", json.dumps(texts)) + ten.send_cmd( + cmd_out, lambda ten, result, _: self.vector_store(ten, path, texts, result) + ) + + def vector_store(self, ten: TenEnv, path: str, texts: List[str], result: CmdResult): + ten.log_info(f"vector store start for one splitting of the file {path}") + file_name = path.split("/")[-1] + embed_output_json = result.get_property_string("embeddings") + embed_output = json.loads(embed_output_json) + cmd_out = Cmd.create(UPSERT_VECTOR_CMD) + cmd_out.set_property_string("collection_name", self.new_collection_name) + cmd_out.set_property_string("file_name", file_name) + embeddings = [record["embedding"] for record in embed_output] + content = [] + for text, embedding in zip(texts, embeddings): + content.append({"text": text, "embedding": embedding}) + cmd_out.set_property_string("content", json.dumps(content)) + # ten.log_info(json.dumps(content)) + ten.send_cmd(cmd_out, lambda ten, result, _: self.file_chunked(ten, path)) + + def file_chunked(self, ten: TenEnv, path: str): + if path in self.counters and path in self.expected: + self.counters[path] += 1 + ten.log_info( + "complete vector store for one splitting of the file: %s, current counter: %i, expected: %i", + path, + self.counters[path], + self.expected[path], + ) + if self.counters[path] == self.expected[path]: + chunks_count = 
self.counters[path] + del self.counters[path] + del self.expected[path] + ten.log_info( + f"complete chunk for the file: {path}, chunks_count {chunks_count}" + ) + cmd_out = Cmd.create(FILE_CHUNKED_CMD) + cmd_out.set_property_string("path", path) + cmd_out.set_property_string("collection", self.new_collection_name) + ten.send_cmd( + cmd_out, + lambda ten, result, _: ten.log_info("send_cmd done"), + ) + self.file_chunked_event.set() + else: + ten.log_error("missing counter for the file path: %s", path) + + def on_cmd(self, ten: TenEnv, cmd: Cmd) -> None: + cmd_name = cmd.get_name() + if cmd_name == CMD_FILE_CHUNK: + path = cmd.get_property_string("path") + + collection = None + try: + collection = cmd.get_property_string("collection") + except Exception: + ten.log_warn(f"missing collection property in cmd {cmd_name}") + + self.queue.put((path, collection)) # make sure files are processed in order + else: + ten.log_info(f"unknown cmd {cmd_name}") + + cmd_result = CmdResult.create(StatusCode.OK) + cmd_result.set_property_string("detail", "ok") + ten.return_result(cmd_result, cmd) + + def async_handler(self, ten: TenEnv) -> None: + while not self.stop: + value = self.queue.get() + if value is None: + break + path, collection = value + + # start processing the file + start_time = datetime.now() + if collection is None: + collection = self.generate_collection_name() + ten.log_info(f"collection {collection} generated") + ten.log_info(f"start processing {path}, collection {collection}") + + # create collection + self.create_collection(ten, collection, True) + ten.log_info(f"collection {collection} created") + + # split + nodes = self.split(ten, path) + + # reset counters and events + self.new_collection_name = collection + self.expected[path] = math.ceil(len(nodes) / BATCH_SIZE) + self.counters[path] = 0 + self.file_chunked_event.clear() + + # trigger embedding and vector storing in parallel + for texts in list(batch(nodes, BATCH_SIZE)): + self.embedding(ten, path, texts) + + # wait for all chunks to be processed + self.file_chunked_event.wait() + + ten.log_info( + f"finished processing {path}, collection {collection}, cost {int((datetime.now() - start_time).total_seconds() * 1000)}ms" + ) + + def on_start(self, ten: TenEnv) -> None: + ten.log_info("on_start") + + self.stop = False + self.thread = threading.Thread(target=self.async_handler, args=[ten]) + self.thread.start() + + ten.on_start_done() + + def on_stop(self, ten: TenEnv) -> None: + ten.log_info("on_stop") + + self.stop = True + if self.thread is not None: + while not self.queue.empty(): + self.queue.get() + self.queue.put(None) + self.thread.join() + self.thread = None + + ten.on_stop_done() diff --git a/agents/ten_packages/extension/file_chunker/manifest.json b/agents/ten_packages/extension/file_chunker/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..c777aae8967814666b02846032201adf34f00541 --- /dev/null +++ b/agents/ten_packages/extension/file_chunker/manifest.json @@ -0,0 +1,105 @@ +{ + "type": "extension", + "name": "file_chunker", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "api": { + "property": {}, + "cmd_in": [ + { + "name": "file_chunk", + "property": { + "filename": { + "type": "string" + }, + "path": { + "type": "string" + }, + "collection": { + "type": "string" + } + }, + "required": [ + "path" + ] + } + ], + "cmd_out": [ + { + "name": "embed_batch", + "property": { + "inputs": { + "type": "array", + 
"items": { + "type": "string" + } + } + }, + "required": [ + "inputs" + ], + "result": { + "property": { + "embeddings": { + "type": "string" + } + } + } + }, + { + "name": "upsert_vector", + "property": { + "collection_name": { + "type": "string" + }, + "file_name": { + "type": "string" + }, + "content": { + "type": "string" + } + }, + "required": [ + "collection_name", + "file_name", + "content" + ] + }, + { + "name": "create_collection", + "property": { + "collection_name": { + "type": "string" + }, + "dimension": { + "type": "int32" + } + }, + "required": [ + "collection_name" + ] + }, + { + "name": "file_chunked", + "property": { + "path": { + "type": "string" + }, + "collection": { + "type": "string" + } + }, + "required": [ + "path", + "collection" + ] + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/file_chunker/property.json b/agents/ten_packages/extension/file_chunker/property.json new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/agents/ten_packages/extension/file_chunker/property.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/agents/ten_packages/extension/file_chunker/requirements.txt b/agents/ten_packages/extension/file_chunker/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..b5a22c290973c5221dcb18f682f714431dc6ce3f --- /dev/null +++ b/agents/ten_packages/extension/file_chunker/requirements.txt @@ -0,0 +1,2 @@ +pypdf +llama-index \ No newline at end of file diff --git a/agents/ten_packages/extension/fish_audio_tts/fish_audio_tts.go b/agents/ten_packages/extension/fish_audio_tts/fish_audio_tts.go new file mode 100644 index 0000000000000000000000000000000000000000..f3449df184be440cc030d0817b2cf3f04f2ea0e7 --- /dev/null +++ b/agents/ten_packages/extension/fish_audio_tts/fish_audio_tts.go @@ -0,0 +1,128 @@ +/** + * + * Agora Real Time Engagement + * Created by Hai Guo in 2024-08. + * Copyright (c) 2024 Agora IO. All rights reserved. + * + */ +// An extension written by Go for TTS +package extension + +import ( + "bytes" + "fmt" + "io" + "net/http" + "ten_framework/ten" + "time" + + "github.com/vmihailenco/msgpack/v5" +) + +type fishAudioTTS struct { + client *http.Client //? 
+ config fishAudioTTSConfig +} + +type fishAudioTTSConfig struct { + ApiKey string + ModelId string + OptimizeStreamingLatency bool + RequestTimeoutSeconds int + BaseUrl string +} + +func defaultFishAudioTTSConfig() fishAudioTTSConfig { + return fishAudioTTSConfig{ + ApiKey: "", + ModelId: "d8639b5cc95548f5afbcfe22d3ba5ce5", + OptimizeStreamingLatency: true, + RequestTimeoutSeconds: 30, + BaseUrl: "https://api.fish.audio", + } +} + +func newFishAudioTTS(config fishAudioTTSConfig) (*fishAudioTTS, error) { + return &fishAudioTTS{ + config: config, + client: &http.Client{ + Transport: &http.Transport{ + MaxIdleConnsPerHost: 10, + // Keep-Alive connection never expires + IdleConnTimeout: time.Second * 0, + }, + Timeout: time.Second * time.Duration(config.RequestTimeoutSeconds), + }, + }, nil +} + +func (e *fishAudioTTS) textToSpeechStream(tenEnv ten.TenEnv, streamWriter io.Writer, text string) (err error) { + latency := "normal" + if e.config.OptimizeStreamingLatency { + latency = "balanced" + } + + // Create the payload + payload := map[string]interface{}{ + "text": text, + "chunk_length": 100, + "latency": latency, + "reference_id": e.config.ModelId, + "format": "pcm", // 44100/ 1ch/ 16bit + } + + // Encode the payload to MessagePack + body, err := msgpack.Marshal(payload) + if err != nil { + panic(err) + } + + // Create a new POST request + req, err := http.NewRequest("POST", e.config.BaseUrl+"/v1/tts", bytes.NewBuffer(body)) + if err != nil { + panic(err) + } + + // Set the headers + req.Header.Add("Authorization", "Bearer "+e.config.ApiKey) + req.Header.Set("Content-Type", "application/msgpack") + + // Create a client and send the request + client := e.client + resp, err := client.Do(req) + if err != nil { + panic(err) + } + defer resp.Body.Close() + + if err != nil { + return fmt.Errorf("TextToSpeechStream failed, err: %v", err) + } + + // Check the response status code + if resp.StatusCode != http.StatusOK { + tenEnv.LogError(fmt.Sprintf("Unexpected response status, status: %d", resp.StatusCode)) + return fmt.Errorf("unexpected response status: %d", resp.StatusCode) + } + + // Write the returned PCM data to streamWriter + buffer := make([]byte, 4096) // 4KB buffer size + for { + n, err := resp.Body.Read(buffer) + if err != nil && err != io.EOF { + tenEnv.LogError(fmt.Sprintf("Failed to read from response body, error: %s", err)) + return fmt.Errorf("failed to read from response body: %w", err) + } + if n == 0 { + break // end of the stream + } + + _, writeErr := streamWriter.Write(buffer[:n]) + if writeErr != nil { + tenEnv.LogError(fmt.Sprintf("Failed to write to streamWriter, error: %s", writeErr)) + return fmt.Errorf("failed to write to streamWriter: %w", writeErr) + } + } + + return nil +} diff --git a/agents/ten_packages/extension/fish_audio_tts/fish_audio_tts_extension.go b/agents/ten_packages/extension/fish_audio_tts/fish_audio_tts_extension.go new file mode 100644 index 0000000000000000000000000000000000000000..7d2d5f4d36628f4d642835ce267a4c99d95f58a0 --- /dev/null +++ b/agents/ten_packages/extension/fish_audio_tts/fish_audio_tts_extension.go @@ -0,0 +1,303 @@ +/** + * + * Agora Real Time Engagement + * Created by Hai Guo in 2024-08. + * Copyright (c) 2024 Agora IO. All rights reserved. 
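+ * Emits 44.1 kHz, 16-bit mono PCM ("pcm_frame") generated by the Fish Audio TTS API.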
+ * + */ +// An extension written by Go for TTS +package extension + +import ( + "fmt" + "io" + "sync" + "sync/atomic" + "time" + + "ten_framework/ten" +) + +const ( + cmdInFlush = "flush" + cmdOutFlush = "flush" + dataInTextDataPropertyText = "text" + + propertyApiKey = "api_key" // Required + propertyModelId = "model_id" // Optional + propertyOptimizeStreamingLatency = "optimize_streaming_latency" // Optional + propertyRequestTimeoutSeconds = "request_timeout_seconds" // Optional + propertyBaseUrl = "base_url" // Optional +) + +const ( + textChanMax = 1024 +) + +var ( + outdateTs atomic.Int64 + textChan chan *message + wg sync.WaitGroup +) + +type fishAudioTTSExtension struct { + ten.DefaultExtension + fishAudioTTS *fishAudioTTS +} + +type message struct { + text string + receivedTs int64 +} + +func newFishAudioTTSExtension(name string) ten.Extension { + return &fishAudioTTSExtension{} +} + +// OnStart will be called when the extension is starting, +// properies can be read here to initialize and start the extension. +// current supported properties: +// - api_key (required) +// - model_id +// - optimize_streaming_latency +// - request_timeout_seconds +// - base_url +func (e *fishAudioTTSExtension) OnStart(ten ten.TenEnv) { + ten.LogInfo("OnStart") + + // prepare configuration + fishAudioTTSConfig := defaultFishAudioTTSConfig() + + if apiKey, err := ten.GetPropertyString(propertyApiKey); err != nil { + ten.LogError(fmt.Sprintf("GetProperty required %s failed, err: %v", propertyApiKey, err)) + return + } else { + fishAudioTTSConfig.ApiKey = apiKey + } + + if modelId, err := ten.GetPropertyString(propertyModelId); err != nil { + ten.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", propertyModelId, err)) + } else { + if len(modelId) > 0 { + fishAudioTTSConfig.ModelId = modelId + } + } + + if optimizeStreamingLatency, err := ten.GetPropertyBool(propertyOptimizeStreamingLatency); err != nil { + ten.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", propertyOptimizeStreamingLatency, err)) + } else { + fishAudioTTSConfig.OptimizeStreamingLatency = optimizeStreamingLatency + } + + if requestTimeoutSeconds, err := ten.GetPropertyInt64(propertyRequestTimeoutSeconds); err != nil { + ten.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", propertyRequestTimeoutSeconds, err)) + } else { + if requestTimeoutSeconds > 0 { + fishAudioTTSConfig.RequestTimeoutSeconds = int(requestTimeoutSeconds) + } + } + + if baseUrl, err := ten.GetPropertyString(propertyBaseUrl); err != nil { + ten.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", propertyBaseUrl, err)) + } else { + if len(baseUrl) > 0 { + fishAudioTTSConfig.BaseUrl = baseUrl + } + } + + // create fishAudioTTS instance + fishAudioTTS, err := newFishAudioTTS(fishAudioTTSConfig) + if err != nil { + ten.LogError(fmt.Sprintf("newFishAudioTTS failed, err: %v", err)) + return + } + + ten.LogInfo(fmt.Sprintf("newFishAudioTTS succeed with ModelId: %s", + fishAudioTTSConfig.ModelId)) + + // set fishAudio instance + e.fishAudioTTS = fishAudioTTS + + // create pcm instance + pcm := newPcm(defaultPcmConfig()) + pcmFrameSize := pcm.getPcmFrameSize() + + // init chan + textChan = make(chan *message, textChanMax) + + go func() { + ten.LogInfo("process textChan") + + for msg := range textChan { + if msg.receivedTs < outdateTs.Load() { // Check whether to interrupt + ten.LogInfo(fmt.Sprintf("textChan interrupt and flushing for input text: [%s], receivedTs: %d, outdateTs: %d", + msg.text, msg.receivedTs, outdateTs.Load())) 
+ continue + } + + wg.Add(1) + ten.LogInfo(fmt.Sprintf("textChan text: [%s]", msg.text)) + + r, w := io.Pipe() + startTime := time.Now() + + go func() { + defer wg.Done() + defer w.Close() + + ten.LogInfo(fmt.Sprintf("textToSpeechStream text: [%s]", msg.text)) + err = e.fishAudioTTS.textToSpeechStream(ten, w, msg.text) + ten.LogInfo(fmt.Sprintf("textToSpeechStream result: [%v]", err)) + if err != nil { + ten.LogError(fmt.Sprintf("textToSpeechStream failed, err: %v", err)) + return + } + }() + + ten.LogInfo(fmt.Sprintf("read pcm stream, text:[%s], pcmFrameSize:%d", msg.text, pcmFrameSize)) + + var ( + firstFrameLatency int64 + n int + pcmFrameRead int + readBytes int + sentFrames int + ) + buf := pcm.newBuf() + + // read pcm stream + for { + if msg.receivedTs < outdateTs.Load() { // Check whether to interrupt + ten.LogInfo(fmt.Sprintf("read pcm stream interrupt and flushing for input text: [%s], receivedTs: %d, outdateTs: %d", + msg.text, msg.receivedTs, outdateTs.Load())) + break + } + + n, err = r.Read(buf[pcmFrameRead:]) + readBytes += n + pcmFrameRead += n + + if err != nil { + if err == io.EOF { + ten.LogInfo("read pcm stream EOF") + break + } + + ten.LogError(fmt.Sprintf("read pcm stream failed, err: %v", err)) + break + } + + if pcmFrameRead != pcmFrameSize { + ten.LogDebug(fmt.Sprintf("the number of bytes read is [%d] inconsistent with pcm frame size", pcmFrameRead)) + continue + } + + pcm.send(ten, buf) + // clear buf + buf = pcm.newBuf() + pcmFrameRead = 0 + sentFrames++ + + if firstFrameLatency == 0 { + firstFrameLatency = time.Since(startTime).Milliseconds() + ten.LogInfo(fmt.Sprintf("first frame available for text: [%s], receivedTs: %d, firstFrameLatency: %dms", msg.text, msg.receivedTs, firstFrameLatency)) + } + + ten.LogDebug(fmt.Sprintf("sending pcm data, text: [%s]", msg.text)) + } + + if pcmFrameRead > 0 { + pcm.send(ten, buf) + sentFrames++ + ten.LogInfo(fmt.Sprintf("sending pcm remain data, text: [%s], pcmFrameRead: %d", msg.text, pcmFrameRead)) + } + + r.Close() + ten.LogInfo(fmt.Sprintf("send pcm data finished, text: [%s], receivedTs: %d, readBytes: %d, sentFrames: %d, firstFrameLatency: %dms, finishLatency: %dms", + msg.text, msg.receivedTs, readBytes, sentFrames, firstFrameLatency, time.Since(startTime).Milliseconds())) + } + }() + + ten.OnStartDone() +} + +// OnCmd receives cmd from ten graph. 
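+// A "flush" marks everything queued before it as outdated (via outdateTs) and forwards a flush cmd downstream.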
+// current supported cmd: +// - name: flush +// example: +// {"name": "flush"} +func (e *fishAudioTTSExtension) OnCmd( + tenEnv ten.TenEnv, + cmd ten.Cmd, +) { + cmdName, err := cmd.GetName() + if err != nil { + tenEnv.LogError(fmt.Sprintf("OnCmd get name failed, err: %v", err)) + cmdResult, _ := ten.NewCmdResult(ten.StatusCodeError) + tenEnv.ReturnResult(cmdResult, cmd, nil) + return + } + + tenEnv.LogInfo(fmt.Sprintf("OnCmd %s", cmdName)) + + switch cmdName { + case cmdInFlush: + outdateTs.Store(time.Now().UnixMicro()) + + // send out + outCmd, err := ten.NewCmd(cmdOutFlush) + if err != nil { + tenEnv.LogError(fmt.Sprintf("new cmd %s failed, err: %v", cmdOutFlush, err)) + cmdResult, _ := ten.NewCmdResult(ten.StatusCodeError) + tenEnv.ReturnResult(cmdResult, cmd, nil) + return + } + + if err := tenEnv.SendCmd(outCmd, nil); err != nil { + tenEnv.LogError(fmt.Sprintf("send cmd %s failed, err: %v", cmdOutFlush, err)) + cmdResult, _ := ten.NewCmdResult(ten.StatusCodeError) + tenEnv.ReturnResult(cmdResult, cmd, nil) + return + } else { + tenEnv.LogInfo(fmt.Sprintf("cmd %s sent", cmdOutFlush)) + } + } + + cmdResult, _ := ten.NewCmdResult(ten.StatusCodeOk) + tenEnv.ReturnResult(cmdResult, cmd, nil) +} + +// OnData receives data from ten graph. +// current supported data: +// - name: text_data +// example: +// {"name": "text_data", "properties": {"text": "hello"}} +func (e *fishAudioTTSExtension) OnData( + tenEnv ten.TenEnv, + data ten.Data, +) { + text, err := data.GetPropertyString(dataInTextDataPropertyText) + if err != nil { + tenEnv.LogWarn(fmt.Sprintf("OnData GetProperty %s failed, err: %v", dataInTextDataPropertyText, err)) + return + } + + if len(text) == 0 { + tenEnv.LogDebug("OnData text is empty, ignored") + return + } + + tenEnv.LogInfo(fmt.Sprintf("OnData input text: [%s]", text)) + + go func() { + textChan <- &message{text: text, receivedTs: time.Now().UnixMicro()} + }() +} + +func init() { + // Register addon + ten.RegisterAddonAsExtension( + "fish_audio_tts", + ten.NewDefaultExtensionAddon(newFishAudioTTSExtension), + ) +} diff --git a/agents/ten_packages/extension/fish_audio_tts/go.mod b/agents/ten_packages/extension/fish_audio_tts/go.mod new file mode 100644 index 0000000000000000000000000000000000000000..de51965ab4d440b5eddaaa17dfb31f6e37f02af0 --- /dev/null +++ b/agents/ten_packages/extension/fish_audio_tts/go.mod @@ -0,0 +1,12 @@ +module fish_audio_tts + +go 1.20 + +replace ten_framework => ../../system/ten_runtime_go/interface + +require ( + github.com/vmihailenco/msgpack/v5 v5.4.1 + ten_framework v0.0.0-00010101000000-000000000000 +) + +require github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect diff --git a/agents/ten_packages/extension/fish_audio_tts/go.sum b/agents/ten_packages/extension/fish_audio_tts/go.sum new file mode 100644 index 0000000000000000000000000000000000000000..9bb5184443397241576d7d3e0310e2f3500633b3 --- /dev/null +++ b/agents/ten_packages/extension/fish_audio_tts/go.sum @@ -0,0 +1,8 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= +github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 
v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= diff --git a/agents/ten_packages/extension/fish_audio_tts/manifest.json b/agents/ten_packages/extension/fish_audio_tts/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..fb34e464ea094aac35c8add1f5e00d6393d73abd --- /dev/null +++ b/agents/ten_packages/extension/fish_audio_tts/manifest.json @@ -0,0 +1,56 @@ +{ + "type": "extension", + "name": "fish_audio_tts", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_go", + "version": "0.8" + } + ], + "api": { + "property": { + "api_key": { + "type": "string" + }, + "model_id": { + "type": "string" + }, + "request_timeout_seconds": { + "type": "int64" + }, + "optimize_streaming_latency": { + "type": "bool" + }, + "base_url": { + "type": "string" + } + }, + "data_in": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + } + } + } + ], + "cmd_in": [ + { + "name": "flush" + } + ], + "cmd_out": [ + { + "name": "flush" + } + ], + "audio_frame_out": [ + { + "name": "pcm_frame" + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/fish_audio_tts/pcm.go b/agents/ten_packages/extension/fish_audio_tts/pcm.go new file mode 100644 index 0000000000000000000000000000000000000000..3d1d788cae2fb9cf9a2f4fb636dcbbf38ca2cf55 --- /dev/null +++ b/agents/ten_packages/extension/fish_audio_tts/pcm.go @@ -0,0 +1,101 @@ +/** + * + * Agora Real Time Engagement + * Created by Hai Guo in 2024-08. + * Copyright (c) 2024 Agora IO. All rights reserved. + * + */ +// An extension written by Go for TTS +package extension + +import ( + "fmt" + + "ten_framework/ten" +) + +type pcm struct { + config *pcmConfig +} + +type pcmConfig struct { + BytesPerSample int32 + Channel int32 + ChannelLayout uint64 + Name string + SampleRate int32 + SamplesPerChannel int32 + Timestamp int64 +} + +func defaultPcmConfig() *pcmConfig { + return &pcmConfig{ + BytesPerSample: 2, + Channel: 1, + ChannelLayout: 1, + Name: "pcm_frame", + SampleRate: 44100, + SamplesPerChannel: 44100 / 100, + Timestamp: 0, + } +} + +func newPcm(config *pcmConfig) *pcm { + return &pcm{ + config: config, + } +} + +func (p *pcm) getPcmFrame(tenEnv ten.TenEnv, buf []byte) (pcmFrame ten.AudioFrame, err error) { + pcmFrame, err = ten.NewAudioFrame(p.config.Name) + if err != nil { + tenEnv.LogError(fmt.Sprintf("NewPcmFrame failed, err: %v", err)) + return + } + + // set pcm frame + pcmFrame.SetBytesPerSample(p.config.BytesPerSample) + pcmFrame.SetSampleRate(p.config.SampleRate) + pcmFrame.SetChannelLayout(p.config.ChannelLayout) + pcmFrame.SetNumberOfChannels(p.config.Channel) + pcmFrame.SetTimestamp(p.config.Timestamp) + pcmFrame.SetDataFmt(ten.AudioFrameDataFmtInterleave) + pcmFrame.SetSamplesPerChannel(p.config.SamplesPerChannel) + pcmFrame.AllocBuf(p.getPcmFrameSize()) + + borrowedBuf, err := pcmFrame.LockBuf() + if err != nil { + tenEnv.LogError(fmt.Sprintf("LockBuf failed, err: %v", err)) + return + } + + // copy data + copy(borrowedBuf, buf) + + pcmFrame.UnlockBuf(&borrowedBuf) + return +} + +func (p *pcm) getPcmFrameSize() int { + return int(p.config.SamplesPerChannel * p.config.Channel * p.config.BytesPerSample) +} + +func (p *pcm) newBuf() []byte { + return make([]byte, p.getPcmFrameSize()) +} + +func (p *pcm) send(tenEnv ten.TenEnv, buf []byte) (err error) { + pcmFrame, err := p.getPcmFrame(tenEnv, buf) + if err != nil { + 
tenEnv.LogError(fmt.Sprintf("getPcmFrame failed, err: %v", err)) + return + } + + // send pcm + if err = tenEnv.SendAudioFrame(pcmFrame, nil); err != nil { + tenEnv.LogError(fmt.Sprintf("SendPcmFrame failed, err: %v", err)) + return + } + + return +} diff --git a/agents/ten_packages/extension/fish_audio_tts/property.json b/agents/ten_packages/extension/fish_audio_tts/property.json new file mode 100644 index 0000000000000000000000000000000000000000..8053f9b419bcd824b58890c900f910460feef22e --- /dev/null +++ b/agents/ten_packages/extension/fish_audio_tts/property.json @@ -0,0 +1,7 @@ +{ + "api_key": "${env:FISH_AUDIO_TTS_KEY}", + "model_id": "d8639b5cc95548f5afbcfe22d3ba5ce5", + "optimize_streaming_latency": true, + "request_timeout_seconds": 30, + "base_url": "https://api.fish.audio" +} \ No newline at end of file diff --git a/agents/ten_packages/extension/gemini_llm_python/__init__.py b/agents/ten_packages/extension/gemini_llm_python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..276699b35d1093128469c09f696da9118efb5ce2 --- /dev/null +++ b/agents/ten_packages/extension/gemini_llm_python/__init__.py @@ -0,0 +1 @@ +from . import gemini_llm_addon diff --git a/agents/ten_packages/extension/gemini_llm_python/gemini_llm.py b/agents/ten_packages/extension/gemini_llm_python/gemini_llm.py new file mode 100644 index 0000000000000000000000000000000000000000..92031d70648b7e248ae8d8b7bbfe67ac12b179e6 --- /dev/null +++ b/agents/ten_packages/extension/gemini_llm_python/gemini_llm.py @@ -0,0 +1,61 @@ +from typing import Dict, List +import google.generativeai as genai + + +class GeminiLLMConfig: + def __init__( + self, + api_key: str, + max_output_tokens: int, + model: str, + prompt: str, + temperature: float, + top_k: int, + top_p: float, + ): + self.api_key = api_key + self.max_output_tokens = max_output_tokens + self.model = model + self.prompt = prompt + self.temperature = temperature + self.top_k = top_k + self.top_p = top_p + + @classmethod + def default_config(cls): + return cls( + api_key="", + max_output_tokens=512, + model="gemini-1.5-flash", + prompt="You are a voice assistant who talks in a conversational way and can chat with me like my friends. I will speak to you in English or Chinese, and you will answer in the corrected and improved version of my text with the language I use. Don’t talk like a robot, instead I would like you to talk like a real human with emotions. I will use your answer for text-to-speech, so don’t return me any meaningless characters. I want you to be helpful, when I’m asking you for advice, give me precise, practical and useful advice instead of being vague. 
When giving me a list of options, express the options in a narrative way instead of bullet points.", + temperature=1.0, + top_k=40, + top_p=0.95, + ) + + +class GeminiLLM: + def __init__(self, config: GeminiLLMConfig): + self.config = config + genai.configure(api_key=self.config.api_key) + self.model = genai.GenerativeModel( + model_name=self.config.model, system_instruction=self.config.prompt + ) + + def get_chat_completions_stream(self, messages: List[Dict[str, str]]): + try: + chat = self.model.start_chat(history=messages[0:-1]) + response = chat.send_message( + messages[-1].get("parts"), + generation_config=genai.types.GenerationConfig( + max_output_tokens=self.config.max_output_tokens, + temperature=self.config.temperature, + top_k=self.config.top_k, + top_p=self.config.top_p, + ), + stream=True, + ) + + return response + except Exception as e: + raise RuntimeError(f"get_chat_completions_stream failed, err: {e}") from e diff --git a/agents/ten_packages/extension/gemini_llm_python/gemini_llm_addon.py b/agents/ten_packages/extension/gemini_llm_python/gemini_llm_addon.py new file mode 100644 index 0000000000000000000000000000000000000000..ea27df9161a770b61d926b2b51c22a4eaa6bf6fb --- /dev/null +++ b/agents/ten_packages/extension/gemini_llm_python/gemini_llm_addon.py @@ -0,0 +1,19 @@ +# +# +# Agora Real Time Engagement +# Created by XinHui Li in 2024. +# Copyright (c) 2024 Agora IO. All rights reserved. +# +# +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + +@register_addon_as_extension("gemini_llm_python") +class GeminiLLMExtensionAddon(Addon): + def on_create_instance(self, ten: TenEnv, addon_name: str, context) -> None: + from .gemini_llm_extension import GeminiLLMExtension + ten.log_info("on_create_instance") + ten.on_create_instance_done(GeminiLLMExtension(addon_name), context) diff --git a/agents/ten_packages/extension/gemini_llm_python/gemini_llm_extension.py b/agents/ten_packages/extension/gemini_llm_python/gemini_llm_extension.py new file mode 100644 index 0000000000000000000000000000000000000000..99b0851a9e56e063ab66118f59720e589651c802 --- /dev/null +++ b/agents/ten_packages/extension/gemini_llm_python/gemini_llm_extension.py @@ -0,0 +1,284 @@ +# +# +# Agora Real Time Engagement +# Created by XinHui Li in 2024. +# Copyright (c) 2024 Agora IO. All rights reserved. 
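+#
+# A rough usage sketch (not executed here) of the GeminiLLM wrapper that this
+# extension drives (see gemini_llm.py). History entries use Gemini's
+# {"role", "parts"} shape, and the stream yields chunks whose .text fields are
+# re-segmented into sentences below:
+#
+#   llm = GeminiLLM(GeminiLLMConfig.default_config())
+#   for chunk in llm.get_chat_completions_stream(
+#       [{"role": "user", "parts": "hello"}]
+#   ):
+#       print(chunk.text)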
+#
+#
+from threading import Thread
+from ten import (
+    Extension,
+    TenEnv,
+    Cmd,
+    Data,
+    StatusCode,
+    CmdResult,
+)
+from .utils import get_micro_ts, parse_sentence
+
+
+CMD_IN_FLUSH = "flush"
+CMD_OUT_FLUSH = "flush"
+DATA_IN_TEXT_DATA_PROPERTY_TEXT = "text"
+DATA_IN_TEXT_DATA_PROPERTY_IS_FINAL = "is_final"
+DATA_OUT_TEXT_DATA_PROPERTY_TEXT = "text"
+DATA_OUT_TEXT_DATA_PROPERTY_TEXT_END_OF_SEGMENT = "end_of_segment"
+
+PROPERTY_API_KEY = "api_key"  # Required
+PROPERTY_GREETING = "greeting"  # Optional
+PROPERTY_MAX_MEMORY_LENGTH = "max_memory_length"  # Optional
+PROPERTY_MAX_OUTPUT_TOKENS = "max_output_tokens"  # Optional
+PROPERTY_MODEL = "model"  # Optional
+PROPERTY_PROMPT = "prompt"  # Optional
+PROPERTY_TEMPERATURE = "temperature"  # Optional
+PROPERTY_TOP_K = "top_k"  # Optional
+PROPERTY_TOP_P = "top_p"  # Optional
+
+
+class GeminiLLMExtension(Extension):
+    memory = []
+    max_memory_length = 10
+    outdate_ts = 0
+    gemini_llm = None
+
+    def on_start(self, ten: TenEnv) -> None:
+        ten.log_info("GeminiLLMExtension on_start")
+
+        # lazy import packages which require a long time to load
+        from .gemini_llm import GeminiLLM, GeminiLLMConfig
+
+        # Prepare configuration
+        gemini_llm_config = GeminiLLMConfig.default_config()
+
+        try:
+            api_key = ten.get_property_string(PROPERTY_API_KEY)
+            gemini_llm_config.api_key = api_key
+        except Exception as err:
+            ten.log_error(f"GetProperty required {PROPERTY_API_KEY} failed, err: {err}")
+            return
+
+        for key in [PROPERTY_GREETING, PROPERTY_MODEL, PROPERTY_PROMPT]:
+            try:
+                val = ten.get_property_string(key)
+                if val:
+                    setattr(gemini_llm_config, key, val)
+            except Exception as e:
+                ten.log_warn(f"get_property_string optional {key} failed, err: {e}")
+
+        for key in [PROPERTY_TEMPERATURE, PROPERTY_TOP_P]:
+            try:
+                setattr(gemini_llm_config, key, float(ten.get_property_float(key)))
+            except Exception as e:
+                ten.log_warn(f"get_property_float optional {key} failed, err: {e}")
+
+        for key in [PROPERTY_MAX_OUTPUT_TOKENS, PROPERTY_TOP_K]:
+            try:
+                setattr(gemini_llm_config, key, int(ten.get_property_int(key)))
+            except Exception as e:
+                ten.log_warn(f"get_property_int optional {key} failed, err: {e}")
+
+        try:
+            prop_max_memory_length = ten.get_property_int(PROPERTY_MAX_MEMORY_LENGTH)
+            if prop_max_memory_length > 0:
+                self.max_memory_length = int(prop_max_memory_length)
+        except Exception as err:
+            ten.log_warn(
+                f"GetProperty optional {PROPERTY_MAX_MEMORY_LENGTH} failed, err: {err}"
+            )
+
+        # Create GeminiLLM instance
+        self.gemini_llm = GeminiLLM(gemini_llm_config)
+        ten.log_info(
+            f"newGeminiLLM succeed with max_output_tokens: {gemini_llm_config.max_output_tokens}, model: {gemini_llm_config.model}"
+        )
+
+        # Send greeting if available
+        greeting = ten.get_property_string(PROPERTY_GREETING)
+        if greeting:
+            try:
+                output_data = Data.create("text_data")
+                output_data.set_property_string(
+                    DATA_OUT_TEXT_DATA_PROPERTY_TEXT, greeting
+                )
+                output_data.set_property_bool(
+                    DATA_OUT_TEXT_DATA_PROPERTY_TEXT_END_OF_SEGMENT, True
+                )
+                ten.send_data(output_data)
+                ten.log_info(f"greeting [{greeting}] sent")
+            except Exception as e:
+                ten.log_error(f"greeting [{greeting}] send failed, err: {e}")
+
+        ten.on_start_done()
+
+    def on_stop(self, ten: TenEnv) -> None:
+        ten.log_info("GeminiLLMExtension on_stop")
+        ten.on_stop_done()
+
+    def on_cmd(self, ten: TenEnv, cmd: Cmd) -> None:
+        cmd_name = cmd.get_name()
+        ten.log_info(f"GeminiLLMExtension on_cmd name: {cmd_name}")
+
+        if cmd_name == 
CMD_IN_FLUSH: + self.outdate_ts = get_micro_ts() + cmd_out = Cmd.create(CMD_OUT_FLUSH) + ten.send_cmd(cmd_out, None) + ten.log_info("GeminiLLMExtension on_cmd sent flush") + else: + ten.log_info(f"GeminiLLMExtension on_cmd unknown cmd: {cmd_name}") + cmd_result = CmdResult.create(StatusCode.ERROR) + cmd_result.set_property_string("detail", "unknown cmd") + ten.return_result(cmd_result, cmd) + return + + cmd_result = CmdResult.create(StatusCode.OK) + cmd_result.set_property_string("detail", "success") + ten.return_result(cmd_result, cmd) + + def on_data(self, ten: TenEnv, data: Data) -> None: + """ + on_data receives data from ten graph. + current supported data: + - name: text_data + example: + {name: text_data, properties: {text: "hello"} + """ + ten.log_info("GeminiLLMExtension on_data") + + # Assume 'data' is an object from which we can get properties + try: + is_final = data.get_property_bool(DATA_IN_TEXT_DATA_PROPERTY_IS_FINAL) + if not is_final: + ten.log_info("ignore non-final input") + return + except Exception as e: + ten.log_error( + f"on_data get_property_bool {DATA_IN_TEXT_DATA_PROPERTY_IS_FINAL} failed, err: {e}" + ) + return + + # Get input text + try: + input_text = data.get_property_string(DATA_IN_TEXT_DATA_PROPERTY_TEXT) + if not input_text: + ten.log_info("ignore empty text") + return + ten.log_info(f"on_data input text: [{input_text}]") + except Exception as e: + ten.log_error( + f"on_data get_property_string {DATA_IN_TEXT_DATA_PROPERTY_TEXT} failed, err: {e}" + ) + return + + # Prepare memory + if len(self.memory) > self.max_memory_length: + self.memory.pop(0) + self.memory.append({"role": "user", "parts": input_text}) + + def chat_completions_stream_worker(start_time, input_text, memory): + try: + ten.log_info( + f"chat_completions_stream_worker for input text: [{input_text}] memory: {memory}" + ) + + # Get result from AI + resp = self.gemini_llm.get_chat_completions_stream(memory) + if resp is None: + ten.log_info( + f"chat_completions_stream_worker for input text: [{input_text}] failed" + ) + return + + sentence = "" + full_content = "" + first_sentence_sent = False + + for chat_completions in resp: + if start_time < self.outdate_ts: + ten.log_info( + f"chat_completions_stream_worker recv interrupt and flushing for input text: [{input_text}], startTs: {start_time}, outdateTs: {self.outdate_ts}" + ) + break + + if chat_completions.text is not None: + content = chat_completions.text + else: + content = "" + + full_content += content + + while True: + sentence, content, sentence_is_final = parse_sentence( + sentence, content + ) + + if len(sentence) == 0 or not sentence_is_final: + ten.log_info(f"sentence {sentence} is empty or not final") + break + + ten.log_info( + f"chat_completions_stream_worker recv for input text: [{input_text}] got sentence: [{sentence}]" + ) + + # send sentence + try: + output_data = Data.create("text_data") + output_data.set_property_string( + DATA_OUT_TEXT_DATA_PROPERTY_TEXT, sentence + ) + output_data.set_property_bool( + DATA_OUT_TEXT_DATA_PROPERTY_TEXT_END_OF_SEGMENT, False + ) + ten.send_data(output_data) + ten.log_info( + f"chat_completions_stream_worker recv for input text: [{input_text}] sent sentence [{sentence}]" + ) + except Exception as e: + ten.log_error( + f"chat_completions_stream_worker recv for input text: [{input_text}] send sentence [{sentence}] failed, err: {e}" + ) + break + + sentence = "" + if not first_sentence_sent: + first_sentence_sent = True + ten.log_info( + f"chat_completions_stream_worker recv for input text: 
[{input_text}] first sentence sent, first_sentence_latency {(get_micro_ts() - start_time) // 1000}ms"
+                            )
+
+                # remember response as assistant content in memory
+                memory.append({"role": "model", "parts": full_content})
+
+                # send end of segment
+                try:
+                    output_data = Data.create("text_data")
+                    output_data.set_property_string(
+                        DATA_OUT_TEXT_DATA_PROPERTY_TEXT, sentence
+                    )
+                    output_data.set_property_bool(
+                        DATA_OUT_TEXT_DATA_PROPERTY_TEXT_END_OF_SEGMENT, True
+                    )
+                    ten.send_data(output_data)
+                    ten.log_info(
+                        f"chat_completions_stream_worker for input text: [{input_text}] end of segment with sentence [{sentence}] sent"
+                    )
+                except Exception as e:
+                    ten.log_error(
+                        f"chat_completions_stream_worker for input text: [{input_text}] end of segment with sentence [{sentence}] send failed, err: {e}"
+                    )
+
+            except Exception as e:
+                ten.log_error(
+                    f"chat_completions_stream_worker for input text: [{input_text}] failed, err: {e}"
+                )
+
+        # Start thread to request and read responses from GeminiLLM
+        start_time = get_micro_ts()
+        thread = Thread(
+            target=chat_completions_stream_worker,
+            args=(start_time, input_text, self.memory),
+        )
+        thread.start()
+        ten.log_info("GeminiLLMExtension on_data end")
diff --git a/agents/ten_packages/extension/gemini_llm_python/manifest.json b/agents/ten_packages/extension/gemini_llm_python/manifest.json
new file mode 100644
index 0000000000000000000000000000000000000000..34f9ef4e8bcc78d40d8573857eeb856fce97d301
--- /dev/null
+++ b/agents/ten_packages/extension/gemini_llm_python/manifest.json
@@ -0,0 +1,73 @@
+{
+  "type": "extension",
+  "name": "gemini_llm_python",
+  "version": "0.1.0",
+  "dependencies": [
+    {
+      "type": "system",
+      "name": "ten_runtime_python",
+      "version": "0.8"
+    }
+  ],
+  "api": {
+    "property": {
+      "api_key": {
+        "type": "string"
+      },
+      "greeting": {
+        "type": "string"
+      },
+      "max_memory_length": {
+        "type": "int64"
+      },
+      "max_output_tokens": {
+        "type": "int64"
+      },
+      "model": {
+        "type": "string"
+      },
+      "prompt": {
+        "type": "string"
+      },
+      "temperature": {
+        "type": "float64"
+      },
+      "top_k": {
+        "type": "int64"
+      },
+      "top_p": {
+        "type": "float64"
+      }
+    },
+    "data_in": [
+      {
+        "name": "text_data",
+        "property": {
+          "text": {
+            "type": "string"
+          }
+        }
+      }
+    ],
+    "data_out": [
+      {
+        "name": "text_data",
+        "property": {
+          "text": {
+            "type": "string"
+          }
+        }
+      }
+    ],
+    "cmd_in": [
+      {
+        "name": "flush"
+      }
+    ],
+    "cmd_out": [
+      {
+        "name": "flush"
+      }
+    ]
+  }
+}
\ No newline at end of file
diff --git a/agents/ten_packages/extension/gemini_llm_python/property.json b/agents/ten_packages/extension/gemini_llm_python/property.json
new file mode 100644
index 0000000000000000000000000000000000000000..a1325831cb97954c39e8c2dd6b464f13a70712e0
--- /dev/null
+++ b/agents/ten_packages/extension/gemini_llm_python/property.json
@@ -0,0 +1,11 @@
+{
+  "api_key": "${env:GEMINI_API_KEY}",
+  "greeting": "TEN Agent connected. How can I help you today?",
+  "max_memory_length": 10,
+  "max_output_tokens": 512,
+  "model": "gemini-1.5-flash",
+  "prompt": "",
+  "temperature": 0.9,
+  "top_k": 40,
+  "top_p": 0.95
+}
\ No newline at end of file
diff --git a/agents/ten_packages/extension/gemini_llm_python/requirements.txt b/agents/ten_packages/extension/gemini_llm_python/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..309f817a5abdb6336e2bc93af5cae021bcfdc97a
--- /dev/null
+++ b/agents/ten_packages/extension/gemini_llm_python/requirements.txt
@@ -0,0 +1 @@
+google-generativeai~=0.7.2
\ No newline at end of file
diff --git a/agents/ten_packages/extension/gemini_llm_python/utils.py b/agents/ten_packages/extension/gemini_llm_python/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..e387d906a86ea4b2b5647a820bb361a9df9f209b
--- /dev/null
+++ b/agents/ten_packages/extension/gemini_llm_python/utils.py
@@ -0,0 +1,19 @@
+import time
+
+
+def get_micro_ts():
+    return int(time.time() * 1_000_000)
+
+
+def is_punctuation(char: str):
+    return char in [",", "，", ".", "。", "?", "？", "!", "！"]
+
+
+def parse_sentence(sentence: str, content: str):
+    for i, char in enumerate(content):
+        sentence += char
+
+        if is_punctuation(char):
+            return sentence, content[i + 1:], True
+
+    return sentence, "", False
diff --git a/agents/ten_packages/extension/gemini_v2v_python/README.md b/agents/ten_packages/extension/gemini_v2v_python/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e43f7041951aa00a43d8be3958cf98be7e80a603
--- /dev/null
+++ b/agents/ten_packages/extension/gemini_v2v_python/README.md
@@ -0,0 +1,63 @@
+# gemini_v2v_python
+
+An extension for integrating Gemini's Next Generation of **Multimodal** AI into your application, providing configurable AI-driven features such as conversational agents, task automation, and tool integration.
+
+## Features
+
+- Gemini **Multimodal** Integration: Leverage Gemini **Multimodal** models for voice-to-voice as well as text processing.
+- Configurable: Easily customize API keys, model settings, prompts, temperature, etc.
+- Async Queue Processing: Supports real-time message processing with task cancellation and prioritization.
+
+## API
+
+Refer to the `api` definition in [manifest.json](manifest.json) and default values in [property.json](property.json).
+
+| **Property** | **Type** | **Description** |
+|----------------------------|------------|-------------------------------------------|
+| `api_key` | `string` | API key for authenticating with Gemini |
+| `temperature` | `float32` | Sampling temperature, higher values mean more randomness |
+| `model` | `string` | Model identifier (e.g., `gemini-2.0-flash-exp`) |
+| `max_tokens` | `int32` | Maximum number of tokens to generate |
+| `system_message` | `string` | Default system message to send to the model |
+| `voice` | `string` | Voice that Gemini model uses, such as `Puck` |
+| `server_vad` | `bool` | Flag to enable or disable server VAD for Gemini |
+| `language` | `string` | Language that Gemini model responds in, such as `en-US`, `zh-CN`, etc. 
| +| `dump` | `bool` | Flag to enable or disable audio dump for debugging purposes | +| `base_uri` | `string` | Base URI for connecting to the Gemini service | +| `audio_out` | `bool` | Flag to enable or disable audio output | +| `input_transcript` | `bool` | Flag to enable input transcript processing | +| `sample_rate` | `int32` | Sample rate for audio processing | +| `stream_id` | `int32` | Stream ID for identifying audio streams | +| `greeting` | `string` | Greeting message for initial interaction | + +### Data Out + +| **Name** | **Property** | **Type** | **Description** | +|----------------|--------------|------------|-------------------------------| +| `text_data` | `text` | `string` | Outgoing text data | +| `append` | `text` | `string` | Additional text appended to the output | + +### Command Out + +| **Name** | **Description** | +|----------------|---------------------------------------------| +| `flush` | Response after flushing the current state | +| `tool_call` | Invokes a tool with specific arguments | + +### Audio Frame In + +| **Name** | **Description** | +|------------------|-------------------------------------------| +| `pcm_frame` | Audio frame input for voice processing | + +### Video Frame In + +| **Name** | **Description** | +|------------------|-------------------------------------------| +| `video_frame` | Video frame input for processing | + +### Audio Frame Out + +| **Name** | **Description** | +|------------------|-------------------------------------------| +| `pcm_frame` | Audio frame output after voice processing | diff --git a/agents/ten_packages/extension/gemini_v2v_python/__init__.py b/agents/ten_packages/extension/gemini_v2v_python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8cd75ddef4ae8e15366d6ed94ee557e6481a4989 --- /dev/null +++ b/agents/ten_packages/extension/gemini_v2v_python/__init__.py @@ -0,0 +1,8 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved. +# +# +from . import addon diff --git a/agents/ten_packages/extension/gemini_v2v_python/addon.py b/agents/ten_packages/extension/gemini_v2v_python/addon.py new file mode 100644 index 0000000000000000000000000000000000000000..138017428a24a465d39ca82b4d27db900b167243 --- /dev/null +++ b/agents/ten_packages/extension/gemini_v2v_python/addon.py @@ -0,0 +1,21 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved. +# +# +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + + +@register_addon_as_extension("gemini_v2v_python") +class GeminiRealtimeExtensionAddon(Addon): + + def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None: + from .extension import GeminiRealtimeExtension + ten_env.log_info("GeminiRealtimeExtensionAddon on_create_instance") + ten_env.on_create_instance_done(GeminiRealtimeExtension(name), context) diff --git a/agents/ten_packages/extension/gemini_v2v_python/extension.py b/agents/ten_packages/extension/gemini_v2v_python/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..e8518e232eb9c195b25a916cbf7fb3c84f152e4c --- /dev/null +++ b/agents/ten_packages/extension/gemini_v2v_python/extension.py @@ -0,0 +1,743 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved. 
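+#
+# Overview (added for clarity): this extension bridges the TEN graph and the
+# Gemini live API. Incoming pcm_frame audio is buffered and streamed to the
+# session as base64 "audio/pcm" chunks, video_frame images are downscaled and
+# sent as JPEG (see rgb2base64jpeg below), and the model's audio replies are
+# emitted back into the graph as pcm_frame frames.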
+# +# +import asyncio +import base64 +from enum import Enum +import json +import traceback +import time +from google import genai +import numpy as np +from typing import Iterable, cast + +import websockets + +from ten import ( + AudioFrame, + AsyncTenEnv, + Cmd, + StatusCode, + CmdResult, + Data, +) +from ten.audio_frame import AudioFrameDataFmt +from ten_ai_base.const import CMD_PROPERTY_RESULT, CMD_TOOL_CALL +from dataclasses import dataclass +from ten_ai_base.config import BaseConfig +from ten_ai_base.chat_memory import ChatMemory +from ten_ai_base.usage import ( + LLMUsage, + LLMCompletionTokensDetails, + LLMPromptTokensDetails, +) +from ten_ai_base.types import ( + LLMToolMetadata, + LLMToolResult, + LLMChatCompletionContentPartParam, + TTSPcmOptions, +) +from ten_ai_base.llm import AsyncLLMBaseExtension +from google.genai.types import ( + LiveServerMessage, + LiveConnectConfig, + LiveConnectConfigDict, + GenerationConfig, + Content, + Part, + Tool, + FunctionDeclaration, + Schema, + LiveClientToolResponse, + FunctionCall, + FunctionResponse, + SpeechConfig, + VoiceConfig, + PrebuiltVoiceConfig, +) +from google.genai.live import AsyncSession +from PIL import Image +from io import BytesIO +from base64 import b64encode + +import urllib.parse +import google.genai._api_client + +google.genai._api_client.urllib = urllib # pylint: disable=protected-access + +CMD_IN_FLUSH = "flush" +CMD_IN_ON_USER_JOINED = "on_user_joined" +CMD_IN_ON_USER_LEFT = "on_user_left" +CMD_OUT_FLUSH = "flush" + + +class Role(str, Enum): + User = "user" + Assistant = "assistant" + + +def rgb2base64jpeg(rgb_data, width, height): + # Convert the RGB image to a PIL Image + pil_image = Image.frombytes("RGBA", (width, height), bytes(rgb_data)) + pil_image = pil_image.convert("RGB") + + # Resize the image while maintaining its aspect ratio + pil_image = resize_image_keep_aspect(pil_image, 512) + + # Save the image to a BytesIO object in JPEG format + buffered = BytesIO() + pil_image.save(buffered, format="JPEG") + # pil_image.save("test.jpg", format="JPEG") + + # Get the byte data of the JPEG image + jpeg_image_data = buffered.getvalue() + + # Convert the JPEG byte data to a Base64 encoded string + base64_encoded_image = b64encode(jpeg_image_data).decode("utf-8") + + # Create the data URL + # mime_type = "image/jpeg" + return base64_encoded_image + + +def resize_image_keep_aspect(image, max_size=512): + """ + Resize an image while maintaining its aspect ratio, ensuring the larger dimension is max_size. + If both dimensions are smaller than max_size, the image is not resized. 
+ + :param image: A PIL Image object + :param max_size: The maximum size for the larger dimension (width or height) + :return: A PIL Image object (resized or original) + """ + # Get current width and height + width, height = image.size + + # If both dimensions are already smaller than max_size, return the original image + if width <= max_size and height <= max_size: + return image + + # Calculate the aspect ratio + aspect_ratio = width / height + + # Determine the new dimensions + if width > height: + new_width = max_size + new_height = int(max_size / aspect_ratio) + else: + new_height = max_size + new_width = int(max_size * aspect_ratio) + + # Resize the image with the new dimensions + resized_image = image.resize((new_width, new_height)) + + return resized_image + + +@dataclass +class GeminiRealtimeConfig(BaseConfig): + base_uri: str = "generativelanguage.googleapis.com" + api_key: str = "" + api_version: str = "v1alpha" + model: str = "gemini-2.0-flash-exp" + language: str = "en-US" + prompt: str = "" + temperature: float = 0.5 + max_tokens: int = 1024 + voice: str = "Puck" + server_vad: bool = True + audio_out: bool = True + input_transcript: bool = True + sample_rate: int = 24000 + stream_id: int = 0 + dump: bool = False + greeting: str = "" + + def build_ctx(self) -> dict: + return { + "language": self.language, + "model": self.model, + } + + +class GeminiRealtimeExtension(AsyncLLMBaseExtension): + def __init__(self, name): + super().__init__(name) + self.config: GeminiRealtimeConfig = None + self.stopped: bool = False + self.connected: bool = False + self.buffer: bytearray = b"" + self.memory: ChatMemory = None + self.total_usage: LLMUsage = LLMUsage() + self.users_count = 0 + + self.stream_id: int = 0 + self.remote_stream_id: int = 0 + self.channel_name: str = "" + self.audio_len_threshold: int = 5120 + + self.completion_times = [] + self.connect_times = [] + self.first_token_times = [] + + self.buff: bytearray = b"" + self.transcript: str = "" + self.ctx: dict = {} + self.input_end = time.time() + self.client = None + self.session: AsyncSession = None + self.leftover_bytes = b"" + self.video_task = None + self.image_queue = asyncio.Queue() + self.video_buff: str = "" + self.loop = None + self.ten_env = None + + async def on_init(self, ten_env: AsyncTenEnv) -> None: + await super().on_init(ten_env) + ten_env.log_debug("on_init") + + async def on_start(self, ten_env: AsyncTenEnv) -> None: + await super().on_start(ten_env) + self.ten_env = ten_env + ten_env.log_debug("on_start") + + self.loop = asyncio.get_event_loop() + + self.config = await GeminiRealtimeConfig.create_async(ten_env=ten_env) + ten_env.log_info(f"config: {self.config}") + + if not self.config.api_key: + ten_env.log_error("api_key is required") + return + + try: + self.ctx = self.config.build_ctx() + self.ctx["greeting"] = self.config.greeting + + self.client = genai.Client( + api_key=self.config.api_key, + http_options={ + "api_version": self.config.api_version, + "url": self.config.base_uri, + }, + ) + self.loop.create_task(self._loop(ten_env)) + self.loop.create_task(self._on_video(ten_env)) + + # self.loop.create_task(self._loop()) + except Exception as e: + traceback.print_exc() + self.ten_env.log_error(f"Failed to init client {e}") + + async def _loop(self, ten_env: AsyncTenEnv) -> None: + while not self.stopped: + await asyncio.sleep(1) + try: + config: LiveConnectConfig = self._get_session_config() + ten_env.log_info("Start listen") + async with self.client.aio.live.connect( + model=self.config.model, 
config=config + ) as session: + ten_env.log_info("Connected") + session = cast(AsyncSession, session) + self.session = session + self.connected = True + + await self._greeting() + + while True: + try: + async for response in session.receive(): + response = cast(LiveServerMessage, response) + # ten_env.log_info(f"Received response") + try: + if response.server_content: + if response.server_content.interrupted: + ten_env.log_info("Interrupted") + await self._flush() + continue + elif ( + not response.server_content.turn_complete + and response.server_content.model_turn + ): + for ( + part + ) in ( + response.server_content.model_turn.parts + ): + await self.send_audio_out( + ten_env, + part.inline_data.data, + sample_rate=24000, + bytes_per_sample=2, + number_of_channels=1, + ) + elif response.server_content.turn_complete: + ten_env.log_info("Turn complete") + elif response.setup_complete: + ten_env.log_info("Setup complete") + elif response.tool_call: + func_calls = response.tool_call.function_calls + self.loop.create_task( + self._handle_tool_call(func_calls) + ) + except Exception: + traceback.print_exc() + ten_env.log_error("Failed to handle response") + + await self._flush() + ten_env.log_info("Finish listen") + except websockets.exceptions.ConnectionClosedOK: + ten_env.log_info("Connection closed") + break + except Exception as e: + self.ten_env.log_error(f"Failed to handle loop {e}") + + async def send_audio_out( + self, ten_env: AsyncTenEnv, audio_data: bytes, **args: TTSPcmOptions + ) -> None: + """End sending audio out.""" + sample_rate = args.get("sample_rate", 24000) + bytes_per_sample = args.get("bytes_per_sample", 2) + number_of_channels = args.get("number_of_channels", 1) + try: + # Combine leftover bytes with new audio data + combined_data = self.leftover_bytes + audio_data + + # Check if combined_data length is odd + if len(combined_data) % (bytes_per_sample * number_of_channels) != 0: + # Save the last incomplete frame + valid_length = len(combined_data) - ( + len(combined_data) % (bytes_per_sample * number_of_channels) + ) + self.leftover_bytes = combined_data[valid_length:] + combined_data = combined_data[:valid_length] + else: + self.leftover_bytes = b"" + + if combined_data: + f = AudioFrame.create("pcm_frame") + f.set_sample_rate(sample_rate) + f.set_bytes_per_sample(bytes_per_sample) + f.set_number_of_channels(number_of_channels) + f.set_data_fmt(AudioFrameDataFmt.INTERLEAVE) + f.set_samples_per_channel( + len(combined_data) // (bytes_per_sample * number_of_channels) + ) + f.alloc_buf(len(combined_data)) + buff = f.lock_buf() + buff[:] = combined_data + f.unlock_buf(buff) + await ten_env.send_audio_frame(f) + except Exception: + pass + # ten_env.log_error(f"error send audio frame, {traceback.format_exc()}") + + async def on_stop(self, ten_env: AsyncTenEnv) -> None: + await super().on_stop(ten_env) + ten_env.log_info("on_stop") + + self.stopped = True + if self.session: + await self.session.close() + + async def on_audio_frame( + self, ten_env: AsyncTenEnv, audio_frame: AudioFrame + ) -> None: + await super().on_audio_frame(ten_env, audio_frame) + try: + stream_id = audio_frame.get_property_int("stream_id") + if self.channel_name == "": + self.channel_name = audio_frame.get_property_string("channel") + + if self.remote_stream_id == 0: + self.remote_stream_id = stream_id + + frame_buf = audio_frame.get_buf() + self._dump_audio_if_need(frame_buf, Role.User) + + await self._on_audio(frame_buf) + if not self.config.server_vad: + self.input_end = time.time() + except 
Exception as e: + traceback.print_exc() + self.ten_env.log_error(f"on audio frame failed {e}") + + async def on_cmd(self, ten_env: AsyncTenEnv, cmd: Cmd) -> None: + cmd_name = cmd.get_name() + ten_env.log_debug(f"on_cmd name {cmd_name}") + + status = StatusCode.OK + detail = "success" + + if cmd_name == CMD_IN_FLUSH: + # Will only flush if it is client side vad + await self._flush() + await ten_env.send_cmd(Cmd.create(CMD_OUT_FLUSH)) + ten_env.log_info("on flush") + elif cmd_name == CMD_IN_ON_USER_JOINED: + self.users_count += 1 + # Send greeting when first user joined + if self.users_count == 1: + await self._greeting() + elif cmd_name == CMD_IN_ON_USER_LEFT: + self.users_count -= 1 + else: + # Register tool + await super().on_cmd(ten_env, cmd) + return + + cmd_result = CmdResult.create(status) + cmd_result.set_property_string("detail", detail) + await ten_env.return_result(cmd_result, cmd) + + # Not support for now + async def on_data(self, ten_env: AsyncTenEnv, data: Data) -> None: + pass + + async def on_video_frame(self, async_ten_env, video_frame): + await super().on_video_frame(async_ten_env, video_frame) + image_data = video_frame.get_buf() + image_width = video_frame.get_width() + image_height = video_frame.get_height() + await self.image_queue.put([image_data, image_width, image_height]) + + async def _on_video(self, _: AsyncTenEnv): + while True: + + # Process the first frame from the queue + [image_data, image_width, image_height] = await self.image_queue.get() + self.video_buff = rgb2base64jpeg(image_data, image_width, image_height) + media_chunks = [ + { + "data": self.video_buff, + "mime_type": "image/jpeg", + } + ] + try: + if self.connected: + # ten_env.log_info(f"send image") + await self.session.send(media_chunks) + except Exception as e: + self.ten_env.log_error(f"Failed to send image {e}") + + # Skip remaining frames for the second + while not self.image_queue.empty(): + await self.image_queue.get() + + # Wait for 1 second before processing the next frame + await asyncio.sleep(1) + + # Direction: IN + async def _on_audio(self, buff: bytearray): + self.buff += buff + # Buffer audio + if self.connected and len(self.buff) >= self.audio_len_threshold: + # await self.conn.send_audio_data(self.buff) + try: + media_chunks = [ + { + "data": base64.b64encode(self.buff).decode(), + "mime_type": "audio/pcm", + } + ] + # await self.session.send(LiveClientRealtimeInput(media_chunks=media_chunks)) + await self.session.send(media_chunks) + self.buff = b"" + except Exception as e: + # pass + self.ten_env.log_error(f"Failed to send audio {e}") + + def _get_session_config(self) -> LiveConnectConfigDict: + def tool_dict(tool: LLMToolMetadata): + required = [] + properties: dict[str, "Schema"] = {} + + for param in tool.parameters: + properties[param.name] = Schema( + type=param.type.upper(), description=param.description + ) + if param.required: + required.append(param.name) + + t = Tool( + function_declarations=[ + FunctionDeclaration( + name=tool.name, + description=tool.description, + parameters=Schema( + type="OBJECT", properties=properties, required=required + ), + ) + ] + ) + + return t + + tools = ( + [tool_dict(t) for t in self.available_tools] + if len(self.available_tools) > 0 + else [] + ) + + tools.append(Tool(google_search={})) + tools.append(Tool(code_execution={})) + + config = LiveConnectConfig( + response_modalities=["AUDIO"], + system_instruction=Content(parts=[Part(text=self.config.prompt)]), + tools=tools, + # voice is currently not working + 
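+            # The configured name (default "Puck"; see the `voice` property in
+            # property.json) is still passed through below.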
speech_config=SpeechConfig(
+                voice_config=VoiceConfig(
+                    prebuilt_voice_config=PrebuiltVoiceConfig(
+                        voice_name=self.config.voice
+                    )
+                )
+            ),
+            generation_config=GenerationConfig(
+                temperature=self.config.temperature,
+                max_output_tokens=self.config.max_tokens,
+            ),
+        )
+
+        return config
+
+    async def on_tools_update(
+        self, ten_env: AsyncTenEnv, tool: LLMToolMetadata
+    ) -> None:
+        """Called when a new tool is registered. Implement this method to process the new tool."""
+        ten_env.log_info(f"on tools update {tool}")
+        # await self._update_session()
+
+    def _replace(self, prompt: str) -> str:
+        result = prompt
+        for token, value in self.ctx.items():
+            result = result.replace("{" + token + "}", value)
+        return result
+
+    def _send_transcript(self, content: str, role: Role, is_final: bool) -> None:
+        def is_punctuation(char):
+            if char in [",", "，", ".", "。", "?", "？", "!", "！"]:
+                return True
+            return False
+
+        def parse_sentences(sentence_fragment, content):
+            sentences = []
+            current_sentence = sentence_fragment
+            for char in content:
+                current_sentence += char
+                if is_punctuation(char):
+                    # Check if the current sentence contains non-punctuation characters
+                    stripped_sentence = current_sentence
+                    if any(c.isalnum() for c in stripped_sentence):
+                        sentences.append(stripped_sentence)
+                    current_sentence = ""  # Reset for the next sentence
+
+            remain = current_sentence  # Any remaining characters form the incomplete sentence
+            return sentences, remain
+
+        async def send_data(
+            ten_env: AsyncTenEnv,
+            sentence: str,
+            stream_id: int,
+            role: str,
+            is_final: bool,
+        ):
+            try:
+                d = Data.create("text_data")
+                d.set_property_string("text", sentence)
+                d.set_property_bool("end_of_segment", is_final)
+                d.set_property_string("role", role)
+                d.set_property_int("stream_id", stream_id)
+                ten_env.log_info(
+                    f"send transcript text [{sentence}] stream_id {stream_id} is_final {is_final} end_of_segment {is_final} role {role}"
+                )
+                await ten_env.send_data(d)
+            except Exception as e:
+                ten_env.log_error(
+                    f"Error sending text data {role}: {sentence} {is_final} {e}"
+                )
+
+        stream_id = self.remote_stream_id if role == Role.User else 0
+        try:
+            if role == Role.Assistant and not is_final:
+                sentences, self.transcript = parse_sentences(self.transcript, content)
+                for s in sentences:
+                    asyncio.create_task(
+                        send_data(self.ten_env, s, stream_id, role, is_final)
+                    )
+            else:
+                asyncio.create_task(
+                    send_data(self.ten_env, content, stream_id, role, is_final)
+                )
+        except Exception as e:
+            self.ten_env.log_error(
+                f"Error sending text data {role}: {content} {is_final} {e}"
+            )
+
+    def _dump_audio_if_need(self, buf: bytearray, role: Role) -> None:
+        if not self.config.dump:
+            return
+
+        with open("{}_{}.pcm".format(role, self.channel_name), "ab") as dump_file:
+            dump_file.write(buf)
+
+    async def _handle_tool_call(self, func_calls: list[FunctionCall]) -> None:
+        function_responses = []
+        for call in func_calls:
+            tool_call_id = call.id
+            name = call.name
+            arguments = call.args
+            self.ten_env.log_info(
+                f"_handle_tool_call {tool_call_id} {name} {arguments}"
+            )
+            cmd: Cmd = Cmd.create(CMD_TOOL_CALL)
+            cmd.set_property_string("name", name)
+            cmd.set_property_from_json("arguments", json.dumps(arguments))
+            [result, _] = await self.ten_env.send_cmd(cmd)
+
+            func_response = FunctionResponse(
+                id=tool_call_id, name=name, response={"error": "Failed to call tool"}
+            )
+            if result.get_status_code() == StatusCode.OK:
+                tool_result: LLMToolResult = json.loads(
+                    result.get_property_to_json(CMD_PROPERTY_RESULT)
+                ) 
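+                # tool_result is the LLMToolResult dict produced by the tool
+                # extension in response to the "tool_call" cmd; only its
+                # "content" field is echoed back to Gemini as the
+                # FunctionResponse output below.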
+ + result_content = tool_result["content"] + func_response = FunctionResponse( + id=tool_call_id, name=name, response={"output": result_content} + ) + self.ten_env.log_info(f"tool_result: {tool_call_id} {tool_result}") + else: + self.ten_env.log_error("Tool call failed") + function_responses.append(func_response) + # await self.conn.send_request(tool_response) + # await self.conn.send_request(ResponseCreate()) + self.ten_env.log_info(f"_remote_tool_call finish {name} {arguments}") + try: + self.ten_env.log_info(f"send tool response {function_responses}") + await self.session.send( + LiveClientToolResponse(function_responses=function_responses) + ) + except Exception as e: + self.ten_env.log_error(f"Failed to send tool response {e}") + + def _greeting_text(self) -> str: + text = "Hi, there." + if self.config.language == "zh-CN": + text = "你好。" + elif self.config.language == "ja-JP": + text = "こんにちは" + elif self.config.language == "ko-KR": + text = "안녕하세요" + return text + + def _convert_tool_params_to_dict(self, tool: LLMToolMetadata): + json_dict = {"type": "object", "properties": {}, "required": []} + + for param in tool.parameters: + json_dict["properties"][param.name] = { + "type": param.type, + "description": param.description, + } + if param.required: + json_dict["required"].append(param.name) + + return json_dict + + def _convert_to_content_parts( + self, content: Iterable[LLMChatCompletionContentPartParam] + ): + content_parts = [] + + if isinstance(content, str): + content_parts.append({"type": "text", "text": content}) + else: + for part in content: + # Only text content is supported currently for v2v model + if part["type"] == "text": + content_parts.append(part) + return content_parts + + async def _greeting(self) -> None: + if self.connected and self.users_count == 1: + text = self._greeting_text() + if self.config.greeting: + text = "Say '" + self.config.greeting + "' to me." 
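+            # Phrase the greeting as an instruction so the model speaks it
+            # back; end_of_turn=True below makes it respond immediately.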
+ self.ten_env.log_info(f"send greeting {text}") + await self.session.send(text, end_of_turn=True) + + async def _flush(self) -> None: + try: + c = Cmd.create("flush") + await self.ten_env.send_cmd(c) + except Exception: + self.ten_env.log_error("Error flush") + + async def _update_usage(self, usage: dict) -> None: + self.total_usage.completion_tokens += usage.get("output_tokens") + self.total_usage.prompt_tokens += usage.get("input_tokens") + self.total_usage.total_tokens += usage.get("total_tokens") + if not self.total_usage.completion_tokens_details: + self.total_usage.completion_tokens_details = LLMCompletionTokensDetails() + if not self.total_usage.prompt_tokens_details: + self.total_usage.prompt_tokens_details = LLMPromptTokensDetails() + + if usage.get("output_token_details"): + self.total_usage.completion_tokens_details.accepted_prediction_tokens += ( + usage["output_token_details"].get("text_tokens") + ) + self.total_usage.completion_tokens_details.audio_tokens += usage[ + "output_token_details" + ].get("audio_tokens") + + if usage.get("input_token_details:"): + self.total_usage.prompt_tokens_details.audio_tokens += usage[ + "input_token_details" + ].get("audio_tokens") + self.total_usage.prompt_tokens_details.cached_tokens += usage[ + "input_token_details" + ].get("cached_tokens") + self.total_usage.prompt_tokens_details.text_tokens += usage[ + "input_token_details" + ].get("text_tokens") + + self.ten_env.log_info(f"total usage: {self.total_usage}") + + data = Data.create("llm_stat") + data.set_property_from_json("usage", json.dumps(self.total_usage.model_dump())) + if self.connect_times and self.completion_times and self.first_token_times: + data.set_property_from_json( + "latency", + json.dumps( + { + "connection_latency_95": np.percentile(self.connect_times, 95), + "completion_latency_95": np.percentile( + self.completion_times, 95 + ), + "first_token_latency_95": np.percentile( + self.first_token_times, 95 + ), + "connection_latency_99": np.percentile(self.connect_times, 99), + "completion_latency_99": np.percentile( + self.completion_times, 99 + ), + "first_token_latency_99": np.percentile( + self.first_token_times, 99 + ), + } + ), + ) + asyncio.create_task(self.ten_env.send_data(data)) + + async def on_call_chat_completion(self, async_ten_env, **kargs): + raise NotImplementedError + + async def on_data_chat_completion(self, async_ten_env, **kargs): + raise NotImplementedError diff --git a/agents/ten_packages/extension/gemini_v2v_python/manifest.json b/agents/ten_packages/extension/gemini_v2v_python/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..ab70a6a415dfe39e0e58050f2ed5565e2b5398ef --- /dev/null +++ b/agents/ten_packages/extension/gemini_v2v_python/manifest.json @@ -0,0 +1,171 @@ +{ + "type": "extension", + "name": "gemini_v2v_python", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "package": { + "include": [ + "manifest.json", + "property.json", + "BUILD.gn", + "**.tent", + "**.py", + "README.md", + "realtime/**.tent", + "realtime/**.py" + ] + }, + "api": { + "property": { + "base_uri": { + "type": "string" + }, + "api_key": { + "type": "string" + }, + "api_version": { + "type": "string" + }, + "model": { + "type": "string" + }, + "language": { + "type": "string" + }, + "prompt": { + "type": "string" + }, + "temperature": { + "type": "float32" + }, + "max_tokens": { + "type": "int32" + }, + "voice": { + "type": "string" + }, + "server_vad": { + "type": 
"bool" + }, + "audio_out": { + "type": "bool" + }, + "input_transcript": { + "type": "bool" + }, + "sample_rate": { + "type": "int32" + }, + "stream_id": { + "type": "int32" + }, + "dump": { + "type": "bool" + }, + "greeting": { + "type": "string" + } + }, + "audio_frame_in": [ + { + "name": "pcm_frame", + "property": { + "stream_id": { + "type": "int64" + } + } + } + ], + "video_frame_in": [ + { + "name": "video_frame", + "property": {} + } + ], + "data_out": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + } + } + }, + { + "name": "append", + "property": { + "text": { + "type": "string" + } + } + } + ], + "cmd_in": [ + { + "name": "tool_register", + "property": { + "tool": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "parameters": { + "type": "array", + "items": { + "type": "object", + "properties": {} + } + } + }, + "required": [ + "name", + "description", + "parameters" + ] + } + }, + "result": { + "property": { + "response": { + "type": "string" + } + } + } + } + ], + "cmd_out": [ + { + "name": "flush" + }, + { + "name": "tool_call", + "property": { + "name": { + "type": "string" + }, + "args": { + "type": "string" + } + }, + "required": [ + "name" + ] + } + ], + "audio_frame_out": [ + { + "name": "pcm_frame" + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/gemini_v2v_python/property.json b/agents/ten_packages/extension/gemini_v2v_python/property.json new file mode 100644 index 0000000000000000000000000000000000000000..40f185505cc45ad48c6da418f40ed7397589f14a --- /dev/null +++ b/agents/ten_packages/extension/gemini_v2v_python/property.json @@ -0,0 +1,11 @@ +{ + "api_key": "${env:GEMINI_API_KEY}", + "temperature": 0.9, + "base_uri": "generativelanguage.googleapis.com", + "model": "gemini-2.0-flash-exp", + "api_version": "v1alpha", + "max_tokens": 2048, + "voice": "Puck", + "language": "en-US", + "server_vad": true +} \ No newline at end of file diff --git a/agents/ten_packages/extension/gemini_v2v_python/requirements.txt b/agents/ten_packages/extension/gemini_v2v_python/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..a6d6d5a82f2f381366293c35e23c4466fb5ae2e4 --- /dev/null +++ b/agents/ten_packages/extension/gemini_v2v_python/requirements.txt @@ -0,0 +1,2 @@ +asyncio +google-genai==0.3.0 \ No newline at end of file diff --git a/agents/ten_packages/extension/glm_v2v_python/README.md b/agents/ten_packages/extension/glm_v2v_python/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e2fd6cb5427ddbe2ac416940c60ac3f7066228b1 --- /dev/null +++ b/agents/ten_packages/extension/glm_v2v_python/README.md @@ -0,0 +1,47 @@ +# glm_v2v_python + +An extension for integrating GLM's **Multimodal** AI into your application, providing configurable AI-driven features such as conversational agents, task automation, and tool integration. + +## Features + + + +- GLM **Multimodal** Integration: Leverage GLM **Multimodal** models for voice to voice as well as text processing. +- Configurable: Easily customize API keys, model settings, prompts, temperature, etc. +- Async Queue Processing: Supports real-time message processing with task cancellation and prioritization. + + +## API + +Refer to `api` definition in [manifest.json] and default values in [property.json](property.json). 
+
+
+
+| **Property** | **Type** | **Description** |
+|----------------------------|------------|-------------------------------------------|
+| `api_key` | `string` | API key for authenticating with GLM |
+| `max_tokens` | `int64` | Maximum number of tokens to generate |
+| `prompt` | `string` | Default system message to send to the model |
+| `server_vad` | `bool` | Flag to enable or disable server VAD of GLM |
+| `dump` | `bool` | Flag to enable or disable audio dump for debugging purposes |
+
+### Data Out:
+| **Name** | **Property** | **Type** | **Description** |
+|----------------|--------------|------------|-------------------------------|
+| `text_data` | `text` | `string` | Outgoing text data |
+
+### Command Out:
+| **Name** | **Description** |
+|----------------|---------------------------------------------|
+| `flush` | Response after flushing the current state |
+
+### Audio Frame In:
+| **Name** | **Description** |
+|------------------|-------------------------------------------|
+| `pcm_frame` | Audio frame input for voice processing |
+
+### Audio Frame Out:
+| **Name** | **Description** |
+|------------------|-------------------------------------------|
+| `pcm_frame` | Audio frame output after voice processing |
+
diff --git a/agents/ten_packages/extension/glm_v2v_python/__init__.py b/agents/ten_packages/extension/glm_v2v_python/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8cd75ddef4ae8e15366d6ed94ee557e6481a4989
--- /dev/null
+++ b/agents/ten_packages/extension/glm_v2v_python/__init__.py
@@ -0,0 +1,8 @@
+#
+#
+# Agora Real Time Engagement
+# Created by Wei Hu in 2024-08.
+# Copyright (c) 2024 Agora IO. All rights reserved.
+#
+#
+from . import addon
diff --git a/agents/ten_packages/extension/glm_v2v_python/addon.py b/agents/ten_packages/extension/glm_v2v_python/addon.py
new file mode 100644
index 0000000000000000000000000000000000000000..c12c44dc4f6ef3e2448ef224c7ed04e36aab16ad
--- /dev/null
+++ b/agents/ten_packages/extension/glm_v2v_python/addon.py
@@ -0,0 +1,22 @@
+#
+#
+# Agora Real Time Engagement
+# Created by Wei Hu in 2024-08.
+# Copyright (c) 2024 Agora IO. All rights reserved.
+#
+#
+from ten import (
+    Addon,
+    register_addon_as_extension,
+    TenEnv,
+)
+
+
+@register_addon_as_extension("glm_v2v_python")
+class GLMRealtimeExtensionAddon(Addon):
+
+    def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None:
+        from .extension import GLMRealtimeExtension
+
+        ten_env.log_info("GLMRealtimeExtensionAddon on_create_instance")
+        ten_env.on_create_instance_done(GLMRealtimeExtension(name), context)
diff --git a/agents/ten_packages/extension/glm_v2v_python/extension.py b/agents/ten_packages/extension/glm_v2v_python/extension.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e72493e5987f5d95f57e1ba05f751ecd5b0066c
--- /dev/null
+++ b/agents/ten_packages/extension/glm_v2v_python/extension.py
@@ -0,0 +1,869 @@
+#
+#
+# Agora Real Time Engagement
+# Created by Wei Hu in 2024-08.
+# Copyright (c) 2024 Agora IO. All rights reserved. 
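+#
+# Overview (added for clarity): GLMRealtimeExtension holds a realtime session
+# to GLM over RealtimeApiConnection, queues incoming pcm_frame audio and
+# re-encodes it as WAV before upload (_on_process_audio), and relays server
+# events back into the graph as text_data transcripts and pcm_frame audio.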
+# +# +import asyncio +import base64 +import io +import json +from enum import Enum +import traceback +import time +import numpy as np +from datetime import datetime +from typing import Iterable +from pydub import AudioSegment + +from ten import ( + AudioFrame, + AsyncTenEnv, + Cmd, + StatusCode, + CmdResult, + Data, +) +from ten.audio_frame import AudioFrameDataFmt +from ten_ai_base.const import CMD_PROPERTY_RESULT, CMD_TOOL_CALL +from dataclasses import dataclass +from ten_ai_base.config import BaseConfig +from ten_ai_base.chat_memory import ( + ChatMemory, + EVENT_MEMORY_EXPIRED, + EVENT_MEMORY_APPENDED, +) +from ten_ai_base.usage import ( + LLMUsage, + LLMCompletionTokensDetails, + LLMPromptTokensDetails, +) +from ten_ai_base.types import ( + LLMToolMetadata, + LLMToolResult, + LLMChatCompletionContentPartParam, +) +from ten_ai_base.llm import AsyncLLMBaseExtension +from .realtime.connection import RealtimeApiConnection +from .realtime.struct import ( + AudioFormats, + ItemCreate, + SessionCreated, + ItemCreated, + UserMessageItemParam, + AssistantMessageItemParam, + ItemInputAudioTranscriptionCompleted, + ItemInputAudioTranscriptionFailed, + ResponseCreated, + ResponseDone, + ResponseAudioTranscriptDelta, + ResponseTextDelta, + ResponseAudioTranscriptDone, + ResponseTextDone, + ResponseOutputItemDone, + ResponseOutputItemAdded, + ResponseAudioDelta, + ResponseAudioDone, + InputAudioBufferSpeechStarted, + InputAudioBufferSpeechStopped, + ResponseFunctionCallArgumentsDone, + ErrorMessage, + ItemDelete, + SessionUpdate, + SessionUpdateParams, + InputAudioTranscription, + ContentType, + FunctionCallOutputItemParam, + ResponseCreate, +) + +CMD_IN_FLUSH = "flush" +CMD_IN_ON_USER_JOINED = "on_user_joined" +CMD_IN_ON_USER_LEFT = "on_user_left" +CMD_OUT_FLUSH = "flush" + + +class Role(str, Enum): + User = "user" + Assistant = "assistant" + + +@dataclass +class GLMRealtimeConfig(BaseConfig): + base_uri: str = "wss://open.bigmodel.cn" + api_key: str = "" + path: str = "/api/paas/v4/realtime" + prompt: str = "" + temperature: float = 0.5 + max_tokens: int = 1024 + server_vad: bool = True + audio_out: bool = True + input_transcript: bool = True + sample_rate: int = 24000 + + stream_id: int = 0 + dump: bool = False + max_history: int = 20 + enable_storage: bool = False + greeting: str = "" + language: str = "en-US" + + def build_ctx(self) -> dict: + return { + } + + +class GLMRealtimeExtension(AsyncLLMBaseExtension): + + def __init__(self, name: str): + super().__init__(name) + self.ten_env: AsyncTenEnv = None + self.conn = None + self.session = None + self.session_id = None + + self.config: GLMRealtimeConfig = None + self.stopped: bool = False + self.connected: bool = False + self.buffer: bytearray = b"" + self.memory: ChatMemory = None + self.total_usage: LLMUsage = LLMUsage() + self.users_count = 0 + + self.stream_id: int = 0 + self.remote_stream_id: int = 0 + self.channel_name: str = "" + self.audio_len_threshold: int = 5120 + + self.completion_times = [] + self.connect_times = [] + self.first_token_times = [] + + self.transcript: str = "" + self.ctx: dict = {} + self.input_end = time.time() + self.input_audio_queue = asyncio.Queue() + + async def on_init(self, ten_env: AsyncTenEnv) -> None: + await super().on_init(ten_env) + ten_env.log_debug("on_init") + + async def on_start(self, ten_env: AsyncTenEnv) -> None: + await super().on_start(ten_env) + ten_env.log_debug("on_start") + self.ten_env = ten_env + + self.loop = asyncio.get_event_loop() + self.loop.create_task(self._on_process_audio()) + 
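+        # Audio frames are handed off through input_audio_queue (filled in
+        # on_audio_frame) so the frame callback never blocks on the connection;
+        # _on_process_audio drains the queue on this event loop.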
+ self.config = await GLMRealtimeConfig.create_async(ten_env=ten_env) + ten_env.log_info(f"config: {self.config}") + + if not self.config.api_key: + ten_env.log_error("api_key is required") + return + + try: + self.memory = ChatMemory(self.config.max_history) + + if self.config.enable_storage: + [result, _] = await ten_env.send_cmd(Cmd.create("retrieve")) + if result.get_status_code() == StatusCode.OK: + try: + history = json.loads(result.get_property_string("response")) + for i in history: + self.memory.put(i) + ten_env.log_info(f"on retrieve context {history}") + except Exception as e: + ten_env.log_error(f"Failed to handle retrieve result {e}") + else: + ten_env.log_warn("Failed to retrieve content") + + self.memory.on(EVENT_MEMORY_EXPIRED, self._on_memory_expired) + self.memory.on(EVENT_MEMORY_APPENDED, self._on_memory_appended) + + self.ctx = self.config.build_ctx() + + self.conn = RealtimeApiConnection( + ten_env=ten_env, + base_uri=self.config.base_uri, + path=self.config.path, + api_key=self.config.api_key, + ) + ten_env.log_info("Finish init client") + + self.loop.create_task(self._loop()) + except Exception as e: + traceback.print_exc() + self.ten_env.log_error(f"Failed to init client {e}") + + async def on_stop(self, ten_env: AsyncTenEnv) -> None: + await super().on_stop(ten_env) + ten_env.log_info("on_stop") + + self.input_audio_queue.put_nowait(None) + self.stopped = True + + async def on_audio_frame(self, _: AsyncTenEnv, audio_frame: AudioFrame) -> None: + try: + stream_id = audio_frame.get_property_int("stream_id") + if self.channel_name == "": + self.channel_name = audio_frame.get_property_string("channel") + + if self.remote_stream_id == 0: + self.remote_stream_id = stream_id + + frame_buf = audio_frame.get_buf() + self.input_audio_queue.put_nowait(frame_buf) + + if not self.config.server_vad: + self.input_end = time.time() + except Exception as e: + traceback.print_exc() + self.ten_env.log_error(f"GLMV2VExtension on audio frame failed {e}") + + async def on_cmd(self, ten_env: AsyncTenEnv, cmd: Cmd) -> None: + cmd_name = cmd.get_name() + ten_env.log_debug("on_cmd name {}".format(cmd_name)) + + status = StatusCode.OK + detail = "success" + + if cmd_name == CMD_IN_FLUSH: + # Will only flush if it is client side vad + await self._flush() + await ten_env.send_cmd(Cmd.create(CMD_OUT_FLUSH)) + ten_env.log_info("on flush") + elif cmd_name == CMD_IN_ON_USER_JOINED: + self.users_count += 1 + # Send greeting when first user joined + if self.users_count == 1: + await self._greeting() + elif cmd_name == CMD_IN_ON_USER_LEFT: + self.users_count -= 1 + else: + # Register tool + await super().on_cmd(ten_env, cmd) + return + + cmd_result = CmdResult.create(status) + cmd_result.set_property_string("detail", detail) + await ten_env.return_result(cmd_result, cmd) + + # Not support for now + async def on_data(self, ten_env: AsyncTenEnv, data: Data) -> None: + pass + + async def _on_process_audio(self) -> None: + while True: + try: + audio_frame = await self.input_audio_queue.get() + + if audio_frame is None: + break + + self._dump_audio_if_need(audio_frame, Role.User) + if self.connected: + wav_buff = self.convert_to_wav_in_memory(audio_frame) + await self.conn.send_audio_data(wav_buff) + except Exception as e: + traceback.print_exc() + self.ten_env.log_error(f"Error processing audio frame {e}") + + async def _loop(self): + def get_time_ms() -> int: + current_time = datetime.now() + return current_time.microsecond // 1000 + + try: + start_time = time.time() + await self.conn.connect() + 
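+            # Connect latency is sampled here; _update_usage later reports p95
+            # and p99 over connect_times alongside completion and first-token
+            # latencies.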
self.connect_times.append(time.time() - start_time) + item_id = "" # For truncate + response_id = "" + # content_index = 0 + relative_start_ms = get_time_ms() + flushed = set() + + self.ten_env.log_info("Client loop started") + async for message in self.conn.listen(): + try: + # self.ten_env.log_info(f"Received message: {message.type}") + match message: + case SessionCreated(): + self.ten_env.log_info( + f"Session is created: {message.session}" + ) + self.session_id = message.session.id + self.session = message.session + await self._update_session() + + history = self.memory.get() + for h in history: + if h["role"] == "user": + await self.conn.send_request( + ItemCreate( + item=UserMessageItemParam( + content=[ + { + "type": ContentType.InputText, + "text": h["content"], + } + ] + ) + ) + ) + elif h["role"] == "assistant": + await self.conn.send_request( + ItemCreate( + item=AssistantMessageItemParam( + content=[ + { + "type": ContentType.InputText, + "text": h["content"], + } + ] + ) + ) + ) + self.ten_env.log_info(f"Finish send history {history}") + self.memory.clear() + + if not self.connected: + self.connected = True + await self._greeting() + case ItemInputAudioTranscriptionCompleted(): + self.ten_env.log_info( + f"On request transcript {message.transcript}" + ) + self._send_transcript(message.transcript, Role.User, True) + self.memory.put( + { + "role": "user", + "content": message.transcript, + # "id": message.item_id, + } + ) + case ItemInputAudioTranscriptionFailed(): + self.ten_env.log_warn( + f"On request transcript failed {message.item_id} {message.error}" + ) + case ItemCreated(): + self.ten_env.log_info(f"On item created {message.item}") + case ResponseCreated(): + response_id = message.response.id + self.ten_env.log_info(f"On response created {response_id}") + case ResponseDone(): + msg_resp_id = message.response.id + status = message.response.status + if msg_resp_id == response_id: + response_id = "" + self.ten_env.log_info( + f"On response done {msg_resp_id} {status} {message.response.usage}" + ) + + # workaround as GLM does not have responseAudioTranscriptDone + self.transcript = "" + self._send_transcript("", Role.Assistant, True) + + if message.response.usage: + pass + # await self._update_usage(message.response.usage) + case ResponseAudioTranscriptDelta(): + self.ten_env.log_info( + f"On response transcript delta {message.output_index} {message.content_index} {message.delta}" + ) + if message.response_id in flushed: + self.ten_env.log_warn( + f"On flushed transcript delta {message.output_index} {message.content_index} {message.delta}" + ) + continue + self._send_transcript(message.delta, Role.Assistant, False) + case ResponseTextDelta(): + self.ten_env.log_info( + f"On response text delta {message.output_index} {message.content_index} {message.delta}" + ) + # if message.response_id in flushed: + # self.ten_env.log_warn( + # f"On flushed text delta {message.output_index} {message.content_index} {message.delta}" + # ) + # continue + # if item_id != message.item_id: + # item_id = message.item_id + # self.first_token_times.append( + # time.time() - self.input_end + # ) + self._send_transcript(message.delta, Role.Assistant, False) + case ResponseAudioTranscriptDone(): + # this is not triggering by GLM + self.ten_env.log_info( + f"On response transcript done {message.output_index} {message.content_index} {message.transcript}" + ) + if message.response_id in flushed: + self.ten_env.log_warn( + "On flushed transcript done" + ) + continue + self.memory.put( + { + "role": 
"assistant", + "content": message.transcript, + # "id": message.item_id, + } + ) + self.transcript = "" + self._send_transcript("", Role.Assistant, True) + case ResponseTextDone(): + self.ten_env.log_info( + f"On response text done {message.output_index} {message.content_index} {message.text}" + ) + # if message.response_id in flushed: + # self.ten_env.log_warn( + # f"On flushed text done {message.response_id}" + # ) + # continue + self.completion_times.append(time.time() - self.input_end) + self.transcript = "" + self._send_transcript("", Role.Assistant, True) + case ResponseOutputItemDone(): + self.ten_env.log_info(f"Output item done {message.item}") + case ResponseOutputItemAdded(): + self.ten_env.log_info( + f"Output item added {message.output_index} {message.item}" + ) + case ResponseAudioDelta(): + # if message.response_id in flushed: + # self.ten_env.log_warn( + # f"On flushed audio delta {message.response_id} {message.item_id} {message.content_index}" + # ) + # continue + # if item_id != message.item_id: + # item_id = message.item_id + # self.first_token_times.append( + # time.time() - self.input_end + # ) + # content_index = message.content_index + await self._on_audio_delta(message.delta) + case ResponseAudioDone(): + self.completion_times.append(time.time() - self.input_end) + case InputAudioBufferSpeechStarted(): + self.ten_env.log_info( + f"On server listening, in response {response_id}, last item {item_id}" + ) + # Tuncate the on-going audio stream + # end_ms = get_time_ms() - relative_start_ms + # if item_id: + # truncate = ItemTruncate( + # item_id=item_id, + # content_index=content_index, + # audio_end_ms=end_ms, + # ) + # await self.conn.send_request(truncate) + if self.config.server_vad: + await self._flush() + if response_id and self.transcript: + transcript = self.transcript + "[interrupted]" + self._send_transcript(transcript, Role.Assistant, True) + self.transcript = "" + # memory leak, change to lru later + flushed.add(response_id) + item_id = "" + case InputAudioBufferSpeechStopped(): + # Only for server vad + self.input_end = time.time() + relative_start_ms = get_time_ms() - message.audio_end_ms + self.ten_env.log_info( + f"On server stop listening, {message.audio_end_ms}, relative {relative_start_ms}" + ) + case ResponseFunctionCallArgumentsDone(): + # tool_call_id = message.call_id + name = message.name + arguments = message.arguments + self.ten_env.log_info(f"need to call func {name}") + self.loop.create_task( + self._handle_tool_call(name, arguments) + ) + case ErrorMessage(): + self.ten_env.log_error( + f"Error message received: {message.error}" + ) + case _: + self.ten_env.log_debug(f"Not handled message {message}") + except Exception as e: + traceback.print_exc() + self.ten_env.log_error(f"Error processing message: {message} {e}") + + self.ten_env.log_info("Client loop finished") + except Exception as e: + traceback.print_exc() + self.ten_env.log_error(f"Failed to handle loop {e}") + + # clear so that new session can be triggered + self.connected = False + self.remote_stream_id = 0 + + if not self.stopped: + await self.conn.close() + await asyncio.sleep(0.5) + self.ten_env.log_info("Reconnect") + + self.conn = RealtimeApiConnection( + ten_env=self.ten_env, + base_uri=self.config.base_uri, + path=self.config.path, + api_key=self.config.api_key, + ) + + self.loop.create_task(self._loop()) + + async def _on_memory_expired(self, message: dict) -> None: + self.ten_env.log_info(f"Memory expired: {message}") + item_id = message.get("item_id") + if item_id: + await 
self.conn.send_request(ItemDelete(item_id=item_id)) + + async def _on_memory_appended(self, message: dict) -> None: + self.ten_env.log_info(f"Memory appended: {message}") + if not self.config.enable_storage: + return + + role = message.get("role") + stream_id = self.remote_stream_id if role == Role.User else 0 + try: + d = Data.create("append") + d.set_property_string("text", message.get("content")) + d.set_property_string("role", role) + d.set_property_int("stream_id", stream_id) + asyncio.create_task(self.ten_env.send_data(d)) + except Exception as e: + self.ten_env.log_error(f"Error send append_context data {message} {e}") + + # Direction: IN + def convert_to_wav_in_memory(self, buff: bytearray) -> bytes: + """ + Converts the accumulated PCM data to WAV format in-memory. + Returns the WAV data as bytes. + """ + # Convert PCM data to numpy array of int16 type + pcm_data = np.frombuffer(buff, dtype=np.int16) + + # Use pydub to create an AudioSegment + audio_segment = AudioSegment( + pcm_data.tobytes(), + frame_rate=24000, + sample_width=2, + channels=1 + ) + + # Create an in-memory stream to store the WAV file + memory_stream = io.BytesIO() + + # Export the AudioSegment to the in-memory stream as WAV + audio_segment.export(memory_stream, format="wav") + + # Return the WAV data as bytes + wav_bytes = memory_stream.getvalue() + return wav_bytes + + async def _update_session(self) -> None: + tools = [] + + def tool_dict(tool: LLMToolMetadata): + t = { + "type": "function", + "name": tool.name, + "description": tool.description, + "parameters": { + "type": "object", + "properties": {}, + "required": [], + "additionalProperties": False, + }, + } + + for param in tool.parameters: + t["parameters"]["properties"][param.name] = { + "type": param.type, + "description": param.description, + } + if param.required: + t["parameters"]["required"].append(param.name) + + return t + + if self.available_tools: + tool_prompt = "You have several tools that you can get help from:\n" + for t in self.available_tools: + tool_prompt += f"- ***{t.name}***: {t.description}" + self.ctx["tools"] = tool_prompt + tools = [tool_dict(t) for t in self.available_tools] + prompt = self._replace(self.config.prompt) + + self.ten_env.log_info(f"update session {prompt} {tools}") + su = SessionUpdate( + session=SessionUpdateParams( + instructions=prompt, + input_audio_format=AudioFormats.WAV24, + output_audio_format=AudioFormats.PCM, + tools=tools, + ) + ) + if self.config.audio_out: + # su.session.voice = self.config.voice + pass + else: + su.session.modalities = ["text"] + + if self.config.input_transcript: + su.session.input_audio_transcription = InputAudioTranscription( + model="whisper-1" + ) + await self.conn.send_request(su) + + async def on_tools_update(self, _: AsyncTenEnv, tool: LLMToolMetadata) -> None: + """Called when a new tool is registered. 
Implement this method to process the new tool.""" + self.ten_env.log_info(f"on tools update {tool}") + # await self._update_session() + + def _replace(self, prompt: str) -> str: + result = prompt + for token, value in self.ctx.items(): + result = result.replace("{" + token + "}", value) + return result + + # Direction: OUT + async def _on_audio_delta(self, delta: bytes) -> None: + audio_data = base64.b64decode(delta) + self.ten_env.log_debug( + f"on_audio_delta audio_data len {len(audio_data)} samples {len(audio_data) // 2}" + ) + self._dump_audio_if_need(audio_data, Role.Assistant) + + f = AudioFrame.create("pcm_frame") + f.set_sample_rate(self.config.sample_rate) + f.set_bytes_per_sample(2) + f.set_number_of_channels(1) + f.set_data_fmt(AudioFrameDataFmt.INTERLEAVE) + f.set_samples_per_channel(len(audio_data) // 2) + f.alloc_buf(len(audio_data)) + buff = f.lock_buf() + buff[:] = audio_data + f.unlock_buf(buff) + await self.ten_env.send_audio_frame(f) + + def _send_transcript(self, content: str, role: Role, is_final: bool) -> None: + def is_punctuation(char): + if char in [",", ",", ".", "。", "?", "?", "!", "!"]: + return True + return False + + def parse_sentences(sentence_fragment, content): + sentences = [] + current_sentence = sentence_fragment + for char in content: + current_sentence += char + if is_punctuation(char): + # Check if the current sentence contains non-punctuation characters + stripped_sentence = current_sentence + if any(c.isalnum() for c in stripped_sentence): + sentences.append(stripped_sentence) + current_sentence = "" # Reset for the next sentence + + remain = current_sentence # Any remaining characters form the incomplete sentence + return sentences, remain + + def send_data( + ten_env: AsyncTenEnv, + sentence: str, + stream_id: int, + role: str, + is_final: bool, + ): + try: + d = Data.create("text_data") + d.set_property_string("text", sentence) + d.set_property_bool("end_of_segment", is_final) + d.set_property_string("role", role) + d.set_property_int("stream_id", stream_id) + ten_env.log_info( + f"send transcript text [{sentence}] stream_id {stream_id} is_final {is_final} end_of_segment {is_final} role {role}" + ) + asyncio.create_task(ten_env.send_data(d)) + except Exception as e: + ten_env.log_error( + f"Error send text data {role}: {sentence} {is_final} {e}" + ) + + stream_id = self.remote_stream_id if role == Role.User else 0 + try: + if role == Role.Assistant and not is_final: + sentences, self.transcript = parse_sentences(self.transcript, content) + for s in sentences: + send_data(self.ten_env, s, stream_id, role, is_final) + else: + send_data(self.ten_env, content, stream_id, role, is_final) + except Exception as e: + self.ten_env.log_error( + f"Error send text data {role}: {content} {is_final} {e}" + ) + + def _dump_audio_if_need(self, buf: bytearray, role: Role) -> None: + if not self.config.dump: + return + + with open("{}_{}.pcm".format(role, self.channel_name), "ab") as dump_file: + dump_file.write(buf) + + async def _handle_tool_call( + self, name: str, arguments: str + ) -> None: + self.ten_env.log_info(f"_handle_tool_call {name} {arguments}") + cmd: Cmd = Cmd.create(CMD_TOOL_CALL) + cmd.set_property_string("name", name) + cmd.set_property_from_json("arguments", arguments) + [result, _] = await self.ten_env.send_cmd(cmd) + + tool_response = ItemCreate( + item=FunctionCallOutputItemParam( + output='{"success":false}', + ) + ) + if result.get_status_code() == StatusCode.OK: + tool_result: LLMToolResult = json.loads( + 
result.get_property_to_json(CMD_PROPERTY_RESULT)
+            )
+
+            result_content = tool_result["content"]
+            tool_response.item.output = json.dumps(
+                self._convert_to_content_parts(result_content)
+            )
+            self.ten_env.log_info(f"tool_result: {tool_result}")
+        else:
+            self.ten_env.log_error("Tool call failed")
+
+        await self.conn.send_request(tool_response)
+        await self.conn.send_request(ResponseCreate())
+        self.ten_env.log_info(f"_handle_tool_call finish {name} {arguments}")
+
+    def _greeting_text(self) -> str:
+        text = "Hi, there."
+        if self.config.language == "zh-CN":
+            text = "你好。"
+        elif self.config.language == "ja-JP":
+            text = "こんにちは"
+        elif self.config.language == "ko-KR":
+            text = "안녕하세요"
+        return text
+
+    def _convert_tool_params_to_dict(self, tool: LLMToolMetadata):
+        json_dict = {"type": "object", "properties": {}, "required": []}
+
+        for param in tool.parameters:
+            json_dict["properties"][param.name] = {
+                "type": param.type,
+                "description": param.description,
+            }
+            if param.required:
+                json_dict["required"].append(param.name)
+
+        return json_dict
+
+    def _convert_to_content_parts(
+        self, content: Iterable[LLMChatCompletionContentPartParam]
+    ):
+        content_parts = []
+
+        if isinstance(content, str):
+            content_parts.append({"type": "text", "text": content})
+        else:
+            for part in content:
+                # Only text content is currently supported by the v2v model
+                if part["type"] == "text":
+                    content_parts.append(part)
+        return content_parts
+
+    async def _greeting(self) -> None:
+        if self.connected and self.users_count == 1:
+            # FIXME: sending the greeting through ItemCreate does not work
+            # against the GLM realtime API yet, so the requests stay disabled.
+            text = self._greeting_text()
+            if self.config.greeting:
+                text = "Say '" + self.config.greeting + "' to me."
+            self.ten_env.log_info(f"send greeting {text}")
+            # await self.conn.send_request(
+            #     ItemCreate(
+            #         item=UserMessageItemParam(
+            #             content=[{"type": ContentType.InputText, "text": text}]
+            #         )
+            #     )
+            # )
+            # await self.conn.send_request(ResponseCreate())
+
+    async def _flush(self) -> None:
+        try:
+            c = Cmd.create("flush")
+            await self.ten_env.send_cmd(c)
+        except Exception:
+            self.ten_env.log_error("Error sending flush cmd")
+
+    async def _update_usage(self, usage: dict) -> None:
+        self.total_usage.completion_tokens += usage.get("output_tokens") or 0
+        self.total_usage.prompt_tokens += usage.get("input_tokens") or 0
+        self.total_usage.total_tokens += usage.get("total_tokens") or 0
+        if not self.total_usage.completion_tokens_details:
+            self.total_usage.completion_tokens_details = LLMCompletionTokensDetails()
+        if not self.total_usage.prompt_tokens_details:
+            self.total_usage.prompt_tokens_details = LLMPromptTokensDetails()
+
+        if usage.get("output_token_details"):
+            self.total_usage.completion_tokens_details.accepted_prediction_tokens += (
+                usage["output_token_details"].get("text_tokens")
+            )
+            self.total_usage.completion_tokens_details.audio_tokens += usage[
+                "output_token_details"
+            ].get("audio_tokens")
+
+        if usage.get("input_token_details"):
+            self.total_usage.prompt_tokens_details.audio_tokens += usage[
+                "input_token_details"
+            ].get("audio_tokens")
+            self.total_usage.prompt_tokens_details.cached_tokens += usage[
+                "input_token_details"
+            ].get("cached_tokens")
+            self.total_usage.prompt_tokens_details.text_tokens += usage[
+                "input_token_details"
+            ].get("text_tokens")
+
+        self.ten_env.log_info(f"total usage: {self.total_usage}")
+
+        data = Data.create("llm_stat")
+        data.set_property_from_json("usage", json.dumps(self.total_usage.model_dump()))
+        if self.connect_times and self.completion_times and self.first_token_times:
+
data.set_property_from_json( + "latency", + json.dumps( + { + "connection_latency_95": np.percentile(self.connect_times, 95), + "completion_latency_95": np.percentile( + self.completion_times, 95 + ), + "first_token_latency_95": np.percentile( + self.first_token_times, 95 + ), + "connection_latency_99": np.percentile(self.connect_times, 99), + "completion_latency_99": np.percentile( + self.completion_times, 99 + ), + "first_token_latency_99": np.percentile( + self.first_token_times, 99 + ), + } + ), + ) + asyncio.create_task(self.ten_env.send_data(data)) + + async def on_call_chat_completion(self, async_ten_env, **kargs): + raise NotImplementedError + + async def on_data_chat_completion(self, async_ten_env, **kargs): + raise NotImplementedError diff --git a/agents/ten_packages/extension/glm_v2v_python/manifest.json b/agents/ten_packages/extension/glm_v2v_python/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..be1a279f829b15e4d2f7197698b70d65a786da6c --- /dev/null +++ b/agents/ten_packages/extension/glm_v2v_python/manifest.json @@ -0,0 +1,162 @@ +{ + "type": "extension", + "name": "glm_v2v_python", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "package": { + "include": [ + "manifest.json", + "property.json", + "BUILD.gn", + "**.tent", + "**.py", + "README.md", + "realtime/**.tent", + "realtime/**.py" + ] + }, + "api": { + "property": { + "base_uri": { + "type": "string" + }, + "api_key": { + "type": "string" + }, + "path": { + "type": "string" + }, + "prompt": { + "type": "string" + }, + "temperature": { + "type": "float32" + }, + "max_tokens": { + "type": "int32" + }, + "server_vad": { + "type": "bool" + }, + "audio_out": { + "type": "bool" + }, + "input_transcript": { + "type": "bool" + }, + "sample_rate": { + "type": "int32" + }, + "stream_id": { + "type": "int32" + }, + "dump": { + "type": "bool" + }, + "greeting": { + "type": "string" + }, + "max_history": { + "type": "int32" + }, + "enable_storage": { + "type": "bool" + } + }, + "audio_frame_in": [ + { + "name": "pcm_frame", + "property": { + "stream_id": { + "type": "int64" + } + } + } + ], + "data_out": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + } + } + }, + { + "name": "append", + "property": { + "text": { + "type": "string" + } + } + } + ], + "cmd_in": [ + { + "name": "tool_register", + "property": { + "tool": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "parameters": { + "type": "array", + "items": { + "type": "object", + "properties": {} + } + } + }, + "required": [ + "name", + "description", + "parameters" + ] + } + }, + "result": { + "property": { + "response": { + "type": "string" + } + } + } + } + ], + "cmd_out": [ + { + "name": "flush" + }, + { + "name": "tool_call", + "property": { + "name": { + "type": "string" + }, + "args": { + "type": "string" + } + }, + "required": [ + "name" + ] + } + ], + "audio_frame_out": [ + { + "name": "pcm_frame" + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/glm_v2v_python/property.json b/agents/ten_packages/extension/glm_v2v_python/property.json new file mode 100644 index 0000000000000000000000000000000000000000..aabe8d6a07139cf367202383fb7c73c69e3af197 --- /dev/null +++ b/agents/ten_packages/extension/glm_v2v_python/property.json @@ -0,0 +1,11 @@ +{ + "api_key": "${env:GLM_API_KEY}", + "temperature": 0.9, + "max_tokens": 2048, + 
"server_vad": true, + "dump": false, + "max_history": 10, + "enable_storage": false, + "prompt": "", + "base_uri": "wss://open.bigmodel.cn" +} \ No newline at end of file diff --git a/agents/ten_packages/extension/glm_v2v_python/realtime/__init__.py b/agents/ten_packages/extension/glm_v2v_python/realtime/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/agents/ten_packages/extension/glm_v2v_python/realtime/connection.py b/agents/ten_packages/extension/glm_v2v_python/realtime/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..6e73770fa06b51068311520e063a1ab144809346 --- /dev/null +++ b/agents/ten_packages/extension/glm_v2v_python/realtime/connection.py @@ -0,0 +1,107 @@ +import asyncio +import base64 +import json +import os +import aiohttp + +from ten import AsyncTenEnv + +from typing import Any, AsyncGenerator +from .struct import InputAudioBufferAppend, ClientToServerMessage, ServerToClientMessage, parse_server_message, to_json + +def smart_str(s: str, max_field_len: int = 128) -> str: + """parse string as json, truncate data field to 128 characters, reserialize""" + try: + data = json.loads(s) + if "delta" in data: + key = "delta" + elif "audio" in data: + key = "audio" + else: + return s + + if len(data[key]) > max_field_len: + data[key] = data[key][:max_field_len] + "..." + return json.dumps(data) + except json.JSONDecodeError: + return s + + +class RealtimeApiConnection: + def __init__( + self, + ten_env: AsyncTenEnv, + base_uri: str, + api_key: str | None = None, + path: str = "/v1/realtime", + verbose: bool = False + ): + self.ten_env = ten_env + self.url = f"{base_uri}{path}" + # if not self.vendor and "model=" not in self.url: + # self.url += f"?model={model}" + + self.api_key = api_key or os.environ.get("GLM_API_KEY") + self.websocket: aiohttp.ClientWebSocketResponse | None = None + self.verbose = verbose + self.session = aiohttp.ClientSession() + + async def __aenter__(self) -> "RealtimeApiConnection": + await self.connect() + return self + + async def __aexit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> bool: + await self.close() + return False + + async def connect(self): + headers = {} + + headers = {"Authorization": "Bearer " + self.api_key} + + self.websocket = await self.session.ws_connect( + url=self.url, + # auth=auth, + headers=headers, + ) + + async def send_audio_data(self, audio_data: bytes): + """audio_data is assumed to be pcm16 24kHz mono little-endian""" + base64_audio_data = base64.b64encode(audio_data).decode("utf-8") + message = InputAudioBufferAppend(audio=base64_audio_data) + await self.send_request(message) + + async def send_request(self, message: ClientToServerMessage): + assert self.websocket is not None + message_str = to_json(message) + if self.verbose: + self.ten_env.log_info(f"-> {smart_str(message_str)}") + await self.websocket.send_str(message_str) + + async def listen(self) -> AsyncGenerator[ServerToClientMessage, None]: + assert self.websocket is not None + if self.verbose: + self.ten_env.log_info("Listening for realtimeapi messages") + try: + async for msg in self.websocket: + if msg.type == aiohttp.WSMsgType.TEXT: + if self.verbose: + self.ten_env.log_info(f"<- {smart_str(msg.data)}") + yield self.handle_server_message(msg.data) + elif msg.type == aiohttp.WSMsgType.ERROR: + self.ten_env.log_error("Error during receive: %s", self.websocket.exception()) + break + except asyncio.CancelledError: + 
self.ten_env.log_info("Receive messages task cancelled") + + def handle_server_message(self, message: str) -> ServerToClientMessage: + try: + return parse_server_message(message) + except Exception as e: + self.ten_env.log_info(f"Error handling message {message} {e}") + + async def close(self): + # Close the websocket connection if it exists + if self.websocket: + await self.websocket.close() + self.websocket = None diff --git a/agents/ten_packages/extension/glm_v2v_python/realtime/struct.py b/agents/ten_packages/extension/glm_v2v_python/realtime/struct.py new file mode 100644 index 0000000000000000000000000000000000000000..aaa369ab18656667179ecbf0e91beb7319cc57ec --- /dev/null +++ b/agents/ten_packages/extension/glm_v2v_python/realtime/struct.py @@ -0,0 +1,719 @@ +import json + +from dataclasses import dataclass, asdict, field, is_dataclass +from typing import Any, Dict, Literal, Optional, List, Set, Union +from enum import Enum +import uuid + + +def generate_event_id() -> str: + return str(uuid.uuid4()) + + +class AudioFormats(str, Enum): + WAV24 = "wav24" + PCM = "pcm" + G711_ULAW = "g711_ulaw" + G711_ALAW = "g711_alaw" + +class ItemType(str, Enum): + Message = "message" + FunctionCall = "function_call" + FunctionCallOutput = "function_call_output" + +class MessageRole(str, Enum): + System = "system" + User = "user" + Assistant = "assistant" + +class ContentType(str, Enum): + InputText = "input_text" + InputAudio = "input_audio" + Text = "text" + Audio = "audio" + +class ChatMode(str, Enum): + VideoPassive = "video_passive" + Audio = "audio" + +@dataclass +class FunctionToolChoice: + name: str # Name of the function + type: str = "function" # Fixed value for type + +# ToolChoice can be either a literal string or FunctionToolChoice +ToolChoice = Union[str, FunctionToolChoice] # "none", "auto", "required", or FunctionToolChoice + +@dataclass +class RealtimeError: + type: str # The type of the error + message: str # The error message + code: Optional[str] = None # Optional error code + param: Optional[str] = None # Optional parameter related to the error + event_id: Optional[str] = None # Optional event ID for tracing + +@dataclass +class InputAudioTranscription: + model: str = "whisper-1" # Default transcription model is "whisper-1" + +@dataclass +class ServerVADUpdateParams: + threshold: Optional[float] = None # Threshold for voice activity detection + prefix_padding_ms: Optional[int] = None # Amount of padding before the voice starts (in milliseconds) + silence_duration_ms: Optional[int] = None # Duration of silence before considering speech stopped (in milliseconds) + type: str = "server_vad" # Fixed value for VAD type + + +@dataclass +class BetaFieldsParams: + chat_mode: ChatMode = ChatMode.Audio # Chat mode for the session, defaulting to "audio" + tts_source: Optional[str] = "e2e" # Source of the TTS audio + +@dataclass +class Session: + id: str # The unique identifier for the session + model: str # The model associated with the session (e.g., "gpt-3") + # expires_at: int # Expiration time of the session in seconds since the epoch (UNIX timestamp) + object: str = "realtime.session" # Fixed value indicating the object type + modalities: Set[str] = field(default_factory=lambda: {"text", "audio"}) # Set of allowed modalities (e.g., "text", "audio") + instructions: Optional[str] = None # Instructions or guidance for the session + turn_detection: Optional[ServerVADUpdateParams] = None # Voice activity detection (VAD) settings + input_audio_format: AudioFormats = AudioFormats.PCM # Audio 
format for input (e.g., "pcm16") + output_audio_format: AudioFormats = AudioFormats.PCM # Audio format for output (e.g., "pcm16") + input_audio_transcription: Optional[InputAudioTranscription] = None # Audio transcription model settings (e.g., "whisper-1") + tools: List[Dict[str, Union[str, Any]]] = field(default_factory=list) # List of tools available during the session + tool_choice: Literal["auto", "none", "required"] = "auto" # How tools should be used in the session + temperature: float = 0.8 # Temperature setting for model creativity + max_response_output_tokens: Union[int, Literal["inf"]] = "inf" # Maximum number of tokens in the response, or "inf" for unlimited + + +@dataclass +class SessionUpdateParams: + # model: Optional[str] = None # Optional string to specify the model + # modalities: Optional[Set[str]] = None # Set of allowed modalities (e.g., "text", "audio") + instructions: Optional[str] = None # Optional instructions string + # voice: Optional[Voices] = None # Voice selection, can be `None` or from `Voices` Enum + turn_detection: ServerVADUpdateParams = field(default_factory=ServerVADUpdateParams) # VAD update parameters + input_audio_format: Optional[AudioFormats] = None # Input audio format from `AudioFormats` Enum + output_audio_format: Optional[AudioFormats] = None # Output audio format from `AudioFormats` Enum + # input_audio_transcription: Optional[InputAudioTranscription] = None # Optional transcription model + tools: Optional[List[Dict[str, Union[str, any]]]] = None # List of tools (e.g., dictionaries) + # tool_choice: Optional[ToolChoice] = None # ToolChoice, either string or `FunctionToolChoice` + # temperature: Optional[float] = None # Optional temperature for response generation + # max_response_output_tokens: Optional[Union[int, str]] = None # Max response tokens, "inf" for infinite + beta_fields: BetaFieldsParams = field(default_factory=BetaFieldsParams) # Beta fields for additional settings + + +# Define individual message item param types +@dataclass +class SystemMessageItemParam: + content: List[dict] # This can be more specific based on content structure + id: Optional[str] = None + status: Optional[str] = None + type: str = "message" + role: str = "system" + +@dataclass +class UserMessageItemParam: + content: List[dict] # Similarly, content can be more specific + id: Optional[str] = None + status: Optional[str] = None + type: str = "message" + role: str = "user" + +@dataclass +class AssistantMessageItemParam: + content: List[dict] # Content structure here depends on your schema + id: Optional[str] = None + status: Optional[str] = None + type: str = "message" + role: str = "assistant" + +@dataclass +class FunctionCallItemParam: + name: str + call_id: str + arguments: str + type: str = "function_call" + id: Optional[str] = None + status: Optional[str] = None + +@dataclass +class FunctionCallOutputItemParam: + # call_id: str + output: str + id: Optional[str] = None + type: str = "function_call_output" + +# Union of all possible item types +ItemParam = Union[ + SystemMessageItemParam, + UserMessageItemParam, + AssistantMessageItemParam, + FunctionCallItemParam, + FunctionCallOutputItemParam +] + + +# Assuming the EventType and other enums are already defined +# For reference: +class EventType(str, Enum): + SESSION_UPDATE = "session.update" + INPUT_AUDIO_BUFFER_APPEND = "input_audio_buffer.append" + INPUT_AUDIO_BUFFER_COMMIT = "input_audio_buffer.commit" + INPUT_AUDIO_BUFFER_CLEAR = "input_audio_buffer.clear" + UPDATE_CONVERSATION_CONFIG = 
"update_conversation_config" + ITEM_CREATE = "conversation.item.create" + ITEM_TRUNCATE = "conversation.item.truncate" + ITEM_DELETE = "conversation.item.delete" + RESPONSE_CREATE = "response.create" + RESPONSE_CANCEL = "response.cancel" + + ERROR = "error" + SESSION_CREATED = "session.created" + SESSION_UPDATED = "session.updated" + + INPUT_AUDIO_BUFFER_COMMITTED = "input_audio_buffer.committed" + INPUT_AUDIO_BUFFER_CLEARED = "input_audio_buffer.cleared" + INPUT_AUDIO_BUFFER_SPEECH_STARTED = "input_audio_buffer.speech_started" + INPUT_AUDIO_BUFFER_SPEECH_STOPPED = "input_audio_buffer.speech_stopped" + + ITEM_CREATED = "conversation.item.created" + ITEM_DELETED = "conversation.item.deleted" + ITEM_TRUNCATED = "conversation.item.truncated" + ITEM_INPUT_AUDIO_TRANSCRIPTION_COMPLETED = "conversation.item.input_audio_transcription.completed" + ITEM_INPUT_AUDIO_TRANSCRIPTION_FAILED = "conversation.item.input_audio_transcription.failed" + + RESPONSE_CREATED = "response.created" + RESPONSE_CANCELLED = "response.cancelled" + RESPONSE_DONE = "response.done" + RESPONSE_OUTPUT_ITEM_ADDED = "response.output_item.added" + RESPONSE_OUTPUT_ITEM_DONE = "response.output_item.done" + RESPONSE_CONTENT_PART_ADDED = "response.content_part.added" + RESPONSE_CONTENT_PART_DONE = "response.content_part.done" + RESPONSE_TEXT_DELTA = "response.text.delta" + RESPONSE_TEXT_DONE = "response.text.done" + RESPONSE_AUDIO_TRANSCRIPT_DELTA = "response.audio_transcript.delta" + RESPONSE_AUDIO_TRANSCRIPT_DONE = "response.audio_transcript.done" + RESPONSE_AUDIO_DELTA = "response.audio.delta" + RESPONSE_AUDIO_DONE = "response.audio.done" + RESPONSE_FUNCTION_CALL_ARGUMENTS_DELTA = "response.function_call_arguments.delta" + RESPONSE_FUNCTION_CALL_ARGUMENTS_DONE = "response.function_call_arguments.done" + RATE_LIMITS_UPDATED = "rate_limits.updated" + +# Base class for all ServerToClientMessages +@dataclass +class ServerToClientMessage: + event_id: str + + +@dataclass +class ErrorMessage(ServerToClientMessage): + error: RealtimeError + type: str = EventType.ERROR + + +@dataclass +class SessionCreated(ServerToClientMessage): + session: Session + type: str = EventType.SESSION_CREATED + + +@dataclass +class SessionUpdated(ServerToClientMessage): + session: Session + type: str = EventType.SESSION_UPDATED + + +@dataclass +class InputAudioBufferCommitted(ServerToClientMessage): + item_id: str + type: str = EventType.INPUT_AUDIO_BUFFER_COMMITTED + previous_item_id: Optional[str] = None + + +@dataclass +class InputAudioBufferCleared(ServerToClientMessage): + type: str = EventType.INPUT_AUDIO_BUFFER_CLEARED + + +@dataclass +class InputAudioBufferSpeechStarted(ServerToClientMessage): + audio_start_ms: int + # item_id: str + type: str = EventType.INPUT_AUDIO_BUFFER_SPEECH_STARTED + + +@dataclass +class InputAudioBufferSpeechStopped(ServerToClientMessage): + audio_end_ms: int + type: str = EventType.INPUT_AUDIO_BUFFER_SPEECH_STOPPED + # item_id: Optional[str] = None + + +@dataclass +class ItemCreated(ServerToClientMessage): + item: ItemParam + type: str = EventType.ITEM_CREATED + previous_item_id: Optional[str] = None + + +@dataclass +class ItemTruncated(ServerToClientMessage): + item_id: str + content_index: int + audio_end_ms: int + type: str = EventType.ITEM_TRUNCATED + + +@dataclass +class ItemDeleted(ServerToClientMessage): + item_id: str + type: str = EventType.ITEM_DELETED + + +# Assuming the necessary enums, ItemParam, and other classes are defined above +# ResponseStatus could be a string or an enum, depending on your schema + +# Enum 
or Literal for ResponseStatus (could be more extensive) +ResponseStatus = Union[str, Literal["in_progress", "completed", "cancelled", "incomplete", "failed"]] + +# Define status detail classes +@dataclass +class ResponseCancelledDetails: + reason: str # e.g., "turn_detected", "client_cancelled" + type: str = "cancelled" + +@dataclass +class ResponseIncompleteDetails: + reason: str # e.g., "max_output_tokens", "content_filter" + type: str = "incomplete" + +@dataclass +class ResponseError: + type: str # The type of the error, e.g., "validation_error", "server_error" + message: str # The error message describing what went wrong + code: Optional[str] = None # Optional error code, e.g., HTTP status code, API error code + +@dataclass +class ResponseFailedDetails: + error: ResponseError # Assuming ResponseError is already defined + type: str = "failed" + +# Union of possible status details +ResponseStatusDetails = Union[ResponseCancelledDetails, ResponseIncompleteDetails, ResponseFailedDetails] + +# Define Usage class to handle token usage +@dataclass +class InputTokenDetails: + cached_tokens: int + text_tokens: int + audio_tokens: int + +@dataclass +class OutputTokenDetails: + text_tokens: int + audio_tokens: int + +@dataclass +class Usage: + total_tokens: int + input_tokens: int + output_tokens: int + input_token_details: InputTokenDetails + output_token_details: OutputTokenDetails + +# The Response dataclass definition +@dataclass +class Response: + id: str # Unique ID for the response + output: List[ItemParam] = field(default_factory=list) # List of items in the response + object: str = "realtime.response" # Fixed value for object type + status: ResponseStatus = "in_progress" # Status of the response + status_details: Optional[ResponseStatusDetails] = None # Additional details based on status + usage: Optional[Usage] = None # Token usage information + metadata: Optional[Dict[str, Any]] = None # Additional metadata for the response + + + +@dataclass +class ResponseCreated(ServerToClientMessage): + response: Response + type: str = EventType.RESPONSE_CREATED + + +@dataclass +class ResponseDone(ServerToClientMessage): + response: Response + type: str = EventType.RESPONSE_DONE + + +@dataclass +class ResponseTextDelta(ServerToClientMessage): + output_index: int + content_index: int + delta: str + type: str = EventType.RESPONSE_TEXT_DELTA + + +@dataclass +class ResponseTextDone(ServerToClientMessage): + output_index: int + content_index: int + text: str + type: str = EventType.RESPONSE_TEXT_DONE + + +@dataclass +class ResponseAudioTranscriptDelta(ServerToClientMessage): + response_id: str + output_index: int + content_index: int + delta: str + type: str = EventType.RESPONSE_AUDIO_TRANSCRIPT_DELTA + + +@dataclass +class ResponseAudioTranscriptDone(ServerToClientMessage): + response_id: str + output_index: int + content_index: int + transcript: str + type: str = EventType.RESPONSE_AUDIO_TRANSCRIPT_DONE + + +@dataclass +class ResponseAudioDelta(ServerToClientMessage): + output_index: int + content_index: int + delta: str + type: str = EventType.RESPONSE_AUDIO_DELTA + + +@dataclass +class ResponseAudioDone(ServerToClientMessage): + output_index: int + content_index: int + type: str = EventType.RESPONSE_AUDIO_DONE + + +@dataclass +class ResponseFunctionCallArgumentsDelta(ServerToClientMessage): + output_index: int + call_id: str + delta: str + type: str = EventType.RESPONSE_FUNCTION_CALL_ARGUMENTS_DELTA + + +@dataclass +class ResponseFunctionCallArgumentsDone(ServerToClientMessage): + output_index: int + 
# call_id: str + name: str + arguments: str + type: str = EventType.RESPONSE_FUNCTION_CALL_ARGUMENTS_DONE + + +@dataclass +class RateLimitDetails: + name: str # Name of the rate limit, e.g., "api_requests", "message_generation" + limit: int # The maximum number of allowed requests in the current time window + remaining: int # The number of requests remaining in the current time window + reset_seconds: float # The number of seconds until the rate limit resets + +@dataclass +class RateLimitsUpdated(ServerToClientMessage): + rate_limits: List[RateLimitDetails] + type: str = EventType.RATE_LIMITS_UPDATED + + +@dataclass +class ResponseOutputItemAdded(ServerToClientMessage): + response_id: str # The ID of the response + output_index: int # Index of the output item in the response + item: Union[ItemParam, None] # The added item (can be a message, function call, etc.) + type: str = EventType.RESPONSE_OUTPUT_ITEM_ADDED # Fixed event type + +@dataclass +class ResponseContentPartAdded(ServerToClientMessage): + response_id: str # The ID of the response + item_id: str # The ID of the item to which the content part was added + output_index: int # Index of the output item in the response + content_index: int # Index of the content part in the output + part: Union[ItemParam, None] # The added content part + content: Union[ItemParam, None] = None # The added content part for azure + type: str = EventType.RESPONSE_CONTENT_PART_ADDED # Fixed event type + +@dataclass +class ResponseContentPartDone(ServerToClientMessage): + response_id: str # The ID of the response + item_id: str # The ID of the item to which the content part belongs + output_index: int # Index of the output item in the response + content_index: int # Index of the content part in the output + part: Union[ItemParam, None] # The content part that was completed + content: Union[ItemParam, None] = None # The added content part for azure + type: str = EventType.RESPONSE_CONTENT_PART_ADDED # Fixed event type + +@dataclass +class ResponseOutputItemDone(ServerToClientMessage): + response_id: str # The ID of the response + output_index: int # Index of the output item in the response + item: Union[ItemParam, None] # The output item that was completed + type: str = EventType.RESPONSE_OUTPUT_ITEM_DONE # Fixed event type + +@dataclass +class ItemInputAudioTranscriptionCompleted(ServerToClientMessage): + content_index: int # Index of the content part that was transcribed + transcript: str # The transcribed text + type: str = EventType.ITEM_INPUT_AUDIO_TRANSCRIPTION_COMPLETED # Fixed event type + +@dataclass +class ItemInputAudioTranscriptionFailed(ServerToClientMessage): + content_index: int # Index of the content part that failed to transcribe + error: ResponseError # Error details explaining the failure + type: str = EventType.ITEM_INPUT_AUDIO_TRANSCRIPTION_FAILED # Fixed event type + +# Union of all server-to-client message types +ServerToClientMessages = Union[ + ErrorMessage, + SessionCreated, + SessionUpdated, + InputAudioBufferCommitted, + InputAudioBufferCleared, + InputAudioBufferSpeechStarted, + InputAudioBufferSpeechStopped, + ItemCreated, + ItemTruncated, + ItemDeleted, + ResponseCreated, + ResponseDone, + ResponseTextDelta, + ResponseTextDone, + ResponseAudioTranscriptDelta, + ResponseAudioTranscriptDone, + ResponseAudioDelta, + ResponseAudioDone, + ResponseFunctionCallArgumentsDelta, + ResponseFunctionCallArgumentsDone, + RateLimitsUpdated, + ResponseOutputItemAdded, + ResponseContentPartAdded, + ResponseContentPartDone, + 
ResponseOutputItemDone, + ItemInputAudioTranscriptionCompleted, + ItemInputAudioTranscriptionFailed +] + + + +# Base class for all ClientToServerMessages +@dataclass +class ClientToServerMessage: + event_id: str = field(default_factory=generate_event_id) + + +@dataclass +class InputAudioBufferAppend(ClientToServerMessage): + audio: Optional[str] = field(default=None) + type: str = EventType.INPUT_AUDIO_BUFFER_APPEND # Default argument (has a default value) + +@dataclass +class InputAudioBufferCommit(ClientToServerMessage): + type: str = EventType.INPUT_AUDIO_BUFFER_COMMIT + + +@dataclass +class InputAudioBufferClear(ClientToServerMessage): + type: str = EventType.INPUT_AUDIO_BUFFER_CLEAR + + +@dataclass +class ItemCreate(ClientToServerMessage): + item: Optional[ItemParam] = field(default=None) # Assuming `ItemParam` is already defined + type: str = EventType.ITEM_CREATE + previous_item_id: Optional[str] = None + + +@dataclass +class ItemTruncate(ClientToServerMessage): + item_id: Optional[str] = field(default=None) + content_index: Optional[int] = field(default=None) + audio_end_ms: Optional[int] = field(default=None) + type: str = EventType.ITEM_TRUNCATE + + +@dataclass +class ItemDelete(ClientToServerMessage): + item_id: Optional[str] = field(default=None) + type: str = EventType.ITEM_DELETE + +@dataclass +class ResponseCreateParams: + commit: bool = True # Whether the generated messages should be appended to the conversation + cancel_previous: bool = True # Whether to cancel the previous pending generation + append_input_items: Optional[List[ItemParam]] = None # Messages to append before response generation + input_items: Optional[List[ItemParam]] = None # Initial messages to use for generation + modalities: Optional[Set[str]] = None # Allowed modalities (e.g., "text", "audio") + instructions: Optional[str] = None # Instructions or guidance for the model + # voice: Optional[Voices] = None # Voice setting for audio output + output_audio_format: Optional[AudioFormats] = None # Format for the audio output + tools: Optional[List[Dict[str, Any]]] = None # Tools available for this response + tool_choice: Optional[ToolChoice] = None # How to choose the tool ("auto", "required", etc.) 
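+    # Serialization note: to_json() (defined at the bottom of this module)
+    # drops any field whose value is None, so a minimal request such as
+    # ResponseCreate(response=ResponseCreateParams()) goes out roughly as
+    # {"event_id": "...", "type": "response.create",
+    #  "response": {"commit": true, "cancel_previous": true}}.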
+ temperature: Optional[float] = None # The randomness of the model's responses + max_response_output_tokens: Optional[Union[int, str]] = None # Max number of tokens for the output, "inf" for infinite + + +@dataclass +class ResponseCreate(ClientToServerMessage): + type: str = EventType.RESPONSE_CREATE + response: Optional[ResponseCreateParams] = None # Assuming `ResponseCreateParams` is defined + + +@dataclass +class ResponseCancel(ClientToServerMessage): + type: str = EventType.RESPONSE_CANCEL + +DEFAULT_CONVERSATION = "default" + +@dataclass +class UpdateConversationConfig(ClientToServerMessage): + type: str = EventType.UPDATE_CONVERSATION_CONFIG + label: str = DEFAULT_CONVERSATION + subscribe_to_user_audio: Optional[bool] = None + # voice: Optional[Voices] = None + system_message: Optional[str] = None + temperature: Optional[float] = None + max_tokens: Optional[int] = None + tools: Optional[List[dict]] = None + tool_choice: Optional[ToolChoice] = None + disable_audio: Optional[bool] = None + output_audio_format: Optional[AudioFormats] = None + + +@dataclass +class SessionUpdate(ClientToServerMessage): + session: Optional[SessionUpdateParams] = field(default=None) # Assuming `SessionUpdateParams` is defined + type: str = EventType.SESSION_UPDATE + + +# Union of all client-to-server message types +ClientToServerMessages = Union[ + InputAudioBufferAppend, + InputAudioBufferCommit, + InputAudioBufferClear, + ItemCreate, + ItemTruncate, + ItemDelete, + ResponseCreate, + ResponseCancel, + UpdateConversationConfig, + SessionUpdate +] + +def from_dict(data_class, data): + """Recursively convert a dictionary to a dataclass instance.""" + if is_dataclass(data_class): # Check if the target class is a dataclass + fieldtypes = {f.name: f.type for f in data_class.__dataclass_fields__.values()} + # Filter out keys that are not in the dataclass fields + valid_data = {f: data[f] for f in fieldtypes if f in data} + return data_class(**{f: from_dict(fieldtypes[f], valid_data[f]) for f in valid_data}) + elif isinstance(data, list): # Handle lists of nested dataclass objects + return [from_dict(data_class.__args__[0], item) for item in data] + else: # For primitive types (str, int, float, etc.), return the value as-is + return data + +def parse_client_message(unparsed_string: str) -> ClientToServerMessage: + data = json.loads(unparsed_string) + + # Dynamically select the correct message class based on the `type` field, using from_dict + if data["type"] == EventType.INPUT_AUDIO_BUFFER_APPEND: + return from_dict(InputAudioBufferAppend, data) + elif data["type"] == EventType.INPUT_AUDIO_BUFFER_COMMIT: + return from_dict(InputAudioBufferCommit, data) + elif data["type"] == EventType.INPUT_AUDIO_BUFFER_CLEAR: + return from_dict(InputAudioBufferClear, data) + elif data["type"] == EventType.ITEM_CREATE: + return from_dict(ItemCreate, data) + elif data["type"] == EventType.ITEM_TRUNCATE: + return from_dict(ItemTruncate, data) + elif data["type"] == EventType.ITEM_DELETE: + return from_dict(ItemDelete, data) + elif data["type"] == EventType.RESPONSE_CREATE: + return from_dict(ResponseCreate, data) + elif data["type"] == EventType.RESPONSE_CANCEL: + return from_dict(ResponseCancel, data) + elif data["type"] == EventType.UPDATE_CONVERSATION_CONFIG: + return from_dict(UpdateConversationConfig, data) + elif data["type"] == EventType.SESSION_UPDATE: + return from_dict(SessionUpdate, data) + + raise ValueError(f"Unknown message type: {data['type']}") + + +# Assuming all necessary classes and enums (EventType, 
ServerToClientMessages, etc.) are imported +# Here’s how you can dynamically parse a server-to-client message based on the `type` field: + +def parse_server_message(unparsed_string: str) -> ServerToClientMessage: + data = json.loads(unparsed_string) + + # Dynamically select the correct message class based on the `type` field, using from_dict + if data["type"] == EventType.ERROR: + return from_dict(ErrorMessage, data) + elif data["type"] == EventType.SESSION_CREATED: + return from_dict(SessionCreated, data) + elif data["type"] == EventType.SESSION_UPDATED: + return from_dict(SessionUpdated, data) + elif data["type"] == EventType.INPUT_AUDIO_BUFFER_COMMITTED: + return from_dict(InputAudioBufferCommitted, data) + elif data["type"] == EventType.INPUT_AUDIO_BUFFER_CLEARED: + return from_dict(InputAudioBufferCleared, data) + elif data["type"] == EventType.INPUT_AUDIO_BUFFER_SPEECH_STARTED: + return from_dict(InputAudioBufferSpeechStarted, data) + elif data["type"] == EventType.INPUT_AUDIO_BUFFER_SPEECH_STOPPED: + return from_dict(InputAudioBufferSpeechStopped, data) + elif data["type"] == EventType.ITEM_CREATED: + return from_dict(ItemCreated, data) + elif data["type"] == EventType.ITEM_TRUNCATED: + return from_dict(ItemTruncated, data) + elif data["type"] == EventType.ITEM_DELETED: + return from_dict(ItemDeleted, data) + elif data["type"] == EventType.RESPONSE_CREATED: + return from_dict(ResponseCreated, data) + elif data["type"] == EventType.RESPONSE_DONE: + return from_dict(ResponseDone, data) + elif data["type"] == EventType.RESPONSE_TEXT_DELTA: + return from_dict(ResponseTextDelta, data) + elif data["type"] == EventType.RESPONSE_TEXT_DONE: + return from_dict(ResponseTextDone, data) + elif data["type"] == EventType.RESPONSE_AUDIO_TRANSCRIPT_DELTA: + return from_dict(ResponseAudioTranscriptDelta, data) + elif data["type"] == EventType.RESPONSE_AUDIO_TRANSCRIPT_DONE: + return from_dict(ResponseAudioTranscriptDone, data) + elif data["type"] == EventType.RESPONSE_AUDIO_DELTA: + return from_dict(ResponseAudioDelta, data) + elif data["type"] == EventType.RESPONSE_AUDIO_DONE: + return from_dict(ResponseAudioDone, data) + elif data["type"] == EventType.RESPONSE_FUNCTION_CALL_ARGUMENTS_DELTA: + return from_dict(ResponseFunctionCallArgumentsDelta, data) + elif data["type"] == EventType.RESPONSE_FUNCTION_CALL_ARGUMENTS_DONE: + return from_dict(ResponseFunctionCallArgumentsDone, data) + elif data["type"] == EventType.RATE_LIMITS_UPDATED: + return from_dict(RateLimitsUpdated, data) + elif data["type"] == EventType.RESPONSE_OUTPUT_ITEM_ADDED: + return from_dict(ResponseOutputItemAdded, data) + elif data["type"] == EventType.RESPONSE_CONTENT_PART_ADDED: + return from_dict(ResponseContentPartAdded, data) + elif data["type"] == EventType.RESPONSE_CONTENT_PART_DONE: + return from_dict(ResponseContentPartDone, data) + elif data["type"] == EventType.RESPONSE_OUTPUT_ITEM_DONE: + return from_dict(ResponseOutputItemDone, data) + elif data["type"] == EventType.ITEM_INPUT_AUDIO_TRANSCRIPTION_COMPLETED: + return from_dict(ItemInputAudioTranscriptionCompleted, data) + elif data["type"] == EventType.ITEM_INPUT_AUDIO_TRANSCRIPTION_FAILED: + return from_dict(ItemInputAudioTranscriptionFailed, data) + + raise ValueError(f"Unknown message type: {data['type']}") + +def to_json(obj: Union[ClientToServerMessage, ServerToClientMessage]) -> str: + # ignore none value + return json.dumps(asdict(obj, dict_factory=lambda x: {k: v for (k, v) in x if v is not None})) \ No newline at end of file diff --git 
a/agents/ten_packages/extension/glm_v2v_python/requirements.txt b/agents/ten_packages/extension/glm_v2v_python/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e2984efb6a4f6a2ea002d2bc6f59c474b0aacc23
--- /dev/null
+++ b/agents/ten_packages/extension/glm_v2v_python/requirements.txt
@@ -0,0 +1,6 @@
+asyncio
+pydantic
+numpy==1.26.4
+sounddevice==0.4.7
+pydub==0.25.1
+aiohttp
\ No newline at end of file
diff --git a/agents/ten_packages/extension/glue_python_async/README.md b/agents/ten_packages/extension/glue_python_async/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e7be443b17c2ed0081d740454568c1399e4f6a90
--- /dev/null
+++ b/agents/ten_packages/extension/glue_python_async/README.md
@@ -0,0 +1,37 @@
+# glue_python_async
+
+This is a Python extension for the glue service. The schema of the glue service is attached in `schema.yml`.
+
+An example OpenAI wrapper is also attached in `examples/openai_wrapper.py`.
+
+## Features
+
+The extension records chat history, keeping at most `max_history` entries.
+
+- `api_url` (required): the URL of the glue service.
+- `token` (required): the Bearer token used for the default auth scheme.
+
+The extension supports flush, which closes the existing HTTP session.
+
+## API
+
+Refer to the `api` definition in [manifest.json](manifest.json) and the default values in [property.json](property.json).
+
+- In:
+  - `text_data` [data]: the ASR result
+  - `flush` [cmd]: the flush signal
+- Out:
+  - `flush` [cmd]: the flush signal
+
+## Examples
+
+You can run the example with the following command; the wrapper service listens on port 8000 by default.
+
+```
+> export API_TOKEN="xxx" && export OPENAI_API_KEY="xxx" && python3 openai_wrapper.py
+
+INFO:     Started server process [162886]
+INFO:     Waiting for application startup.
+INFO:     Application startup complete.
+INFO:     Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
+```
diff --git a/agents/ten_packages/extension/glue_python_async/__init__.py b/agents/ten_packages/extension/glue_python_async/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c22fdd7cd08a97dfe876dad2d4bb86e7e30f0e15
--- /dev/null
+++ b/agents/ten_packages/extension/glue_python_async/__init__.py
@@ -0,0 +1,7 @@
+#
+# This file is part of TEN Framework, an open source project.
+# Licensed under the Apache License, Version 2.0.
+# See the LICENSE file for more information.
+#
+from . import addon
+
diff --git a/agents/ten_packages/extension/glue_python_async/addon.py b/agents/ten_packages/extension/glue_python_async/addon.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f33c4c2460bd16391c57f428d7cb4c37b7a55ed
--- /dev/null
+++ b/agents/ten_packages/extension/glue_python_async/addon.py
@@ -0,0 +1,19 @@
+#
+# This file is part of TEN Framework, an open source project.
+# Licensed under the Apache License, Version 2.0.
+# See the LICENSE file for more information.
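+#
+# An addon is the factory the TEN runtime uses to create extension instances:
+# @register_addon_as_extension binds the addon name used in graph configs to
+# this class, and on_create_instance builds one extension object per graph
+# node. The extension class is imported lazily inside on_create_instance so
+# discovery does not pull in the extension's heavier dependencies. A graph
+# node would reference it roughly like (hypothetical snippet):
+#   {"type": "extension", "addon": "glue_python_async", "name": "glue", ...}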
+# +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + + +@register_addon_as_extension("glue_python_async") +class AsyncGlueExtensionAddon(Addon): + + def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None: + from .extension import AsyncGlueExtension + ten_env.log_info("AsyncGlueExtensionAddon on_create_instance") + ten_env.on_create_instance_done(AsyncGlueExtension(name), context) diff --git a/agents/ten_packages/extension/glue_python_async/examples/openai_wrapper.py b/agents/ten_packages/extension/glue_python_async/examples/openai_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..55e4053f8643329d1051b89ebabf9e50c90daba5 --- /dev/null +++ b/agents/ten_packages/extension/glue_python_async/examples/openai_wrapper.py @@ -0,0 +1,183 @@ +import os +import openai +import json +from openai import AsyncOpenAI +import traceback +import logging +import logging.config + +from typing import List, Union, Dict, Optional +from pydantic import BaseModel, HttpUrl, ValidationError + +from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials +from fastapi.responses import StreamingResponse, JSONResponse +from fastapi import Depends, FastAPI, HTTPException, Request +import asyncio + +# Set up logging +logging.config.dictConfig({ + "version": 1, + "disable_existing_loggers": False, + "formatters": { + "default": { + "format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s", + }, + }, + "handlers": { + "file": { + "level": "DEBUG", + "formatter": "default", + "class": "logging.FileHandler", + "filename": "example.log", + }, + }, + "loggers": { + "": { + "handlers": ["file"], + "level": "DEBUG", + "propagate": True, + }, + }, +}) +logger = logging.getLogger(__name__) + +app = FastAPI(title="Chat Completion API", + description="API for streaming chat completions with support for text, image, and audio content", + version="1.0.0") + +# Set your OpenAI API key +openai.api_key = os.getenv("OPENAI_API_KEY") + +class TextContent(BaseModel): + type: str = "text" + text: str + +class ImageContent(BaseModel): + type: str = "image" + image_url: HttpUrl + +class AudioContent(BaseModel): + type: str = "input_audio" + input_audio: Dict[str, str] + +class ToolFunction(BaseModel): + name: str + description: Optional[str] + parameters: Optional[Dict] + strict: bool = False + +class Tool(BaseModel): + type: str = "function" + function: ToolFunction + +class ToolChoice(BaseModel): + type: str = "function" + function: Optional[Dict] + +class ResponseFormat(BaseModel): + type: str = "json_schema" + json_schema: Optional[Dict[str, str]] + +class SystemMessage(BaseModel): + role: str = "system" + content: Union[str, List[str]] + +class UserMessage(BaseModel): + role: str = "user" + content: Union[str, List[Union[TextContent, ImageContent, AudioContent]]] + +class AssistantMessage(BaseModel): + role: str = "assistant" + content: Union[str, List[TextContent]] = None + audio: Optional[Dict[str, str]] = None + tool_calls: Optional[List[Dict]] = None + +class ToolMessage(BaseModel): + role: str = "tool" + content: Union[str, List[str]] + tool_call_id: str + +class ChatCompletionRequest(BaseModel): + context: Optional[Dict] = None + model: Optional[str] = None + messages: List[Union[SystemMessage, UserMessage, AssistantMessage, ToolMessage]] + response_format: Optional[ResponseFormat] = None + modalities: List[str] = ["text"] + audio: Optional[Dict[str, str]] = None + tools: Optional[List[Tool]] = None + tool_choice: Optional[Union[str, ToolChoice]] 
= "auto" + parallel_tool_calls: bool = True + stream: bool = True + stream_options: Optional[Dict] = None + +''' +{'messages': [{'role': 'user', 'content': 'Hello. Hello. Hello.'}, {'role': 'user', 'content': 'Unprocessedable.'}], 'tools': [], 'tools_choice': 'none', 'model': 'gpt-3.5-turbo', 'stream': True} +''' + +security = HTTPBearer() + +def verify_token(credentials: HTTPAuthorizationCredentials = Depends(security)): + token = credentials.credentials + if token != os.getenv("API_TOKEN"): + logger.warning("Invalid or missing token") + raise HTTPException(status_code=403, detail="Invalid or missing token") + +@app.post("/chat/completions", dependencies=[Depends(verify_token)]) +async def create_chat_completion(request: ChatCompletionRequest, req: Request): + try: + logger.debug(f"Received request: {request.json()}") + client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY")) + response = await client.chat.completions.create( + model=request.model, + messages=request.messages, # Directly use request messages + tool_choice=request.tool_choice if request.tools and request.tool_choice else None, + tools=request.tools if request.tools else None, + # modalities=request.modalities, + response_format=request.response_format, + stream=request.stream, + stream_options=request.stream_options + ) + + if request.stream: + async def generate(): + try: + async for chunk in response: + logger.info(f"Received chunk: {chunk}") + yield f"data: {json.dumps(chunk.to_dict())}\n\n" + yield "data: [DONE]\n\n" + except asyncio.CancelledError: + logger.info("Request was cancelled") + raise + + return StreamingResponse(generate(), media_type="text/event-stream") + else: + result = await response + return result + except asyncio.CancelledError: + logger.info("Request was cancelled") + raise HTTPException(status_code=499, detail="Request was cancelled") + except Exception as e: + traceback_str = ''.join(traceback.format_tb(e.__traceback__)) + error_message = f"{str(e)}\n{traceback_str}" + logger.error(error_message) + raise HTTPException(status_code=500, detail=error_message) + +if __name__ == "__main__": + import uvicorn + from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials + from fastapi import Depends + import traceback + + ''' + http_proxy = os.getenv("HTTP_PROXY") + https_proxy = os.getenv("HTTPS_PROXY") + + if http_proxy or https_proxy: + proxies = { + "http": http_proxy, + "https": https_proxy + } + openai.proxy = proxies + ''' + + uvicorn.run(app, host="0.0.0.0", port=8000) diff --git a/agents/ten_packages/extension/glue_python_async/examples/requirements.txt b/agents/ten_packages/extension/glue_python_async/examples/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..0cfcb8ddcda04ec8ccabfe242d1fed761c2535b0 --- /dev/null +++ b/agents/ten_packages/extension/glue_python_async/examples/requirements.txt @@ -0,0 +1,3 @@ +uvicorn +fastapi +openai \ No newline at end of file diff --git a/agents/ten_packages/extension/glue_python_async/extension.py b/agents/ten_packages/extension/glue_python_async/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..5aa8cddbc9568e3c7b3b5e6d908631e1d5275a05 --- /dev/null +++ b/agents/ten_packages/extension/glue_python_async/extension.py @@ -0,0 +1,579 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. 
+# +import asyncio +import traceback +import aiohttp +import json +import time +import re + +import numpy as np +from typing import List, Any, AsyncGenerator +from dataclasses import dataclass, field +from pydantic import BaseModel + +from ten import ( + AudioFrame, + VideoFrame, + AsyncTenEnv, + Cmd, + StatusCode, + CmdResult, + Data, +) + +from ten_ai_base.config import BaseConfig +from ten_ai_base.chat_memory import ( + ChatMemory, + EVENT_MEMORY_APPENDED, +) +from ten_ai_base.usage import ( + LLMUsage, + LLMCompletionTokensDetails, + LLMPromptTokensDetails, +) +from ten_ai_base.types import ( + LLMChatCompletionUserMessageParam, + LLMToolResult, + LLMCallCompletionArgs, + LLMDataCompletionArgs, + LLMToolMetadata, +) +from ten_ai_base.llm import ( + AsyncLLMBaseExtension, +) + +CMD_IN_FLUSH = "flush" +CMD_IN_ON_USER_JOINED = "on_user_joined" +CMD_IN_ON_USER_LEFT = "on_user_left" +CMD_OUT_FLUSH = "flush" +CMD_OUT_TOOL_CALL = "tool_call" + +DATA_IN_TEXT_DATA_PROPERTY_IS_FINAL = "is_final" +DATA_IN_TEXT_DATA_PROPERTY_TEXT = "text" + +DATA_OUT_TEXT_DATA_PROPERTY_TEXT = "text" +DATA_OUT_TEXT_DATA_PROPERTY_END_OF_SEGMENT = "end_of_segment" + +CMD_PROPERTY_RESULT = "tool_result" + + +def is_punctuation(char): + if char in [",", ",", ".", "。", "?", "?", "!", "!"]: + return True + return False + + +def parse_sentences(sentence_fragment, content): + sentences = [] + current_sentence = sentence_fragment + for char in content: + current_sentence += char + if is_punctuation(char): + stripped_sentence = current_sentence + if any(c.isalnum() for c in stripped_sentence): + sentences.append(stripped_sentence) + current_sentence = "" + + remain = current_sentence + return sentences, remain + + +class ToolCallFunction(BaseModel): + name: str | None = None + arguments: str | None = None + + +class ToolCall(BaseModel): + index: int + type: str = "function" + id: str | None = None + function: ToolCallFunction + + +class ToolCallResponse(BaseModel): + id: str + response: LLMToolResult + error: str | None = None + + +class Delta(BaseModel): + content: str | None = None + tool_calls: List[ToolCall] = None + + +class Choice(BaseModel): + delta: Delta = None + index: int + finish_reason: str | None + + +class ResponseChunk(BaseModel): + choices: List[Choice] + usage: LLMUsage | None = None + + +@dataclass +class GlueConfig(BaseConfig): + api_url: str = "http://localhost:8000/chat/completions" + token: str = "" + prompt: str = "" + max_history: int = 10 + greeting: str = "" + failure_info: str = "" + modalities: List[str] = field(default_factory=lambda: ["text"]) + rtm_enabled: bool = True + ssml_enabled: bool = False + context_enabled: bool = False + extra_context: dict = field(default_factory=dict) + enable_storage: bool = False + + +class AsyncGlueExtension(AsyncLLMBaseExtension): + def __init__(self, name): + super().__init__(name) + + self.config: GlueConfig = None + self.ten_env: AsyncTenEnv = None + self.loop: asyncio.AbstractEventLoop = None + self.stopped: bool = False + self.memory: ChatMemory = None + self.total_usage: LLMUsage = LLMUsage() + self.users_count = 0 + + self.completion_times = [] + self.connect_times = [] + self.first_token_times = [] + + self.remote_stream_id: int = 999 + + async def on_init(self, ten_env: AsyncTenEnv) -> None: + await super().on_init(ten_env) + ten_env.log_debug("on_init") + + async def on_start(self, ten_env: AsyncTenEnv) -> None: + await super().on_start(ten_env) + ten_env.log_debug("on_start") + + self.loop = asyncio.get_event_loop() + + self.config = await 
GlueConfig.create_async(ten_env=ten_env)
+        ten_env.log_info(f"config: {self.config}")
+
+        self.memory = ChatMemory(self.config.max_history)
+
+        if self.config.enable_storage:
+            [result, _] = await ten_env.send_cmd(Cmd.create("retrieve"))
+            if result.get_status_code() == StatusCode.OK:
+                try:
+                    history = json.loads(result.get_property_string("response"))
+                    for i in history:
+                        self.memory.put(i)
+                    ten_env.log_info(f"on retrieve context {history}")
+                except Exception as e:
+                    ten_env.log_error(f"Failed to handle retrieve result: {e}")
+            else:
+                ten_env.log_warn("Failed to retrieve content")
+
+        self.memory.on(EVENT_MEMORY_APPENDED, self._on_memory_appended)
+
+        self.ten_env = ten_env
+
+    async def on_stop(self, ten_env: AsyncTenEnv) -> None:
+        await super().on_stop(ten_env)
+        ten_env.log_debug("on_stop")
+
+        self.stopped = True
+        await self.queue.put(None)
+
+    async def on_deinit(self, ten_env: AsyncTenEnv) -> None:
+        await super().on_deinit(ten_env)
+        ten_env.log_debug("on_deinit")
+
+    async def on_cmd(self, ten_env: AsyncTenEnv, cmd: Cmd) -> None:
+        cmd_name = cmd.get_name()
+        ten_env.log_debug(f"on_cmd name {cmd_name}")
+
+        status = StatusCode.OK
+        detail = "success"
+
+        if cmd_name == CMD_IN_FLUSH:
+            await self.flush_input_items(ten_env)
+            await ten_env.send_cmd(Cmd.create(CMD_OUT_FLUSH))
+            ten_env.log_info("on flush")
+        elif cmd_name == CMD_IN_ON_USER_JOINED:
+            self.users_count += 1
+            # Send greeting when the first user joins
+            if self.config.greeting and self.users_count == 1:
+                self.send_text_output(ten_env, self.config.greeting, True)
+        elif cmd_name == CMD_IN_ON_USER_LEFT:
+            self.users_count -= 1
+        else:
+            await super().on_cmd(ten_env, cmd)
+            return
+
+        cmd_result = CmdResult.create(status)
+        cmd_result.set_property_string("detail", detail)
+        await ten_env.return_result(cmd_result, cmd)
+
+    async def on_call_chat_completion(
+        self, ten_env: AsyncTenEnv, **kargs: LLMCallCompletionArgs
+    ) -> Any:
+        raise RuntimeError("Not implemented")
+
+    async def on_data_chat_completion(
+        self, ten_env: AsyncTenEnv, **kargs: LLMDataCompletionArgs
+    ) -> None:
+        input_messages: LLMChatCompletionUserMessageParam = kargs.get("messages", [])
+
+        messages = []
+        if self.config.prompt:
+            messages.append({"role": "system", "content": self.config.prompt})
+
+        history = self.memory.get()
+        while history:
+            # Drop leading tool results and assistant tool-call stubs so the
+            # history sent upstream starts with a regular message.
+            if history[0].get("role") == "tool":
+                history = history[1:]
+                continue
+            if history[0].get("role") == "assistant" and history[0].get("tool_calls"):
+                history = history[1:]
+                continue
+            break
+
+        messages.extend(history)
+
+        if not input_messages:
+            ten_env.log_warn("No message in data")
+        else:
+            messages.extend(input_messages)
+            for i in input_messages:
+                self.memory.put(i)
+
+        def tool_dict(tool: LLMToolMetadata):
+            json_dict = {
+                "type": "function",
+                "function": {
+                    "name": tool.name,
+                    "description": tool.description,
+                    "parameters": {
+                        "type": "object",
+                        "properties": {},
+                        "required": [],
+                        "additionalProperties": False,
+                    },
+                    # "strict" belongs to the function object, matching the
+                    # ToolFunction schema of the example wrapper.
+                    "strict": True,
+                },
+            }
+
+            for param in tool.parameters:
+                json_dict["function"]["parameters"]["properties"][param.name] = {
+                    "type": param.type,
+                    "description": param.description,
+                }
+                if param.required:
+                    json_dict["function"]["parameters"]["required"].append(param.name)
+
+            return json_dict
+
+        def trim_xml(input_string):
+            return re.sub(r"<[^>]+>", "", input_string).strip()
+
+        tools = []
+        for tool in self.available_tools:
+            tools.append(tool_dict(tool))
+
+        total_output = ""
+        sentence_fragment = ""
+        calls = {}
+
+        sentences = []
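+        # Illustrative behavior of parse_sentences() used in the loop below:
+        #   parse_sentences("", "Hi there. How")  -> (["Hi there."], " How")
+        #   parse_sentences(" How", " are you?")  -> ([" How are you?"], "")
+        # Complete sentences are flushed out as they arrive; the remaining
+        # fragment is carried into the next streamed chunk.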
+        start_time = time.time()
+        first_token_time = None
+        response = self._stream_chat(messages=messages, tools=tools)
+        async for message in response:
+            self.ten_env.log_debug(f"content: {message}")
+            try:
+                c = ResponseChunk(**message)
+                if c.choices:
+                    if c.choices[0].delta.content:
+                        if first_token_time is None:
+                            first_token_time = time.time()
+                            self.first_token_times.append(first_token_time - start_time)
+
+                        content = c.choices[0].delta.content
+                        # Strip markup only from chunks that begin with an
+                        # SSML/XML tag.
+                        if self.config.ssml_enabled and content.startswith("<"):
+                            content = trim_xml(content)
+                        total_output += content
+                        sentences, sentence_fragment = parse_sentences(
+                            sentence_fragment, content
+                        )
+                        for s in sentences:
+                            await self._send_text(s)
+                    if c.choices[0].delta.tool_calls:
+                        self.ten_env.log_info(
+                            f"tool_calls: {c.choices[0].delta.tool_calls}"
+                        )
+                        for call in c.choices[0].delta.tool_calls:
+                            if call.index not in calls:
+                                calls[call.index] = ToolCall(
+                                    id=call.id,
+                                    index=call.index,
+                                    function=ToolCallFunction(name="", arguments=""),
+                                )
+                            if call.function.name:
+                                calls[call.index].function.name += call.function.name
+                            if call.function.arguments:
+                                calls[
+                                    call.index
+                                ].function.arguments += call.function.arguments
+                if c.usage:
+                    self.ten_env.log_info(f"usage: {c.usage}")
+                    await self._update_usage(c.usage)
+            except Exception as e:
+                self.ten_env.log_error(f"Failed to parse response: {message} {e}")
+                traceback.print_exc()
+        if sentence_fragment:
+            await self._send_text(sentence_fragment)
+        end_time = time.time()
+        self.completion_times.append(end_time - start_time)
+
+        if total_output:
+            self.memory.put({"role": "assistant", "content": total_output})
+
+        if calls:
+            tasks = []
+            tool_calls = []
+            for _, call in calls.items():
+                self.ten_env.log_info(f"tool call: {call}")
+                tool_calls.append(call.model_dump())
+                tasks.append(self.handle_tool_call(call))
+            self.memory.put({"role": "assistant", "tool_calls": tool_calls})
+            responses = await asyncio.gather(*tasks)
+            for r in responses:
+                content = r.response["content"]
+                self.ten_env.log_info(f"tool call response: {content} {r.id}")
+                self.memory.put(
+                    {
+                        "role": "tool",
+                        "content": json.dumps(content),
+                        "tool_call_id": r.id,
+                    }
+                )
+
+            # request again to let the model know the tool call results
+            await self.on_data_chat_completion(ten_env)
+
+        self.ten_env.log_info(f"total_output: {total_output} {calls}")
+
+    async def on_tools_update(
+        self, ten_env: AsyncTenEnv, tool: LLMToolMetadata
+    ) -> None:
+        # Implement the logic for tool updates
+        return await super().on_tools_update(ten_env, tool)
+
+    async def handle_tool_call(self, call: ToolCall) -> ToolCallResponse:
+        cmd: Cmd = Cmd.create(CMD_OUT_TOOL_CALL)
+        cmd.set_property_string("name", call.function.name)
+        cmd.set_property_from_json("arguments", call.function.arguments)
+
+        # Send the command and handle the result through the future
+        [result, _] = await self.ten_env.send_cmd(cmd)
+        if result.get_status_code() == StatusCode.OK:
+            tool_result: LLMToolResult = json.loads(
+                result.get_property_to_json(CMD_PROPERTY_RESULT)
+            )
+
+            self.ten_env.log_info(f"tool_result: {call} {tool_result}")
+            return ToolCallResponse(id=call.id, response=tool_result)
+        else:
+            self.ten_env.log_error("Tool call failed")
+            return ToolCallResponse(
+                id=call.id,
+                error=f"Tool call failed with status code {result.get_status_code()}",
+            )
+
+    async def on_data(self, ten_env: AsyncTenEnv, data: Data) -> None:
+        data_name = data.get_name()
+        ten_env.log_info(f"on_data name {data_name}")
+
+        is_final = False
+        input_text = ""
+        try:
+            is_final = data.get_property_bool(DATA_IN_TEXT_DATA_PROPERTY_IS_FINAL)
+        except Exception as err:
+            ten_env.log_info(
+                f"GetProperty optional {DATA_IN_TEXT_DATA_PROPERTY_IS_FINAL} failed, err: {err}"
+            )
+
+        try:
+            input_text = data.get_property_string(DATA_IN_TEXT_DATA_PROPERTY_TEXT)
+        except Exception as err:
+            ten_env.log_info(
+                f"GetProperty optional {DATA_IN_TEXT_DATA_PROPERTY_TEXT} failed, err: {err}"
+            )
+
+        if not is_final:
+            ten_env.log_info("ignore non-final input")
+            return
+        if not input_text:
+            ten_env.log_info("ignore empty text")
+            return
+
+        ten_env.log_info(f"OnData input text: [{input_text}]")
+
+        # Start an asynchronous task for handling chat completion
+        message = LLMChatCompletionUserMessageParam(role="user", content=input_text)
+        await self.queue_input_item(False, messages=[message])
+
+    async def on_audio_frame(
+        self, ten_env: AsyncTenEnv, audio_frame: AudioFrame
+    ) -> None:
+        pass
+
+    async def on_video_frame(
+        self, ten_env: AsyncTenEnv, video_frame: VideoFrame
+    ) -> None:
+        pass
+
+    async def _send_text(self, text: str) -> None:
+        data = Data.create("text_data")
+        data.set_property_string(DATA_OUT_TEXT_DATA_PROPERTY_TEXT, text)
+        data.set_property_bool(DATA_OUT_TEXT_DATA_PROPERTY_END_OF_SEGMENT, True)
+        asyncio.create_task(self.ten_env.send_data(data))
+
+    async def _stream_chat(
+        self, messages: List[Any], tools: List[Any]
+    ) -> AsyncGenerator[dict, None]:
+        async with aiohttp.ClientSession() as session:
+            try:
+                payload = {
+                    "messages": messages,
+                    "tools": tools,
+                    # key name matches ChatCompletionRequest.tool_choice in the
+                    # example wrapper and schema.yml
+                    "tool_choice": "auto" if tools else "none",
+                    "model": "gpt-3.5-turbo",
+                    "stream": True,
+                    "stream_options": {"include_usage": True},
+                    "ssml_enabled": self.config.ssml_enabled,
+                }
+                if self.config.context_enabled:
+                    payload["context"] = {**self.config.extra_context}
+                self.ten_env.log_info(f"payload before sending: {json.dumps(payload)}")
+                headers = {
+                    "Authorization": f"Bearer {self.config.token}",
+                    "Content-Type": "application/json",
+                }
+
+                start_time = time.time()
+                async with session.post(
+                    self.config.api_url, json=payload, headers=headers
+                ) as response:
+                    if response.status != 200:
+                        r = await response.json()
+                        self.ten_env.log_error(
+                            f"Received unexpected status {r} from the server."
+ ) + if self.config.failure_info: + await self._send_text(self.config.failure_info) + return + end_time = time.time() + self.connect_times.append(end_time - start_time) + + async for line in response.content: + if line: + l = line.decode("utf-8").strip() + if l.startswith("data:"): + content = l[5:].strip() + if content == "[DONE]": + break + self.ten_env.log_debug(f"content: {content}") + yield json.loads(content) + except Exception as e: + traceback.print_exc() + self.ten_env.log_error(f"Failed to handle {e}") + finally: + await session.close() + session = None + + async def _update_usage(self, usage: LLMUsage) -> None: + if not self.config.rtm_enabled: + return + + self.total_usage.completion_tokens += usage.completion_tokens + self.total_usage.prompt_tokens += usage.prompt_tokens + self.total_usage.total_tokens += usage.total_tokens + + if self.total_usage.completion_tokens_details is None: + self.total_usage.completion_tokens_details = LLMCompletionTokensDetails() + if self.total_usage.prompt_tokens_details is None: + self.total_usage.prompt_tokens_details = LLMPromptTokensDetails() + + if usage.completion_tokens_details: + self.total_usage.completion_tokens_details.accepted_prediction_tokens += ( + usage.completion_tokens_details.accepted_prediction_tokens + ) + self.total_usage.completion_tokens_details.audio_tokens += ( + usage.completion_tokens_details.audio_tokens + ) + self.total_usage.completion_tokens_details.reasoning_tokens += ( + usage.completion_tokens_details.reasoning_tokens + ) + self.total_usage.completion_tokens_details.rejected_prediction_tokens += ( + usage.completion_tokens_details.rejected_prediction_tokens + ) + + if usage.prompt_tokens_details: + self.total_usage.prompt_tokens_details.audio_tokens += ( + usage.prompt_tokens_details.audio_tokens + ) + self.total_usage.prompt_tokens_details.cached_tokens += ( + usage.prompt_tokens_details.cached_tokens + ) + + self.ten_env.log_info(f"total usage: {self.total_usage}") + + data = Data.create("llm_stat") + data.set_property_from_json("usage", json.dumps(self.total_usage.model_dump())) + if self.connect_times and self.completion_times and self.first_token_times: + data.set_property_from_json( + "latency", + json.dumps( + { + "connection_latency_95": np.percentile(self.connect_times, 95), + "completion_latency_95": np.percentile( + self.completion_times, 95 + ), + "first_token_latency_95": np.percentile( + self.first_token_times, 95 + ), + "connection_latency_99": np.percentile(self.connect_times, 99), + "completion_latency_99": np.percentile( + self.completion_times, 99 + ), + "first_token_latency_99": np.percentile( + self.first_token_times, 99 + ), + } + ), + ) + asyncio.create_task(self.ten_env.send_data(data)) + + async def _on_memory_appended(self, message: dict) -> None: + self.ten_env.log_info(f"Memory appended: {message}") + if not self.config.enable_storage: + return + + role = message.get("role") + stream_id = self.remote_stream_id if role == "user" else 0 + try: + d = Data.create("append") + d.set_property_string("text", message.get("content")) + d.set_property_string("role", role) + d.set_property_int("stream_id", stream_id) + asyncio.create_task(self.ten_env.send_data(d)) + except Exception as e: + self.ten_env.log_error(f"Error send append_context data {message} {e}") diff --git a/agents/ten_packages/extension/glue_python_async/manifest.json b/agents/ten_packages/extension/glue_python_async/manifest.json new file mode 100644 index 
0000000000000000000000000000000000000000..c81d7eea3e6f4a9b027418ada1419afdf2734c52 --- /dev/null +++ b/agents/ten_packages/extension/glue_python_async/manifest.json @@ -0,0 +1,107 @@ +{ + "type": "extension", + "name": "glue_python_async", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "package": { + "include": [ + "manifest.json", + "property.json", + "BUILD.gn", + "**.tent", + "**.py", + "README.md", + "tests/**" + ] + }, + "api": { + "property": { + "token": { + "type": "string" + }, + "api_url": { + "type": "string" + }, + "user_id": { + "type": "string" + }, + "prompt": { + "type": "string" + }, + "greeting": { + "type": "string" + }, + "failure_info": { + "type": "string" + }, + "modalities": { + "type": "array", + "items": { + "type": "string" + } + }, + "rtm_enabled": { + "type": "bool" + }, + "ssml_enabled": { + "type": "bool" + }, + "context_enabled": { + "type": "bool" + }, + "extra_context": { + "type": "object", + "properties": {} + } + }, + "data_in": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + } + } + } + ], + "data_out": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + } + } + }, + { + "name": "llm_stat", + "property": { + "usage": { + "type": "object", + "properties": {} + }, + "latency": { + "type": "object", + "properties": {} + } + } + } + ], + "cmd_in": [ + { + "name": "flush" + } + ], + "cmd_out": [ + { + "name": "flush" + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/glue_python_async/property.json b/agents/ten_packages/extension/glue_python_async/property.json new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/agents/ten_packages/extension/glue_python_async/property.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/agents/ten_packages/extension/glue_python_async/requirements.txt b/agents/ten_packages/extension/glue_python_async/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..296d654528b719e554528b956c4bf5a1516e812c --- /dev/null +++ b/agents/ten_packages/extension/glue_python_async/requirements.txt @@ -0,0 +1 @@ +numpy \ No newline at end of file diff --git a/agents/ten_packages/extension/glue_python_async/schema.yml b/agents/ten_packages/extension/glue_python_async/schema.yml new file mode 100644 index 0000000000000000000000000000000000000000..37a2b0b5d8bb4726b88fc29e2d98c14e0e862637 --- /dev/null +++ b/agents/ten_packages/extension/glue_python_async/schema.yml @@ -0,0 +1,391 @@ +openapi: 3.0.0 +info: + title: Streaming Chat Completion API with Multimedia Support + version: 1.0.0 + description: API for streaming chat completions with support for text, image, and audio content + +paths: + /chat/completions: + post: + summary: Create a streaming chat completion + description: Streams a chat completion response + operationId: createChatCompletion + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ChatCompletionRequest" + responses: + "200": + description: Successful response + content: + application/json: + schema: + oneOf: + - $ref: "#/components/schemas/ChatCompletionResponse" + - $ref: "#/components/schemas/ChatCompletionChunk" + x-stream: true + +components: + schemas: + ChatCompletionRequest: + type: object + required: + - messages + properties: + context: + type: object + model: + type: string + example: 
"gpt-4o" + messages: + type: array + items: + oneOf: + - $ref: "#/components/schemas/SystemMessage" + - $ref: "#/components/schemas/UserMessage" + - $ref: "#/components/schemas/AssistantMessage" + - $ref: "#/components/schemas/ToolMessage" + response_format: + $ref: "#/components/schemas/ResponseFormat" + modalities: + type: array + items: + type: string + enum: [text, audio] + default: [text] + audio: + type: object + properties: + voice: + type: string + tools: + type: array + items: + $ref: "#/components/schemas/Tool" + tool_choice: + oneOf: + - type: string + enum: [auto, none, required] + - type: object + properties: + type: + type: string + enum: [function] + function: + type: object + parallel_tool_calls: + type: boolean + default: true + stream: + type: boolean + default: true + ssml_enabled: + type: boolean + default: false + + SystemMessage: + type: object + required: + - role + - content + properties: + name: + type: string + role: + type: string + enum: [system] + content: + oneOf: + - type: string + - type: array + items: + type: string + + UserMessage: + type: object + required: + - role + - content + properties: + name: + type: string + role: + type: string + enum: [user] + content: + oneOf: + - type: string + - type: array + items: + oneOf: + - $ref: "#/components/schemas/TextContent" + - $ref: "#/components/schemas/ImageContent" + - $ref: "#/components/schemas/AudioContent" + + AssistantMessage: + type: object + required: + - role + - content + properties: + name: + type: string + role: + type: string + enum: [system] + audio: + type: object + properties: + id: + type: string + content: + oneOf: + - type: string + - type: array + items: + $ref: "#/components/schemas/TextContent" + tool_calls: + type: object + properties: + id: + type: string + type: + type: string + enum: [function] + function: + type: object + properties: + name: + type: string + arguments: + type: string + + ToolMessage: + type: object + required: + - role + - content + - tool_call_id + properties: + role: + type: string + enum: [tool] + content: + oneOf: + - type: string + - type: array + items: + type: string + tool_call_id: + type: string + + TextContent: + type: object + required: + - type + - text + properties: + type: + type: string + enum: [text] + text: + type: string + + ImageContent: + type: object + required: + - type + - image_url + properties: + type: + type: string + enum: [image_url] + image_url: + type: string + format: uri + + AudioContent: + type: object + required: + - type + - input_audio + properties: + type: + type: string + enum: [input_audio] + input_audio: + type: object + properties: + data: + type: string + format: + type: string + + Tool: + type: object + properties: + type: + type: string + enum: [function] + function: + type: object + required: + - name + properties: + name: + type: string + description: + type: string + parameters: + type: object + strict: + type: boolean + default: false + + ResponseFormat: + type: object + properties: + type: + type: string + enum: [json_schema] + json_schema: + type: object + properties: + name: + type: string + schema: + type: object + + ChatCompletionResponse: + type: object + properties: + id: + type: string + object: + type: string + created: + type: integer + model: + type: string + usage: + $ref: "#/components/schemas/Usage" + choices: + type: array + items: + $ref: "#/components/schemas/Choice" + + ChatCompletionChunk: + type: object + properties: + id: + type: string + object: + type: string + created: + type: integer + model: + 
type: string + usage: + $ref: "#/components/schemas/Usage" + choices: + type: array + items: + $ref: "#/components/schemas/DeltaChoice" + + Usage: + type: object + properties: + completion_tokens: + type: integer + prompt_tokens: + type: integer + total_tokens: + type: integer + completion_tokens_details: + type: object + properties: + accepted_prediction_tokens: + type: integer + audio_tokens: + type: integer + reasoning_tokens: + type: integer + rejected_prediction_tokens: + type: integer + prompt_tokens_details: + type: object + properties: + audio_tokens: + type: integer + cached_tokens: + type: integer + + Choice: + type: object + properties: + message: + $ref: "#/components/schemas/ResponseMessage" + index: + type: integer + finish_reason: + type: string + + DeltaChoice: + type: object + properties: + delta: + $ref: "#/components/schemas/ResponseMessage" + index: + type: integer + finish_reason: + type: string + + Delta: + type: object + properties: + content: + type: string + + ResponseMessage: + type: object + properties: + content: + type: string + refusal: + type: string + tool_calls: + $ref: "#/components/schemas/ToolCall" + role: + type: string + audio: + $ref: "#/components/schemas/Audio" + + ToolCall: + type: object + properties: + id: + type: string + type: + type: string + enum: [function] + function: + type: object + properties: + name: + type: string + arguments: + type: string + + Audio: + type: object + properties: + id: + type: string + expires_at: + type: integer + data: + type: string + transcript: + type: string diff --git a/agents/ten_packages/extension/interrupt_detector/extension.go b/agents/ten_packages/extension/interrupt_detector/extension.go new file mode 100644 index 0000000000000000000000000000000000000000..2048edbd5e6c0fe4cece9130df2843489eaa9c61 --- /dev/null +++ b/agents/ten_packages/extension/interrupt_detector/extension.go @@ -0,0 +1,71 @@ +/** + * + * Agora Real Time Engagement + * Created by Wei Hu in 2022-10. + * Copyright (c) 2024 Agora IO. All rights reserved. + * + */ +// Note that this is just an example extension written in the GO programming +// language, so the package name does not equal to the containing directory +// name. However, it is not common in Go. +package extension + +import ( + "fmt" + + "ten_framework/ten" +) + +const ( + textDataTextField = "text" + textDataFinalField = "is_final" + + cmdNameFlush = "flush" +) + +type interruptDetectorExtension struct { + ten.DefaultExtension +} + +func newExtension(name string) ten.Extension { + return &interruptDetectorExtension{} +} + +// OnData receives data from ten graph. 
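+// It emits a downstream "flush" cmd as soon as the incoming text is final or
+// long enough to treat as an interruption, so in-flight generation can be
+// cancelled promptly.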
+// current supported data: +// - name: text_data +// example: +// {name: text_data, properties: {text: "hello", is_final: false} +func (p *interruptDetectorExtension) OnData( + tenEnv ten.TenEnv, + data ten.Data, +) { + text, err := data.GetPropertyString(textDataTextField) + if err != nil { + tenEnv.LogWarn(fmt.Sprintf("OnData GetProperty %s error: %v", textDataTextField, err)) + return + } + + final, err := data.GetPropertyBool(textDataFinalField) + if err != nil { + tenEnv.LogWarn(fmt.Sprintf("OnData GetProperty %s error: %v", textDataFinalField, err)) + return + } + + tenEnv.LogDebug(fmt.Sprintf("OnData %s: %s %s: %t", textDataTextField, text, textDataFinalField, final)) + + if final || len(text) >= 2 { + flushCmd, _ := ten.NewCmd(cmdNameFlush) + tenEnv.SendCmd(flushCmd, nil) + + tenEnv.LogInfo(fmt.Sprintf("sent cmd: %s", cmdNameFlush)) + } +} + +func init() { + // Register addon + ten.RegisterAddonAsExtension( + "interrupt_detector", + ten.NewDefaultExtensionAddon(newExtension), + ) +} diff --git a/agents/ten_packages/extension/interrupt_detector/go.mod b/agents/ten_packages/extension/interrupt_detector/go.mod new file mode 100644 index 0000000000000000000000000000000000000000..4f384c298f69dd2bb56bf26603c53cfad48af865 --- /dev/null +++ b/agents/ten_packages/extension/interrupt_detector/go.mod @@ -0,0 +1,7 @@ +module interrupt_detector + +go 1.20 + +replace ten_framework => ../../system/ten_runtime_go/interface + +require ten_framework v0.0.0-00010101000000-000000000000 diff --git a/agents/ten_packages/extension/interrupt_detector/manifest.json b/agents/ten_packages/extension/interrupt_detector/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..dc3412c62b9a49ec903429925517743b241280ea --- /dev/null +++ b/agents/ten_packages/extension/interrupt_detector/manifest.json @@ -0,0 +1,32 @@ +{ + "type": "extension", + "name": "interrupt_detector", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_go", + "version": "0.8" + } + ], + "api": { + "data_in": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + }, + "is_final": { + "type": "bool" + } + } + } + ], + "cmd_out": [ + { + "name": "flush" + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/interrupt_detector/property.json b/agents/ten_packages/extension/interrupt_detector/property.json new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/agents/ten_packages/extension/interrupt_detector/property.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/agents/ten_packages/extension/interrupt_detector_python/__init__.py b/agents/ten_packages/extension/interrupt_detector_python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bdddf6e75e6164b26d5829c2f381f85694ca16a0 --- /dev/null +++ b/agents/ten_packages/extension/interrupt_detector_python/__init__.py @@ -0,0 +1 @@ +from . import addon \ No newline at end of file diff --git a/agents/ten_packages/extension/interrupt_detector_python/addon.py b/agents/ten_packages/extension/interrupt_detector_python/addon.py new file mode 100644 index 0000000000000000000000000000000000000000..4090b4f34fb1ae0657bcc1012d0abe15c2d1e89c --- /dev/null +++ b/agents/ten_packages/extension/interrupt_detector_python/addon.py @@ -0,0 +1,22 @@ +# +# +# Agora Real Time Engagement +# Created by XinHui Li in 2024-07. +# Copyright (c) 2024 Agora IO. All rights reserved. 
+# +# + +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + +@register_addon_as_extension("interrupt_detector_python") +class InterruptDetectorExtensionAddon(Addon): + def on_create_instance(self, ten: TenEnv, addon_name: str, context) -> None: + ten.log_info("on_create_instance") + + from .extension import InterruptDetectorExtension + + ten.on_create_instance_done(InterruptDetectorExtension(addon_name), context) diff --git a/agents/ten_packages/extension/interrupt_detector_python/extension.py b/agents/ten_packages/extension/interrupt_detector_python/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..a1ad1e7b11093e04d6c1e83d5dfe16f1e5398a34 --- /dev/null +++ b/agents/ten_packages/extension/interrupt_detector_python/extension.py @@ -0,0 +1,97 @@ +# +# +# Agora Real Time Engagement +# Created by XinHui Li in 2024-07. +# Copyright (c) 2024 Agora IO. All rights reserved. +# +# + +from ten import ( + Extension, + TenEnv, + Cmd, + Data, + StatusCode, + CmdResult, +) + +CMD_NAME_FLUSH = "flush" + +TEXT_DATA_TEXT_FIELD = "text" +TEXT_DATA_FINAL_FIELD = "is_final" + + +class InterruptDetectorExtension(Extension): + def on_start(self, ten: TenEnv) -> None: + ten.log_info("on_start") + ten.on_start_done() + + def on_stop(self, ten: TenEnv) -> None: + ten.log_info("on_stop") + ten.on_stop_done() + + def send_flush_cmd(self, ten: TenEnv) -> None: + flush_cmd = Cmd.create(CMD_NAME_FLUSH) + ten.send_cmd( + flush_cmd, + lambda ten, result, _: ten.log_info("send_cmd done"), + ) + + ten.log_info(f"sent cmd: {CMD_NAME_FLUSH}") + + def on_cmd(self, ten: TenEnv, cmd: Cmd) -> None: + cmd_name = cmd.get_name() + ten.log_info("on_cmd name {}".format(cmd_name)) + + # flush whatever cmd incoming at the moment + self.send_flush_cmd(ten) + + # then forward the cmd to downstream + cmd_json = cmd.get_property_to_json() + new_cmd = Cmd.create(cmd_name) + new_cmd.set_property_from_json(None, cmd_json) + ten.send_cmd( + new_cmd, + lambda ten, result, _: ten.log_info("send_cmd done"), + ) + + cmd_result = CmdResult.create(StatusCode.OK) + ten.return_result(cmd_result, cmd) + + def on_data(self, ten: TenEnv, data: Data) -> None: + """ + on_data receives data from ten graph. 
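+        It mirrors the Go interrupt_detector extension: once the text is final
+        or long enough to treat as an interruption, a flush cmd is sent
+        downstream, and the text_data is then forwarded unchanged.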
+ current supported data: + - name: text_data + example: + {name: text_data, properties: {text: "hello", is_final: false} + """ + ten.log_info("on_data") + + try: + text = data.get_property_string(TEXT_DATA_TEXT_FIELD) + except Exception as e: + ten.log_warn( + f"on_data get_property_string {TEXT_DATA_TEXT_FIELD} error: {e}" + ) + return + + try: + final = data.get_property_bool(TEXT_DATA_FINAL_FIELD) + except Exception as e: + ten.log_warn( + f"on_data get_property_bool {TEXT_DATA_FINAL_FIELD} error: {e}" + ) + return + + ten.log_debug( + f"on_data {TEXT_DATA_TEXT_FIELD}: {text} {TEXT_DATA_FINAL_FIELD}: {final}" + ) + + if final or len(text) >= 2: + self.send_flush_cmd(ten) + + d = Data.create("text_data") + d.set_property_bool(TEXT_DATA_FINAL_FIELD, final) + d.set_property_string(TEXT_DATA_TEXT_FIELD, text) + ten.send_data(d) diff --git a/agents/ten_packages/extension/interrupt_detector_python/manifest.json b/agents/ten_packages/extension/interrupt_detector_python/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..262b066233660c09fdadc38eaa56242b94922237 --- /dev/null +++ b/agents/ten_packages/extension/interrupt_detector_python/manifest.json @@ -0,0 +1,45 @@ +{ + "type": "extension", + "name": "interrupt_detector_python", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "api": { + "data_in": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + }, + "is_final": { + "type": "bool" + } + } + } + ], + "cmd_out": [ + { + "name": "flush" + } + ], + "data_out": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + }, + "is_final": { + "type": "bool" + } + } + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/interrupt_detector_python/property.json b/agents/ten_packages/extension/interrupt_detector_python/property.json new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/agents/ten_packages/extension/interrupt_detector_python/property.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/agents/ten_packages/extension/llama_index_chat_engine/__init__.py b/agents/ten_packages/extension/llama_index_chat_engine/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f3c731cdd58a08acbf5c417ff0e4e5fea0afcb0f --- /dev/null +++ b/agents/ten_packages/extension/llama_index_chat_engine/__init__.py @@ -0,0 +1 @@ +from . 
import addon diff --git a/agents/ten_packages/extension/llama_index_chat_engine/addon.py b/agents/ten_packages/extension/llama_index_chat_engine/addon.py new file mode 100644 index 0000000000000000000000000000000000000000..9ef17901d8034246a369a2c29edf116f047c91b0 --- /dev/null +++ b/agents/ten_packages/extension/llama_index_chat_engine/addon.py @@ -0,0 +1,14 @@ +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + + +@register_addon_as_extension("llama_index_chat_engine") +class LlamaIndexChatEngineExtensionAddon(Addon): + def on_create_instance(self, ten: TenEnv, addon_name: str, context): + from .extension import LlamaIndexExtension + + ten.log_info("on_create_instance") + ten.on_create_instance_done(LlamaIndexExtension(addon_name), context) diff --git a/agents/ten_packages/extension/llama_index_chat_engine/extension.py b/agents/ten_packages/extension/llama_index_chat_engine/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..8ad2f4b8eca73a900f3bac91c5839d4ab36910e4 --- /dev/null +++ b/agents/ten_packages/extension/llama_index_chat_engine/extension.py @@ -0,0 +1,272 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-05. +# Copyright (c) 2024 Agora IO. All rights reserved. +# +# +from ten import ( + Extension, + TenEnv, + Cmd, + Data, + StatusCode, + CmdResult, +) +import queue, threading +from datetime import datetime + +PROPERTY_CHAT_MEMORY_TOKEN_LIMIT = "chat_memory_token_limit" +PROPERTY_GREETING = "greeting" + +TASK_TYPE_CHAT_REQUEST = "chat_request" +TASK_TYPE_GREETING = "greeting" + + +class LlamaIndexExtension(Extension): + def __init__(self, name: str): + super().__init__(name) + self.queue = queue.Queue() + self.thread = None + self.stop = False + + self.outdate_ts = datetime.now() + self.outdate_ts_lock = threading.Lock() + + self.collection_name = "" + self.chat_memory_token_limit = 3000 + self.chat_memory = None + + def _send_text_data(self, ten: TenEnv, text: str, end_of_segment: bool): + try: + output_data = Data.create("text_data") + output_data.set_property_string("text", text) + output_data.set_property_bool("end_of_segment", end_of_segment) + ten.send_data(output_data) + ten.log_info(f"text [{text}] end_of_segment {end_of_segment} sent") + except Exception as err: + ten.log_info( + f"text [{text}] end_of_segment {end_of_segment} send failed, err {err}" + ) + + def on_start(self, ten: TenEnv) -> None: + ten.log_info("on_start") + + greeting = None + try: + greeting = ten.get_property_string(PROPERTY_GREETING) + except Exception as err: + ten.log_warn(f"get {PROPERTY_GREETING} property failed, err: {err}") + + try: + self.chat_memory_token_limit = ten.get_property_int( + PROPERTY_CHAT_MEMORY_TOKEN_LIMIT + ) + except Exception as err: + ten.log_warn( + f"get {PROPERTY_CHAT_MEMORY_TOKEN_LIMIT} property failed, err: {err}" + ) + + self.thread = threading.Thread(target=self.async_handle, args=[ten]) + self.thread.start() + + # enable chat memory + from llama_index.core.storage.chat_store import SimpleChatStore + from llama_index.core.memory import ChatMemoryBuffer + + self.chat_memory = ChatMemoryBuffer.from_defaults( + token_limit=self.chat_memory_token_limit, + chat_store=SimpleChatStore(), + ) + + # Send greeting if available + if greeting is not None: + self._send_text_data(ten, greeting, True) + + ten.on_start_done() + + def on_stop(self, ten: TenEnv) -> None: + ten.log_info("on_stop") + + self.stop = True + self.flush() + self.queue.put(None) + if self.thread is not None: + self.thread.join() + 
self.thread = None
+        self.chat_memory = None
+
+        ten.on_stop_done()
+
+    def on_cmd(self, ten: TenEnv, cmd: Cmd) -> None:
+        cmd_name = cmd.get_name()
+        ten.log_info(f"on_cmd {cmd_name}")
+        if cmd_name == "file_chunked":
+            coll = cmd.get_property_string("collection")
+
+            # only update the queried collection if none is selected yet
+            if len(self.collection_name) == 0:
+                ten.log_info(
+                    f"collection for querying has been updated from {self.collection_name} to {coll}"
+                )
+                self.collection_name = coll
+            else:
+                ten.log_info(
+                    f"new collection {coll} incoming but won't change current collection_name {self.collection_name}"
+                )
+
+            # notify user
+            file_chunked_text = "Your document has been processed. You can now start asking questions about your document. "
+            # self._send_text_data(ten, file_chunked_text, True)
+            self.queue.put((file_chunked_text, datetime.now(), TASK_TYPE_GREETING))
+        elif cmd_name == "file_chunk":
+            self.collection_name = ""  # clear current collection
+
+            # notify user
+            file_chunk_text = "Your document has been received. Please wait a moment while we process it for you. "
+            # self._send_text_data(ten, file_chunk_text, True)
+            self.queue.put((file_chunk_text, datetime.now(), TASK_TYPE_GREETING))
+        elif cmd_name == "update_querying_collection":
+            coll = cmd.get_property_string("collection")
+            ten.log_info(
+                f"collection for querying has been updated from {self.collection_name} to {coll}"
+            )
+            self.collection_name = coll
+
+            # notify user
+            update_querying_collection_text = "Your document has been updated. "
+            if len(self.collection_name) > 0:
+                update_querying_collection_text += (
+                    "You can now start asking questions about your document. "
+                )
+            # self._send_text_data(ten, update_querying_collection_text, True)
+            self.queue.put(
+                (update_querying_collection_text, datetime.now(), TASK_TYPE_GREETING)
+            )
+
+        elif cmd_name == "flush":
+            self.flush()
+            ten.send_cmd(Cmd.create("flush"), None)
+
+        cmd_result = CmdResult.create(StatusCode.OK)
+        cmd_result.set_property_string("detail", "ok")
+        ten.return_result(cmd_result, cmd)
+
+    def on_data(self, ten: TenEnv, data: Data) -> None:
+        is_final = data.get_property_bool("is_final")
+        if not is_final:
+            ten.log_info("on_data ignore non final")
+            return
+
+        input_text = data.get_property_string("text")
+        if len(input_text) == 0:
+            ten.log_info("on_data ignore empty text")
+            return
+
+        ts = datetime.now()
+
+        ten.log_info(f"on_data text [{input_text}], ts [{ts}]")
+        self.queue.put((input_text, ts, TASK_TYPE_CHAT_REQUEST))
+
+    def async_handle(self, ten: TenEnv):
+        ten.log_info("async_handle started")
+        while not self.stop:
+            try:
+                value = self.queue.get()
+                if value is None:
+                    break
+                input_text, ts, task_type = value
+
+                if ts < self.get_outdated_ts():
+                    ten.log_info(
+                        f"text [{input_text}] ts [{ts}] task_type [{task_type}] dropped due to outdated"
+                    )
+                    continue
+
+                if task_type == TASK_TYPE_GREETING:
+                    # send greeting text directly
+                    self._send_text_data(ten, input_text, True)
+                    continue
+
+                ten.log_info(f"process input text [{input_text}] ts [{ts}]")
+
+                # lazy import packages which require a long time to load
+                from .llama_llm import LlamaLLM
+                from .llama_retriever import LlamaRetriever
+
+                # prepare chat engine
+                chat_engine = None
+                if len(self.collection_name) > 0:
+                    from llama_index.core.chat_engine import ContextChatEngine
+
+                    chat_engine = ContextChatEngine.from_defaults(
+                        llm=LlamaLLM(ten=ten),
+                        retriever=LlamaRetriever(ten=ten, coll=self.collection_name),
+                        memory=self.chat_memory,
+                        system_prompt=(
+                            # "You are an expert Q&A system
that is trusted around the world.\n" + "You are a voice assistant who talks in a conversational way and can chat with me like my friends. \n" + "I will speak to you in English or Chinese, and you will answer in the corrected and improved version of my text with the language I use. \n" + "Don’t talk like a robot, instead I would like you to talk like a real human with emotions. \n" + "I will use your answer for text-to-speech, so don’t return me any meaningless characters. \n" + "I want you to be helpful, when I’m asking you for advice, give me precise, practical and useful advice instead of being vague. \n" + "When giving me a list of options, express the options in a narrative way instead of bullet points.\n" + "Always answer the query using the provided context information, " + "and not prior knowledge.\n" + "Some rules to follow:\n" + "1. Never directly reference the given context in your answer.\n" + "2. Avoid statements like 'Based on the context, ...' or " + "'The context information ...' or anything along " + "those lines." + ), + ) + else: + from llama_index.core.chat_engine import SimpleChatEngine + + chat_engine = SimpleChatEngine.from_defaults( + llm=LlamaLLM(ten=ten), + system_prompt=( + "You are a voice assistant who talks in a conversational way and can chat with me like my friends. \n" + "I will speak to you in English or Chinese, and you will answer in the corrected and improved version of my text with the language I use. \n" + "Don’t talk like a robot, instead I would like you to talk like a real human with emotions. \n" + "I will use your answer for text-to-speech, so don’t return me any meaningless characters. \n" + "I want you to be helpful, when I’m asking you for advice, give me precise, practical and useful advice instead of being vague. 
\n" + "When giving me a list of options, express the options in a narrative way instead of bullet points.\n" + ), + memory=self.chat_memory, + ) + + resp = chat_engine.stream_chat(input_text) + for cur_token in resp.response_gen: + if self.stop: + break + if ts < self.get_outdated_ts(): + ten.log_info( + "stream_chat coming responses dropped due to outdated for input text [%s] ts [%s] ", + input_text, + ts, + ) + break + text = str(cur_token) + + # send out + self._send_text_data(ten, text, False) + + # send out end_of_segment + self._send_text_data(ten, "", True) + except Exception as e: + ten.log_error(str(e)) + ten.log_info("async_handle stoped") + + def flush(self): + with self.outdate_ts_lock: + self.outdate_ts = datetime.now() + + while not self.queue.empty(): + self.queue.get() + + def get_outdated_ts(self): + with self.outdate_ts_lock: + return self.outdate_ts + diff --git a/agents/ten_packages/extension/llama_index_chat_engine/llama_embedding.py b/agents/ten_packages/extension/llama_index_chat_engine/llama_embedding.py new file mode 100644 index 0000000000000000000000000000000000000000..02ad23f0ae9c833e702e525eda001a40e3e76bb8 --- /dev/null +++ b/agents/ten_packages/extension/llama_index_chat_engine/llama_embedding.py @@ -0,0 +1,63 @@ +from typing import Any, List +import threading +from llama_index.core.embeddings import BaseEmbedding +import json +from ten import ( + Cmd, + CmdResult, + TenEnv, +) + +EMBED_CMD = "embed" + + +def embed_from_resp(cmd_result: CmdResult) -> List[float]: + embedding_output_json = cmd_result.get_property_to_json("embedding") + return json.loads(embedding_output_json) + + +class LlamaEmbedding(BaseEmbedding): + ten: Any + + def __init__(self, ten: TenEnv): + """Creates a new Llama embedding interface.""" + super().__init__() + self.ten = ten + + @classmethod + def class_name(cls) -> str: + return "llama_embedding" + + async def _aget_query_embedding(self, query: str) -> List[float]: + return self._get_query_embedding(query) + + async def _aget_text_embedding(self, text: str) -> List[float]: + return self._get_text_embedding(text) + + def _get_query_embedding(self, query: str) -> List[float]: + self.ten.log_info(f"LlamaEmbedding generate embeddings for the query: {query}") + wait_event = threading.Event() + resp: List[float] + + def callback(_, result, __): + nonlocal resp + nonlocal wait_event + + self.ten.log_debug("LlamaEmbedding embedding received") + resp = embed_from_resp(result) + wait_event.set() + + cmd_out = Cmd.create(EMBED_CMD) + cmd_out.set_property_string("input", query) + + self.ten.send_cmd(cmd_out, callback) + wait_event.wait() + return resp + + def _get_text_embedding(self, text: str) -> List[float]: + return self._get_query_embedding(text) + + # for texts embedding, will not be called in this module + def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]: + self.ten.log_warn("not implemented") + return [] diff --git a/agents/ten_packages/extension/llama_index_chat_engine/llama_llm.py b/agents/ten_packages/extension/llama_index_chat_engine/llama_llm.py new file mode 100644 index 0000000000000000000000000000000000000000..2c37344442450b0c5771b9965141ad1107314dad --- /dev/null +++ b/agents/ten_packages/extension/llama_index_chat_engine/llama_llm.py @@ -0,0 +1,144 @@ +from typing import Any, Sequence +import json, queue +import threading + +from llama_index.core.base.llms.types import ( + LLMMetadata, + MessageRole, + ChatMessage, + ChatResponse, + CompletionResponse, + ChatResponseGen, + CompletionResponseGen, +) + +from 
llama_index.core.llms.callbacks import llm_chat_callback, llm_completion_callback + +from llama_index.core.llms.custom import CustomLLM +from ten import Cmd, StatusCode, CmdResult, TenEnv + + +def chat_from_llama_response(cmd_result: CmdResult) -> ChatResponse | None: + status = cmd_result.get_status_code() + if status != StatusCode.OK: + return None + text_data = cmd_result.get_property_string("text") + return ChatResponse(message=ChatMessage(content=text_data)) + + +def _messages_str_from_chat_messages(messages: Sequence[ChatMessage]) -> str: + messages_list = [] + for message in messages: + messages_list.append( + {"role": message.role, "content": "{}".format(message.content)} + ) + return json.dumps(messages_list, ensure_ascii=False) + + +class LlamaLLM(CustomLLM): + ten: Any + + def __init__(self, ten: TenEnv): + """Creates a new Llama model interface.""" + super().__init__() + self.ten = ten + + @property + def metadata(self) -> LLMMetadata: + return LLMMetadata( + context_window=1024, + num_output=512, + model_name="llama_llm", + is_chat_model=True, + ) + + @llm_chat_callback() + def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: + self.ten.log_debug("LlamaLLM chat start") + + resp: ChatResponse + wait_event = threading.Event() + + def callback(_, result, __): + self.ten.log_debug("LlamaLLM chat callback done") + nonlocal resp + nonlocal wait_event + resp = chat_from_llama_response(result) + wait_event.set() + + messages_str = _messages_str_from_chat_messages(messages) + + cmd = Cmd.create("call_chat") + cmd.set_property_string("messages", messages_str) + cmd.set_property_bool("stream", False) + self.ten.log_info( + f"LlamaLLM chat send_cmd {cmd.get_name()}, messages {messages_str}" + ) + + self.ten.send_cmd(cmd, callback) + wait_event.wait() + return resp + + @llm_completion_callback() + def complete( + self, prompt: str, formatted: bool = False, **kwargs: Any + ) -> CompletionResponse: + raise NotImplementedError("LlamaLLM complete hasn't been implemented yet") + + @llm_chat_callback() + def stream_chat( + self, messages: Sequence[ChatMessage], **kwargs: Any + ) -> ChatResponseGen: + self.ten.log_debug("LlamaLLM stream_chat start") + + cur_tokens = "" + resp_queue = queue.Queue() + + def gen() -> ChatResponseGen: + while True: + delta_text = resp_queue.get() + if delta_text is None: + break + + yield ChatResponse( + message=ChatMessage(content=delta_text, role=MessageRole.ASSISTANT), + delta=delta_text, + ) + + def callback(_, result, __): + nonlocal cur_tokens + nonlocal resp_queue + + status = result.get_status_code() + if status != StatusCode.OK: + self.ten.log_warn(f"LlamaLLM stream_chat callback status {status}") + resp_queue.put(None) + return + + cur_tokens = result.get_property_string("text") + self.ten.log_debug(f"LlamaLLM stream_chat callback text [{cur_tokens}]") + resp_queue.put(cur_tokens) + if result.get_is_final(): + resp_queue.put(None) + + messages_str = _messages_str_from_chat_messages(messages) + + cmd = Cmd.create("call_chat") + cmd.set_property_string("messages", messages_str) + cmd.set_property_bool("stream", True) + self.ten.log_info( + f"LlamaLLM stream_chat send_cmd {cmd.get_name()}, messages {messages_str}" + ) + self.ten.send_cmd(cmd, callback) + return gen() + + def stream_complete( + self, prompt: str, formatted: bool = False, **kwargs: Any + ) -> CompletionResponseGen: + raise NotImplementedError( + "LlamaLLM stream_complete hasn't been implemented yet" + ) + + @classmethod + def class_name(cls) -> str: + return 
"llama_llm" diff --git a/agents/ten_packages/extension/llama_index_chat_engine/llama_retriever.py b/agents/ten_packages/extension/llama_index_chat_engine/llama_retriever.py new file mode 100644 index 0000000000000000000000000000000000000000..b7c511c57ddbd7a48c8c8cfc5b0ae9ac8d78b8b7 --- /dev/null +++ b/agents/ten_packages/extension/llama_index_chat_engine/llama_retriever.py @@ -0,0 +1,85 @@ +import json, threading +from typing import Any, List +from llama_index.core.schema import QueryBundle, TextNode +from llama_index.core.schema import NodeWithScore +from llama_index.core.retrievers import BaseRetriever + +from .llama_embedding import LlamaEmbedding +from ten import ( + TenEnv, + Cmd, + StatusCode, + CmdResult, +) + + +def format_node_result(ten: TenEnv, cmd_result: CmdResult) -> List[NodeWithScore]: + ten.log_info(f"LlamaRetriever retrieve response {cmd_result.to_json()}") + status = cmd_result.get_status_code() + try: + contents_json = cmd_result.get_property_to_json("response") + except Exception as e: + ten.log_warn(f"Failed to get response from cmd_result: {e}") + return [ + NodeWithScore( + node=TextNode(), + score=0.0, + ) + ] + contents = json.loads(contents_json) + if status != StatusCode.OK or len(contents) == 0: + return [ + NodeWithScore( + node=TextNode(), + score=0.0, + ) + ] + + nodes = [] + for result in contents: + text_node = TextNode( + text=result["content"], + ) + nodes.append(NodeWithScore(node=text_node, score=result["score"])) + return nodes + + +class LlamaRetriever(BaseRetriever): + ten: Any + embed_model: LlamaEmbedding + + def __init__(self, ten: TenEnv, coll: str): + super().__init__() + try: + self.ten = ten + self.embed_model = LlamaEmbedding(ten=ten) + self.collection_name = coll + except Exception as e: + ten.log_error(f"Failed to initialize LlamaRetriever: {e}") + + def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]: + self.ten.log_info(f"LlamaRetriever retrieve: {query_bundle.to_json}") + + wait_event = threading.Event() + resp: List[NodeWithScore] = [] + + def cmd_callback(_, result, __): + nonlocal resp + nonlocal wait_event + resp = format_node_result(self.ten, result) + wait_event.set() + self.ten.log_debug("LlamaRetriever callback done") + + embedding = self.embed_model.get_query_embedding(query=query_bundle.query_str) + + query_cmd = Cmd.create("query_vector") + query_cmd.set_property_string("collection_name", self.collection_name) + query_cmd.set_property_int("top_k", 3) + query_cmd.set_property_from_json("embedding", json.dumps(embedding)) + self.ten.log_info( + f"LlamaRetriever send_cmd, collection_name: {self.collection_name}, embedding len: {len(embedding)}" + ) + self.ten.send_cmd(query_cmd, cmd_callback) + + wait_event.wait() + return resp diff --git a/agents/ten_packages/extension/llama_index_chat_engine/manifest.json b/agents/ten_packages/extension/llama_index_chat_engine/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..0da035e000170b71be408adca05df09447c8e2cc --- /dev/null +++ b/agents/ten_packages/extension/llama_index_chat_engine/manifest.json @@ -0,0 +1,172 @@ +{ + "type": "extension", + "name": "llama_index_chat_engine", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "api": { + "property": { + "chat_memory_token_limit": { + "type": "int32" + }, + "greeting": { + "type": "string" + } + }, + "data_in": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + }, + "is_final": { + 
"type": "bool" + } + } + } + ], + "data_out": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + }, + "end_of_segment": { + "type": "bool" + } + } + } + ], + "cmd_in": [ + { + "name": "flush" + }, + { + "name": "file_chunk" + }, + { + "name": "file_chunked", + "property": { + "collection": { + "type": "string" + } + }, + "required": [ + "collection" + ] + }, + { + "name": "update_querying_collection", + "property": { + "filename": { + "type": "string" + }, + "collection": { + "type": "string" + } + }, + "required": [ + "filename", + "collection" + ] + } + ], + "cmd_out": [ + { + "name": "flush" + }, + { + "name": "call_chat", + "property": { + "messages": { + "type": "string" + }, + "stream": { + "type": "bool" + } + }, + "required": [ + "messages" + ], + "result": { + "property": { + "text": { + "type": "string" + } + }, + "required": [ + "text" + ] + } + }, + { + "name": "embed", + "property": { + "input": { + "type": "string" + } + }, + "required": [ + "input" + ], + "result": { + "property": { + "embedding": { + "type": "array", + "items": { + "type": "float64" + } + } + } + } + }, + { + "name": "query_vector", + "property": { + "collection_name": { + "type": "string" + }, + "top_k": { + "type": "int64" + }, + "embedding": { + "type": "array", + "items": { + "type": "float64" + } + } + }, + "required": [ + "collection_name", + "top_k", + "embedding" + ], + "result": { + "property": { + "response": { + "type": "array", + "items": { + "type": "object", + "properties": { + "content": { + "type": "string" + }, + "score": { + "type": "float64" + } + } + } + } + } + } + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/llama_index_chat_engine/property.json b/agents/ten_packages/extension/llama_index_chat_engine/property.json new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/agents/ten_packages/extension/llama_index_chat_engine/property.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/agents/ten_packages/extension/llama_index_chat_engine/requirements.txt b/agents/ten_packages/extension/llama_index_chat_engine/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..8d8165f23b6268716841041367232b9012fac26b --- /dev/null +++ b/agents/ten_packages/extension/llama_index_chat_engine/requirements.txt @@ -0,0 +1,2 @@ +nltk==3.8.1 +llama_index diff --git a/agents/ten_packages/extension/message_collector/README.md b/agents/ten_packages/extension/message_collector/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c5d6664d38415d839c115780e41f5a0aa57ad1d1 --- /dev/null +++ b/agents/ten_packages/extension/message_collector/README.md @@ -0,0 +1,29 @@ +# message_collector + + + +## Features + + + +- xxx feature + +## API + +Refer to `api` definition in [manifest.json] and default values in [property.json](property.json). + + + +## Development + +### Build + + + +### Unit test + + + +## Misc + + diff --git a/agents/ten_packages/extension/message_collector/__init__.py b/agents/ten_packages/extension/message_collector/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..645dc801215620e030a431687f48186f711fbf8b --- /dev/null +++ b/agents/ten_packages/extension/message_collector/__init__.py @@ -0,0 +1,8 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved. 
+# +# +from .src import addon diff --git a/agents/ten_packages/extension/message_collector/manifest.json b/agents/ten_packages/extension/message_collector/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..baeddee98abab2b1ed803a712b8f4c93b66f1070 --- /dev/null +++ b/agents/ten_packages/extension/message_collector/manifest.json @@ -0,0 +1,62 @@ +{ + "type": "extension", + "name": "message_collector", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "package": { + "include": [ + "manifest.json", + "property.json", + "BUILD.gn", + "**.tent", + "**.py", + "src/**.tent", + "src/**.py", + "README.md" + ] + }, + "api": { + "property": {}, + "data_in": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + }, + "is_final": { + "type": "bool" + }, + "stream_id": { + "type": "uint32" + }, + "end_of_segment": { + "type": "bool" + } + } + }, + { + "name": "content_data", + "property": { + "text": { + "type": "string" + }, + "end_of_segment": { + "type": "bool" + } + } + } + ], + "data_out": [ + { + "name": "data" + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/message_collector/property.json b/agents/ten_packages/extension/message_collector/property.json new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/agents/ten_packages/extension/message_collector/property.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/agents/ten_packages/extension/message_collector/src/__init__.py b/agents/ten_packages/extension/message_collector/src/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/agents/ten_packages/extension/message_collector/src/addon.py b/agents/ten_packages/extension/message_collector/src/addon.py new file mode 100644 index 0000000000000000000000000000000000000000..bab5eb191aaaf642791e6540e69fc05245a7d2d2 --- /dev/null +++ b/agents/ten_packages/extension/message_collector/src/addon.py @@ -0,0 +1,22 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved. +# +# +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + + +@register_addon_as_extension("message_collector") +class MessageCollectorExtensionAddon(Addon): + + def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None: + from .extension import MessageCollectorExtension + ten_env.log_info("on_create_instance") + ten_env.on_create_instance_done( + MessageCollectorExtension(name), context) diff --git a/agents/ten_packages/extension/message_collector/src/extension.py b/agents/ten_packages/extension/message_collector/src/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..391404caaa3816bed0b492a61d256b1298eb9e09 --- /dev/null +++ b/agents/ten_packages/extension/message_collector/src/extension.py @@ -0,0 +1,288 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved. 
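+#
+# Note on the wire format (derived from _text_to_base64_chunks below): each
+# outbound message is a JSON payload that is base64-encoded and split into
+# parts of at most MAX_CHUNK_SIZE_BYTES bytes, framed as
+# "msg_id|part_index|total_parts|content". A receiver reassembles a message
+# by concatenating the content of parts 1..total_parts for a given msg_id
+# and base64-decoding the result.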
+#
+#
+import base64
+import json
+import threading
+import time
+import uuid
+from ten import (
+    AudioFrame,
+    VideoFrame,
+    Extension,
+    TenEnv,
+    Cmd,
+    StatusCode,
+    CmdResult,
+    Data,
+)
+import asyncio
+
+MAX_SIZE = 800  # 800-byte limit
+OVERHEAD_ESTIMATE = 200  # Estimate for the overhead of metadata in the JSON
+
+CMD_NAME_FLUSH = "flush"
+
+TEXT_DATA_TEXT_FIELD = "text"
+TEXT_DATA_FINAL_FIELD = "is_final"
+TEXT_DATA_STREAM_ID_FIELD = "stream_id"
+TEXT_DATA_END_OF_SEGMENT_FIELD = "end_of_segment"
+
+MAX_CHUNK_SIZE_BYTES = 1024
+
+
+def _text_to_base64_chunks(_: TenEnv, text: str, msg_id: str) -> list:
+    # Ensure msg_id does not exceed 36 characters
+    if len(msg_id) > 36:
+        raise ValueError("msg_id cannot exceed 36 characters.")
+
+    # Convert text to bytearray
+    byte_array = bytearray(text, "utf-8")
+
+    # Encode the bytearray into base64
+    base64_encoded = base64.b64encode(byte_array).decode("utf-8")
+
+    # Initialize list to hold the final chunks
+    chunks = []
+
+    # We'll split the base64 string dynamically based on the final byte size
+    part_index = 0
+    total_parts = (
+        None  # We'll calculate total parts once we know how many chunks we create
+    )
+
+    # Process the base64-encoded content in chunks
+    current_position = 0
+    total_length = len(base64_encoded)
+
+    while current_position < total_length:
+        part_index += 1
+
+        # Start guessing the chunk size by limiting the base64 content part
+        estimated_chunk_size = MAX_CHUNK_SIZE_BYTES  # We'll reduce this dynamically
+        content_chunk = ""
+        count = 0
+        while True:
+            # Create the content part of the chunk
+            content_chunk = base64_encoded[
+                current_position : current_position + estimated_chunk_size
+            ]
+
+            # Format the chunk
+            formatted_chunk = f"{msg_id}|{part_index}|{total_parts if total_parts else '???'}|{content_chunk}"
+
+            # Check if the byte length of the formatted chunk exceeds the max allowed size
+            if len(bytearray(formatted_chunk, "utf-8")) <= MAX_CHUNK_SIZE_BYTES:
+                break
+            else:
+                # Reduce the estimated chunk size if the formatted chunk is too large
+                estimated_chunk_size -= 100  # Reduce content size gradually
+                count += 1
+
+        # ten_env.log_debug(f"chunk estimate guess: {count}")
+
+        # Add the current chunk to the list
+        chunks.append(formatted_chunk)
+        # Move to the next part of the content
+        current_position += estimated_chunk_size
+
+    # Now that we know the total number of parts, update the chunks with correct total_parts
+    total_parts = len(chunks)
+    updated_chunks = [chunk.replace("???", str(total_parts)) for chunk in chunks]
+
+    return updated_chunks
+
+
+class MessageCollectorExtension(Extension):
+    def __init__(self, name: str):
+        super().__init__(name)
+        self.queue = asyncio.Queue()
+        self.loop = None
+        self.cached_text_map = {}
+
+    def on_init(self, ten_env: TenEnv) -> None:
+        ten_env.log_info("on_init")
+        ten_env.on_init_done()
+
+    def on_start(self, ten_env: TenEnv) -> None:
+        ten_env.log_info("on_start")
+
+        # TODO: read properties, initialize resources
+        self.loop = asyncio.new_event_loop()
+
+        def start_loop():
+            asyncio.set_event_loop(self.loop)
+            self.loop.run_forever()
+
+        threading.Thread(target=start_loop, args=[]).start()
+
+        self.loop.create_task(self._process_queue(ten_env))
+
+        ten_env.on_start_done()
+
+    def on_stop(self, ten_env: TenEnv) -> None:
+        ten_env.log_info("on_stop")
+
+        # TODO: clean up resources
+
+        ten_env.on_stop_done()
+
+    def on_deinit(self, ten_env: TenEnv) -> None:
+        ten_env.log_info("on_deinit")
+        ten_env.on_deinit_done()
+
+    def on_cmd(self, ten_env: TenEnv, cmd: Cmd) -> None:
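+        # This extension defines no custom commands; every inbound cmd is
+        # logged and acknowledged with an OK result.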
+        cmd_name = cmd.get_name()
+        ten_env.log_info("on_cmd name {}".format(cmd_name))
+
+        # TODO: process cmd
+
+        cmd_result = CmdResult.create(StatusCode.OK)
+        ten_env.return_result(cmd_result, cmd)
+
+    def on_data(self, ten_env: TenEnv, data: Data) -> None:
+        """
+        on_data receives data from ten graph.
+        current supported data:
+        - name: text_data
+          example:
+          {"name": "text_data", "properties": {"text": "hello", "is_final": true, "stream_id": 123, "end_of_segment": true}}
+        - name: content_data
+          example:
+          {"name": "content_data", "properties": {"text": "hello", "end_of_segment": true}}
+        """
+        # ten_env.log_debug(f"on_data")
+        text = ""
+        final = True
+        stream_id = 0
+        end_of_segment = False
+
+        # Transcript text arrives as text_data; raw content arrives as content_data
+        if data.get_name() == "text_data":
+            try:
+                text = data.get_property_string(TEXT_DATA_TEXT_FIELD)
+            except Exception as e:
+                ten_env.log_error(
+                    f"on_data get_property_string {TEXT_DATA_TEXT_FIELD} error: {e}"
+                )
+
+            try:
+                final = data.get_property_bool(TEXT_DATA_FINAL_FIELD)
+            except Exception:
+                pass
+
+            try:
+                stream_id = data.get_property_int(TEXT_DATA_STREAM_ID_FIELD)
+            except Exception:
+                pass
+
+            try:
+                end_of_segment = data.get_property_bool(TEXT_DATA_END_OF_SEGMENT_FIELD)
+            except Exception as e:
+                ten_env.log_warn(
+                    f"on_data get_property_bool {TEXT_DATA_END_OF_SEGMENT_FIELD} error: {e}"
+                )
+
+            ten_env.log_info(
+                f"on_data {TEXT_DATA_TEXT_FIELD}: {text} {TEXT_DATA_FINAL_FIELD}: {final} {TEXT_DATA_STREAM_ID_FIELD}: {stream_id} {TEXT_DATA_END_OF_SEGMENT_FIELD}: {end_of_segment}"
+            )
+
+            # We cache all final text data and append the non-final text data to the cached data
+            # until the end of the segment.
+            if end_of_segment:
+                if stream_id in self.cached_text_map:
+                    text = self.cached_text_map[stream_id] + text
+                    del self.cached_text_map[stream_id]
+            else:
+                if final:
+                    if stream_id in self.cached_text_map:
+                        text = self.cached_text_map[stream_id] + text
+
+                    self.cached_text_map[stream_id] = text
+
+            # Generate a unique message ID for this batch of parts
+            message_id = str(uuid.uuid4())[:8]
+
+            # Prepare the main JSON structure for this message
+            base_msg_data = {
+                "is_final": end_of_segment,
+                "stream_id": stream_id,
+                "message_id": message_id,  # Add message_id to identify the split message
+                "data_type": "transcribe",
+                "text_ts": int(time.time() * 1000),  # Convert to milliseconds
+                "text": text,
+            }
+
+            try:
+                chunks = _text_to_base64_chunks(ten_env, json.dumps(base_msg_data), message_id)
+                for chunk in chunks:
+                    asyncio.run_coroutine_threadsafe(self._queue_message(chunk), self.loop)
+
+            except Exception as e:
+                ten_env.log_warn(f"on_data new_data error: {e}")
+        elif data.get_name() == "content_data":
+            try:
+                text = data.get_property_string(TEXT_DATA_TEXT_FIELD)
+            except Exception as e:
+                ten_env.log_error(
+                    f"on_data get_property_string {TEXT_DATA_TEXT_FIELD} error: {e}"
+                )
+
+            try:
+                end_of_segment = data.get_property_bool(TEXT_DATA_END_OF_SEGMENT_FIELD)
+            except Exception as e:
+                ten_env.log_warn(
+                    f"on_data get_property_bool {TEXT_DATA_END_OF_SEGMENT_FIELD} error: {e}"
+                )
+
+            ten_env.log_info(
+                f"on_data {TEXT_DATA_TEXT_FIELD}: {text}"
+            )
+
+            # Generate a unique message ID for this batch of parts
+            message_id = str(uuid.uuid4())[:8]
+
+            # Prepare the main JSON structure for this message
+            base_msg_data = {
+                "is_final": end_of_segment,
+                "stream_id": stream_id,
+                "message_id": message_id,  # Add message_id to identify the split message
+                "data_type": "raw",
+                "text_ts": int(time.time() * 1000),  # Convert to milliseconds
+                "text": text,
+            }
+
+            try:
+                chunks = _text_to_base64_chunks(ten_env, json.dumps(base_msg_data),
message_id) + for chunk in chunks: + asyncio.run_coroutine_threadsafe(self._queue_message(chunk), self.loop) + + except Exception as e: + ten_env.log_warn(f"on_data new_data error: {e}") + + def on_audio_frame(self, ten_env: TenEnv, audio_frame: AudioFrame) -> None: + # TODO: process pcm frame + pass + + def on_video_frame(self, ten_env: TenEnv, video_frame: VideoFrame) -> None: + # TODO: process image frame + pass + + async def _queue_message(self, data: str): + await self.queue.put(data) + + async def _process_queue(self, ten_env: TenEnv): + while True: + data = await self.queue.get() + if data is None: + break + # process data + ten_data = Data.create("data") + ten_data.set_property_buf("data", data.encode()) + ten_env.send_data(ten_data) + self.queue.task_done() + await asyncio.sleep(0.04) diff --git a/agents/ten_packages/extension/message_collector_rtm/README.md b/agents/ten_packages/extension/message_collector_rtm/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d7ef222c2806765b6a3070985d6c0c2f057fcc78 --- /dev/null +++ b/agents/ten_packages/extension/message_collector_rtm/README.md @@ -0,0 +1,29 @@ +# message_collector_rtm + + + +## Features + + + +- xxx feature + +## API + +Refer to `api` definition in [manifest.json] and default values in [property.json](property.json). + + + +## Development + +### Build + + + +### Unit test + + + +## Misc + + diff --git a/agents/ten_packages/extension/message_collector_rtm/__init__.py b/agents/ten_packages/extension/message_collector_rtm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..645dc801215620e030a431687f48186f711fbf8b --- /dev/null +++ b/agents/ten_packages/extension/message_collector_rtm/__init__.py @@ -0,0 +1,8 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved. 
+# +# +from .src import addon diff --git a/agents/ten_packages/extension/message_collector_rtm/manifest.json b/agents/ten_packages/extension/message_collector_rtm/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..516a2923df9b172cd25c704b0b63881914989964 --- /dev/null +++ b/agents/ten_packages/extension/message_collector_rtm/manifest.json @@ -0,0 +1,103 @@ +{ + "type": "extension", + "name": "message_collector_rtm", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "package": { + "include": [ + "manifest.json", + "property.json", + "BUILD.gn", + "**.tent", + "**.py", + "src/**.tent", + "src/**.py", + "README.md" + ] + }, + "api": { + "property": {}, + "data_in": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + }, + "is_final": { + "type": "bool" + }, + "stream_id": { + "type": "uint32" + }, + "end_of_segment": { + "type": "bool" + } + } + }, + { + "name": "rtm_message_event", + "property": { + "message": { + "type": "string" + } + } + }, + { + "name": "rtm_storage_event", + "property": {} + }, + { + "name": "rtm_presence_event", + "property": {} + }, + { + "name": "rtm_lock_event", + "property": {} + } + ], + "data_out": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + }, + "is_final": { + "type": "bool" + } + } + } + ], + "cmd_in": [ + { + "name": "on_user_audio_track_state_changed", + "property": {} + } + ], + "cmd_out": [ + { + "name": "publish", + "property": { + "message": { + "type": "buf" + } + } + }, + { + "name": "set_presence_state", + "property": { + "states": { + "type": "string" + } + } + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/message_collector_rtm/property.json b/agents/ten_packages/extension/message_collector_rtm/property.json new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/agents/ten_packages/extension/message_collector_rtm/property.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/agents/ten_packages/extension/message_collector_rtm/src/__init__.py b/agents/ten_packages/extension/message_collector_rtm/src/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/agents/ten_packages/extension/message_collector_rtm/src/addon.py b/agents/ten_packages/extension/message_collector_rtm/src/addon.py new file mode 100644 index 0000000000000000000000000000000000000000..c7800e34c7c618992cd12997f662b4a40d1a9759 --- /dev/null +++ b/agents/ten_packages/extension/message_collector_rtm/src/addon.py @@ -0,0 +1,22 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved. 
+# +# +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + + +@register_addon_as_extension("message_collector_rtm") +class MessageCollectorRTMExtensionAddon(Addon): + + def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None: + from .extension import MessageCollectorRTMExtension + + ten_env.log_info("MessageCollectorRTMExtensionAddon on_create_instance") + ten_env.on_create_instance_done(MessageCollectorRTMExtension(name), context) diff --git a/agents/ten_packages/extension/message_collector_rtm/src/extension.py b/agents/ten_packages/extension/message_collector_rtm/src/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..e640987cb28af78568d9b98d1a44c9b2dcaa13cb --- /dev/null +++ b/agents/ten_packages/extension/message_collector_rtm/src/extension.py @@ -0,0 +1,231 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved. +# +# +import json +import time +import uuid +import asyncio + +from ten import ( + AudioFrame, + VideoFrame, + AsyncExtension, + AsyncTenEnv, + Cmd, + StatusCode, + CmdResult, + Data, +) + +TEXT_DATA_TEXT_FIELD = "text" +TEXT_DATA_FINAL_FIELD = "is_final" +TEXT_DATA_STREAM_ID_FIELD = "stream_id" +TEXT_DATA_END_OF_SEGMENT_FIELD = "end_of_segment" + + +class MessageCollectorRTMExtension(AsyncExtension): + # Create the queue for message processing + def __init__(self, name: str): + super().__init__(name) + self.queue = asyncio.Queue() + self.cached_text_map = {} + self.loop = None + self.ten_env = None + self.stopped = False + + async def on_init(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_info("MessageCollectorRTMExtension on_init") + + async def on_start(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_info("MessageCollectorRTMExtension on_start") + self.loop = asyncio.get_event_loop() + self.ten_env = ten_env + self.loop.create_task(self._process_queue()) + + async def on_stop(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_info("on_stop") + self.stopped = True + await self.queue.put(None) + + async def on_deinit(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_info("MessageCollectorRTMExtension on_deinit") + + async def on_cmd(self, ten_env: AsyncTenEnv, cmd: Cmd) -> None: + cmd_name = cmd.get_name() + ten_env.log_info("on_cmd name {}".format(cmd_name)) + try: + if cmd_name == "on_user_audio_track_state_changed": + await self.handle_user_state_changed(cmd) + else: + ten_env.log_warn(f"unsupported cmd {cmd_name}") + + cmd_result = CmdResult.create(StatusCode.OK) + await ten_env.return_result(cmd_result, cmd) + except Exception as e: + ten_env.log_error(f"on_cmd error: {e}") + cmd_result = CmdResult.create(StatusCode.ERROR) + await ten_env.return_result(cmd_result, cmd) + + async def on_data(self, ten_env: AsyncTenEnv, data: Data) -> None: + """ + on_data receives data from ten graph. 
+        current supported data:
+        - name: text_data
+          example:
+          {"name": "text_data", "properties": {"text": "hello", "is_final": true, "stream_id": 123, "end_of_segment": true}}
+        - name: rtm_message_event
+          example:
+          {"name": "rtm_message_event", "properties": {"message": "hello"}}
+        """
+        data_name = data.get_name()
+        if data_name == "text_data":
+            await self.on_text_data(data)
+        elif data_name == "rtm_message_event":
+            await self.on_rtm_message_event(data)
+        else:
+            ten_env.log_warn(f"unsupported data {data_name}")
+
+    async def on_audio_frame(
+        self, ten_env: AsyncTenEnv, audio_frame: AudioFrame
+    ) -> None:
+        pass
+
+    async def on_video_frame(
+        self, ten_env: AsyncTenEnv, video_frame: VideoFrame
+    ) -> None:
+        pass
+
+    async def on_text_data(self, data: Data) -> None:
+        text = ""
+        final = True
+        stream_id = 0
+        end_of_segment = False
+
+        try:
+            text = data.get_property_string(TEXT_DATA_TEXT_FIELD)
+        except Exception as e:
+            self.ten_env.log_error(
+                f"on_data get_property_string {TEXT_DATA_TEXT_FIELD} error: {e}"
+            )
+
+        try:
+            final = data.get_property_bool(TEXT_DATA_FINAL_FIELD)
+        except Exception:
+            pass
+
+        try:
+            stream_id = data.get_property_int(TEXT_DATA_STREAM_ID_FIELD)
+        except Exception:
+            pass
+
+        try:
+            end_of_segment = data.get_property_bool(TEXT_DATA_END_OF_SEGMENT_FIELD)
+        except Exception as e:
+            self.ten_env.log_error(
+                f"on_data get_property_bool {TEXT_DATA_END_OF_SEGMENT_FIELD} error: {e}"
+            )
+
+        self.ten_env.log_debug(
+            f"on_data {TEXT_DATA_TEXT_FIELD}: {text} {TEXT_DATA_FINAL_FIELD}: {final} {TEXT_DATA_STREAM_ID_FIELD}: {stream_id} {TEXT_DATA_END_OF_SEGMENT_FIELD}: {end_of_segment}"
+        )
+
+        # We cache all final text data and append the non-final text data to the cached data
+        # until the end of the segment.
+        if end_of_segment:
+            if stream_id in self.cached_text_map:
+                text = self.cached_text_map[stream_id] + text
+                del self.cached_text_map[stream_id]
+        else:
+            if final:
+                if stream_id in self.cached_text_map:
+                    text = self.cached_text_map[stream_id] + text
+
+                self.cached_text_map[stream_id] = text
+
+        # Generate a unique message ID for this batch of parts
+        message_id = str(uuid.uuid4())[:8]
+        # Prepare the main JSON structure for this message
+        text_data = {
+            "is_final": end_of_segment,
+            "stream_id": stream_id,
+            "message_id": message_id,  # Add message_id to identify the split message
+            "type": "transcribe",
+            "ts": int(time.time() * 1000),  # Convert to milliseconds
+            "text": text,
+        }
+        await self._queue_message("text_data", text_data)
+
+    async def on_rtm_message_event(self, data: Data) -> None:
+        self.ten_env.log_debug("on_data rtm_message_event")
+        try:
+            text = data.get_property_string("message")
+            data = Data.create("text_data")
+            data.set_property_string("text", text)
+            data.set_property_bool("is_final", True)
+            asyncio.create_task(self.ten_env.send_data(data))
+        except Exception as e:
+            self.ten_env.log_error(f"Failed to handle on_rtm_message_event data: {e}")
+
+    async def handle_user_state_changed(self, cmd: Cmd) -> None:
+        try:
+            remote_user_id = cmd.get_property_string("remote_user_id")
+            state = cmd.get_property_int("state")
+            reason = cmd.get_property_int("reason")
+            self.ten_env.log_info(
+                f"handle_user_state_changed user_id: {remote_user_id} state: {state} reason: {reason}"
+            )
+            user_state = {
+                "remote_user_id": remote_user_id,
+                "state": str(state),
+                "reason": str(reason),
+            }
+            await self._queue_message("user_state", user_state)
+        except Exception as e:
+            self.ten_env.log_error(f"handle_user_state_changed error: {e}")
+
+    async
def _queue_message(self, data_type: str, data: dict): + await self.queue.put({"type": data_type, "data": data}) + + async def _process_queue(self): + self.ten_env.log_info("start async loop") + while not self.stopped: + try: + item = await self.queue.get() + if item is None: + break + data_type = item["type"] + data = item["data"] + # process data + if data_type == "text_data": + await self._handle_text_data(data) + elif data_type == "user_state": + await self._handle_user_state(data) + self.queue.task_done() + await asyncio.sleep(0.04) + except Exception as e: + self.ten_env.log_error(f"Failed to process queue: {e}") + + async def _handle_text_data(self, data: dict): + try: + self.ten_env.log_debug(f"Handling text data: {data}") + json_bytes = json.dumps(data).encode("utf-8") + cmd = Cmd.create("publish") + cmd.set_property_buf("message", json_bytes) + [cmd_result, _] = await self.ten_env.send_cmd(cmd) + self.ten_env.log_info(f"send_cmd result {cmd_result.to_json()}") + except Exception as e: + self.ten_env.log_error(f"Failed to handle text data: {e}") + + async def _handle_user_state(self, data: dict): + try: + json_bytes = json.dumps(data) + cmd = Cmd.create("set_presence_state") + cmd.set_property_string("states", json_bytes) + [cmd_result, _] = await self.ten_env.send_cmd(cmd) + self.ten_env.log_info(f"send_cmd result {cmd_result.to_json()}") + except Exception as e: + self.ten_env.log_error(f"Failed to handle user state: {e}") diff --git a/agents/ten_packages/extension/minimax_tts/go.mod b/agents/ten_packages/extension/minimax_tts/go.mod new file mode 100644 index 0000000000000000000000000000000000000000..82c072d98f1e37b57938078485008ab2491bf700 --- /dev/null +++ b/agents/ten_packages/extension/minimax_tts/go.mod @@ -0,0 +1,12 @@ +module minimax_tts + +go 1.20 + +replace ten_framework => ../../system/ten_runtime_go/interface + +require ( + github.com/go-resty/resty/v2 v2.16.0 + ten_framework v0.0.0-00010101000000-000000000000 +) + +require golang.org/x/net v0.27.0 // indirect diff --git a/agents/ten_packages/extension/minimax_tts/go.sum b/agents/ten_packages/extension/minimax_tts/go.sum new file mode 100644 index 0000000000000000000000000000000000000000..6ceffb66ddc9adf638c10206ed0af8af3ecf8253 --- /dev/null +++ b/agents/ten_packages/extension/minimax_tts/go.sum @@ -0,0 +1,5 @@ +github.com/go-resty/resty/v2 v2.16.0 h1:qpKalHWI2bpp9BIKlyT8TYWEJXOk1NuKbfiT3RRnzWc= +github.com/go-resty/resty/v2 v2.16.0/go.mod h1:0fHAoK7JoBy/Ch36N8VFeMsK7xQOHhvWaC3iOktwmIU= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= diff --git a/agents/ten_packages/extension/minimax_tts/manifest.json b/agents/ten_packages/extension/minimax_tts/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..8846415d762173c4911e7ed3bd1422f93585a1b7 --- /dev/null +++ b/agents/ten_packages/extension/minimax_tts/manifest.json @@ -0,0 +1,62 @@ +{ + "type": "extension", + "name": "minimax_tts", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_go", + "version": "0.8" + } + ], + "api": { + "property": { + "api_key": { + "type": "string" + }, + "group_id": { + "type": "string" + }, + "model": { + "type": "string" + }, + "request_timeout_seconds": { + "type": "int64" + }, + "sample_rate": { + "type": "int64" + }, + "url": { + "type": "string" + }, + "voice_id": { + "type": "string" + } + }, 
+ "data_in": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + } + } + } + ], + "cmd_in": [ + { + "name": "flush" + } + ], + "cmd_out": [ + { + "name": "flush" + } + ], + "audio_frame_out": [ + { + "name": "pcm_frame" + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/minimax_tts/minimax_tts.go b/agents/ten_packages/extension/minimax_tts/minimax_tts.go new file mode 100644 index 0000000000000000000000000000000000000000..3d22005f090d2a73eed443403d509b5a0416ce06 --- /dev/null +++ b/agents/ten_packages/extension/minimax_tts/minimax_tts.go @@ -0,0 +1,161 @@ +/** + * + * Agora Real Time Engagement + * Created by XinHui Li in 2024. + * Copyright (c) 2024 Agora IO. All rights reserved. + * + */ +// An extension written by Go for TTS +package extension + +import ( + "bufio" + "bytes" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + "ten_framework/ten" + + "github.com/go-resty/resty/v2" +) + +type minimaxTTS struct { + client *resty.Client + config minimaxTTSConfig +} + +type minimaxTTSConfig struct { + ApiKey string + GroupId string + Model string + RequestTimeoutSeconds int + SampleRate int32 + Url string + VoiceId string +} + +func defaultMinimaxTTSConfig() minimaxTTSConfig { + return minimaxTTSConfig{ + ApiKey: "", + GroupId: "", + Model: "speech-01-turbo", + RequestTimeoutSeconds: 10, + SampleRate: 32000, + Url: "https://api.minimax.chat/v1/t2a_v2", + VoiceId: "male-qn-qingse", + } +} + +func newMinimaxTTS(config minimaxTTSConfig) (*minimaxTTS, error) { + return &minimaxTTS{ + config: config, + client: resty.New(). + SetRetryCount(0). + SetTimeout(time.Duration(config.RequestTimeoutSeconds) * time.Second), + }, nil +} + +func (e *minimaxTTS) textToSpeechStream(tenEnv ten.TenEnv, streamWriter io.Writer, text string) (err error) { + tenEnv.LogDebug("textToSpeechStream start tts") + + payload := map[string]any{ + "audio_setting": map[string]any{ + "channel": 1, + "format": "pcm", + "sample_rate": e.config.SampleRate, + }, + "model": e.config.Model, + "pronunciation_dict": map[string]any{ + "tone": []string{}, + }, + "stream": true, + "text": text, + "voice_setting": map[string]any{ + "pitch": 0, + "speed": 1.0, + "voice_id": e.config.VoiceId, + "vol": 1.0, + }, + } + + resp, err := e.client.R(). + SetHeader("Content-Type", "application/json"). + SetHeader("Authorization", "Bearer "+e.config.ApiKey). + SetDoNotParseResponse(true). + SetBody(payload). 
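+        // GroupId is appended as a query parameter; the remaining request
+        // parameters travel in the JSON payload above.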
+        Post(fmt.Sprintf("%s?GroupId=%s", e.config.Url, e.config.GroupId))
+
+    if err != nil {
+        tenEnv.LogError(fmt.Sprintf("request failed, err: %v, text: %s", err, text))
+        return fmt.Errorf("textToSpeechStream failed, err: %v", err)
+    }
+
+    defer func() {
+        resp.RawBody().Close()
+
+        tenEnv.LogDebug(fmt.Sprintf("textToSpeechStream close response, err: %v, text: %s", err, text))
+    }()
+
+    // Check the response status code
+    if resp.StatusCode() != http.StatusOK {
+        tenEnv.LogError(fmt.Sprintf("unexpected response status: %d", resp.StatusCode()))
+        return fmt.Errorf("unexpected response status: %d", resp.StatusCode())
+    }
+
+    reader := bufio.NewReader(resp.RawBody())
+    for {
+        line, err := reader.ReadBytes('\n')
+        if err != nil {
+            if err == io.EOF {
+                break
+            }
+
+            tenEnv.LogError(fmt.Sprintf("failed to read line: %v", err))
+            return err
+        }
+
+        if !bytes.HasPrefix(line, []byte("data:")) {
+            tenEnv.LogDebug(fmt.Sprintf("drop chunk, text: %s, line: %s", text, line))
+            continue
+        }
+
+        var chunk struct {
+            Data struct {
+                Audio  string `json:"audio"`
+                Status int    `json:"status"`
+            } `json:"data"`
+            TraceId  string `json:"trace_id"`
+            BaseResp struct {
+                StatusCode int    `json:"status_code"`
+                StatusMsg  string `json:"status_msg"`
+            } `json:"base_resp"`
+        }
+
+        if err = json.Unmarshal(line[5:], &chunk); err != nil {
+            tenEnv.LogError(fmt.Sprintf("failed to decode JSON chunk: %v", err))
+            break
+        }
+
+        if chunk.Data.Status == 2 {
+            break
+        }
+
+        audioData, err := hex.DecodeString(chunk.Data.Audio)
+        if err != nil {
+            tenEnv.LogError(fmt.Sprintf("failed to decode audio data: %v, traceId: %s, BaseResp: %v", err, chunk.TraceId, chunk.BaseResp))
+            break
+        }
+
+        _, err = streamWriter.Write(audioData)
+        if err != nil {
+            tenEnv.LogError(fmt.Sprintf("failed to write to streamWriter: %v, traceId: %s, BaseResp: %v", err, chunk.TraceId, chunk.BaseResp))
+            break
+        }
+    }
+
+    return
+}
diff --git a/agents/ten_packages/extension/minimax_tts/minimax_tts_extension.go b/agents/ten_packages/extension/minimax_tts/minimax_tts_extension.go
new file mode 100644
index 0000000000000000000000000000000000000000..7bf132c38e412fa3bccf84ae095450937fd44048
--- /dev/null
+++ b/agents/ten_packages/extension/minimax_tts/minimax_tts_extension.go
@@ -0,0 +1,324 @@
+/**
+ *
+ * Agora Real Time Engagement
+ * Created by XinHui Li in 2024.
+ * Copyright (c) 2024 Agora IO. All rights reserved.
+ *
+ */
+// An extension written by Go for TTS
+package extension
+
+import (
+    "fmt"
+    "io"
+    "sync"
+    "sync/atomic"
+    "time"
+
+    "ten_framework/ten"
+)
+
+const (
+    cmdInFlush                 = "flush"
+    cmdOutFlush                = "flush"
+    dataInTextDataPropertyText = "text"
+
+    propertyApiKey                = "api_key"                 // Required
+    propertyGroupId               = "group_id"                // Required
+    propertyModel                 = "model"                   // Optional
+    propertyRequestTimeoutSeconds = "request_timeout_seconds" // Optional
+    propertySampleRate            = "sample_rate"             // Optional
+    propertyUrl                   = "url"                     // Optional
+    propertyVoiceId               = "voice_id"                // Optional
+)
+
+const (
+    textChanMax = 1024
+)
+
+var (
+    outdateTs atomic.Int64
+    textChan  chan *message
+    wg        sync.WaitGroup
+)
+
+type minimaxTTSExtension struct {
+    ten.DefaultExtension
+    minimaxTTS *minimaxTTS
+}
+
+type message struct {
+    text       string
+    receivedTs int64
+}
+
+func newMinimaxTTSExtension(name string) ten.Extension {
+    return &minimaxTTSExtension{}
+}
+
+// OnStart will be called when the extension is starting,
+// properties can be read here to initialize and start the extension.
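+// If a required property is missing, OnStart logs an error and returns early.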
+// current supported properties: +// - api_key (required) +// - group_id (required) +// - model +// - request_timeout_seconds +// - sample_rate +// - url +// - voice_id +func (e *minimaxTTSExtension) OnStart(ten ten.TenEnv) { + ten.LogInfo("OnStart") + + // prepare configuration + minimaxTTSConfig := defaultMinimaxTTSConfig() + + if apiKey, err := ten.GetPropertyString(propertyApiKey); err != nil { + ten.LogError(fmt.Sprintf("GetProperty required %s failed, err: %v", propertyApiKey, err)) + return + } else { + minimaxTTSConfig.ApiKey = apiKey + } + + if groupId, err := ten.GetPropertyString(propertyGroupId); err != nil { + ten.LogError(fmt.Sprintf("GetProperty required %s failed, err: %v", propertyGroupId, err)) + return + } else { + minimaxTTSConfig.GroupId = groupId + } + + if model, err := ten.GetPropertyString(propertyModel); err != nil { + ten.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", propertyModel, err)) + } else { + if len(model) > 0 { + minimaxTTSConfig.Model = model + } + } + + if requestTimeoutSeconds, err := ten.GetPropertyInt64(propertyRequestTimeoutSeconds); err != nil { + ten.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", propertyRequestTimeoutSeconds, err)) + } else { + if requestTimeoutSeconds > 0 { + minimaxTTSConfig.RequestTimeoutSeconds = int(requestTimeoutSeconds) + } + } + + if sampleRate, err := ten.GetPropertyInt64(propertySampleRate); err != nil { + ten.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", propertySampleRate, err)) + } else { + if sampleRate > 0 { + minimaxTTSConfig.SampleRate = int32(sampleRate) + } + } + + if url, err := ten.GetPropertyString(propertyUrl); err != nil { + ten.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", propertyUrl, err)) + } else { + if len(url) > 0 { + minimaxTTSConfig.Url = url + } + } + + if voiceId, err := ten.GetPropertyString(propertyVoiceId); err != nil { + ten.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", propertyVoiceId, err)) + } else { + minimaxTTSConfig.VoiceId = voiceId + } + + // create minimaxTTS instance + minimaxTTS, err := newMinimaxTTS(minimaxTTSConfig) + if err != nil { + ten.LogError(fmt.Sprintf("newMinimaxTTS failed, err: %v", err)) + return + } + + ten.LogInfo(fmt.Sprintf("newMinimaxTTS succeed with Model: %s", minimaxTTSConfig.Model)) + + // set minimaxTTS instance + e.minimaxTTS = minimaxTTS + + // create pcm instance + pcmConfig := defaultPcmConfig() + pcmConfig.SampleRate = minimaxTTSConfig.SampleRate + pcmConfig.SamplesPerChannel = minimaxTTSConfig.SampleRate / 100 + pcm := newPcm(pcmConfig) + pcmFrameSize := pcm.getPcmFrameSize() + + // init chan + textChan = make(chan *message, textChanMax) + + go func() { + ten.LogInfo("process textChan") + + for msg := range textChan { + if msg.receivedTs < outdateTs.Load() { // Check whether to interrupt + ten.LogInfo(fmt.Sprintf("textChan interrupt and flushing for input text: [%s], receivedTs: %d, outdateTs: %d", + msg.text, msg.receivedTs, outdateTs.Load())) + continue + } + + wg.Add(1) + ten.LogInfo(fmt.Sprintf("textChan text: [%s]", msg.text)) + + r, w := io.Pipe() + startTime := time.Now() + + go func() { + defer wg.Done() + defer w.Close() + + ten.LogInfo(fmt.Sprintf("textToSpeechStream text: [%s]", msg.text)) + err = e.minimaxTTS.textToSpeechStream(ten, w, msg.text) + ten.LogInfo(fmt.Sprintf("textToSpeechStream result: [%v]", err)) + if err != nil { + ten.LogError(fmt.Sprintf("textToSpeechStream failed, err: %v", err)) + return + } + }() + + ten.LogInfo(fmt.Sprintf("read pcm 
stream, text:[%s], pcmFrameSize:%d", msg.text, pcmFrameSize)) + + var ( + firstFrameLatency int64 + n int + pcmFrameRead int + readBytes int + sentFrames int + ) + buf := pcm.newBuf() + + // read pcm stream + for { + if msg.receivedTs < outdateTs.Load() { // Check whether to interrupt + ten.LogInfo(fmt.Sprintf("read pcm stream interrupt and flushing for input text: [%s], receivedTs: %d, outdateTs: %d", + msg.text, msg.receivedTs, outdateTs.Load())) + break + } + + n, err = r.Read(buf[pcmFrameRead:]) + readBytes += n + pcmFrameRead += n + + if err != nil { + if err == io.EOF { + ten.LogInfo("read pcm stream EOF") + break + } + + ten.LogError(fmt.Sprintf("read pcm stream failed, err: %v", err)) + break + } + + if pcmFrameRead != pcmFrameSize { + ten.LogDebug(fmt.Sprintf("the number of bytes read is [%d] inconsistent with pcm frame size", pcmFrameRead)) + continue + } + + pcm.send(ten, buf) + // clear buf + buf = pcm.newBuf() + pcmFrameRead = 0 + sentFrames++ + + if firstFrameLatency == 0 { + firstFrameLatency = time.Since(startTime).Milliseconds() + ten.LogInfo(fmt.Sprintf("first frame available for text: [%s], receivedTs: %d, firstFrameLatency: %dms", msg.text, msg.receivedTs, firstFrameLatency)) + } + + ten.LogDebug(fmt.Sprintf("sending pcm data, text: [%s]", msg.text)) + } + + if pcmFrameRead > 0 { + pcm.send(ten, buf) + sentFrames++ + ten.LogInfo(fmt.Sprintf("sending pcm remain data, text: [%s], pcmFrameRead: %d", msg.text, pcmFrameRead)) + } + + r.Close() + ten.LogInfo(fmt.Sprintf("send pcm data finished, text: [%s], receivedTs: %d, readBytes: %d, sentFrames: %d, firstFrameLatency: %dms, finishLatency: %dms", + msg.text, msg.receivedTs, readBytes, sentFrames, firstFrameLatency, time.Since(startTime).Milliseconds())) + } + }() + + ten.OnStartDone() +} + +// OnCmd receives cmd from ten graph. +// current supported cmd: +// - name: flush +// example: +// {"name": "flush"} +func (e *minimaxTTSExtension) OnCmd( + tenEnv ten.TenEnv, + cmd ten.Cmd, +) { + cmdName, err := cmd.GetName() + if err != nil { + tenEnv.LogError(fmt.Sprintf("OnCmd get name failed, err: %v", err)) + cmdResult, _ := ten.NewCmdResult(ten.StatusCodeError) + tenEnv.ReturnResult(cmdResult, cmd, nil) + return + } + + tenEnv.LogInfo(fmt.Sprintf("OnCmd %s", cmdInFlush)) + + switch cmdName { + case cmdInFlush: + outdateTs.Store(time.Now().UnixMicro()) + + // send out + outCmd, err := ten.NewCmd(cmdOutFlush) + if err != nil { + tenEnv.LogError(fmt.Sprintf("new cmd %s failed, err: %v", cmdOutFlush, err)) + cmdResult, _ := ten.NewCmdResult(ten.StatusCodeError) + tenEnv.ReturnResult(cmdResult, cmd, nil) + return + } + + if err := tenEnv.SendCmd(outCmd, nil); err != nil { + tenEnv.LogError(fmt.Sprintf("send cmd %s failed, err: %v", cmdOutFlush, err)) + cmdResult, _ := ten.NewCmdResult(ten.StatusCodeError) + tenEnv.ReturnResult(cmdResult, cmd, nil) + return + } else { + tenEnv.LogInfo(fmt.Sprintf("cmd %s sent", cmdOutFlush)) + } + } + + cmdResult, _ := ten.NewCmdResult(ten.StatusCodeOk) + tenEnv.ReturnResult(cmdResult, cmd, nil) +} + +// OnData receives data from ten graph. 
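+// Incoming text is pushed onto textChan from a goroutine, so OnData returns
+// immediately and never blocks the extension thread.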
+// current supported data:
+//   - name: text_data
+//     example:
+//     {"name": "text_data", "properties": {"text": "hello"}}
+func (e *minimaxTTSExtension) OnData(
+    tenEnv ten.TenEnv,
+    data ten.Data,
+) {
+    text, err := data.GetPropertyString(dataInTextDataPropertyText)
+    if err != nil {
+        tenEnv.LogWarn(fmt.Sprintf("OnData GetProperty %s failed, err: %v", dataInTextDataPropertyText, err))
+        return
+    }
+
+    if len(text) == 0 {
+        tenEnv.LogDebug("OnData text is empty, ignored")
+        return
+    }
+
+    tenEnv.LogInfo(fmt.Sprintf("OnData input text: [%s]", text))
+
+    go func() {
+        textChan <- &message{text: text, receivedTs: time.Now().UnixMicro()}
+    }()
+}
+
+func init() {
+    // Register addon
+    ten.RegisterAddonAsExtension(
+        "minimax_tts",
+        ten.NewDefaultExtensionAddon(newMinimaxTTSExtension),
+    )
+}
diff --git a/agents/ten_packages/extension/minimax_tts/pcm.go b/agents/ten_packages/extension/minimax_tts/pcm.go
new file mode 100644
index 0000000000000000000000000000000000000000..08b9546420dd803b0182b333493c8b8b931055b9
--- /dev/null
+++ b/agents/ten_packages/extension/minimax_tts/pcm.go
@@ -0,0 +1,101 @@
+/**
+ *
+ * Agora Real Time Engagement
+ * Created by XinHui Li in 2024.
+ * Copyright (c) 2024 Agora IO. All rights reserved.
+ *
+ */
+// An extension written by Go for TTS
+package extension
+
+import (
+    "fmt"
+
+    "ten_framework/ten"
+)
+
+type pcm struct {
+    config *pcmConfig
+}
+
+type pcmConfig struct {
+    BytesPerSample    int32
+    Channel           int32
+    ChannelLayout     uint64
+    Name              string
+    SampleRate        int32
+    SamplesPerChannel int32
+    Timestamp         int64
+}
+
+func defaultPcmConfig() *pcmConfig {
+    return &pcmConfig{
+        BytesPerSample:    2,
+        Channel:           1,
+        ChannelLayout:     1,
+        Name:              "pcm_frame",
+        SampleRate:        32000,
+        SamplesPerChannel: 32000 / 100,
+        Timestamp:         0,
+    }
+}
+
+func newPcm(config *pcmConfig) *pcm {
+    return &pcm{
+        config: config,
+    }
+}
+
+func (p *pcm) getPcmFrame(tenEnv ten.TenEnv, buf []byte) (pcmFrame ten.AudioFrame, err error) {
+    pcmFrame, err = ten.NewAudioFrame(p.config.Name)
+    if err != nil {
+        tenEnv.LogError(fmt.Sprintf("NewAudioFrame failed, err: %v", err))
+        return
+    }
+
+    // set pcm frame
+    pcmFrame.SetBytesPerSample(p.config.BytesPerSample)
+    pcmFrame.SetSampleRate(p.config.SampleRate)
+    pcmFrame.SetChannelLayout(p.config.ChannelLayout)
+    pcmFrame.SetNumberOfChannels(p.config.Channel)
+    pcmFrame.SetTimestamp(p.config.Timestamp)
+    pcmFrame.SetDataFmt(ten.AudioFrameDataFmtInterleave)
+    pcmFrame.SetSamplesPerChannel(p.config.SamplesPerChannel)
+    pcmFrame.AllocBuf(p.getPcmFrameSize())
+
+    borrowedBuf, err := pcmFrame.LockBuf()
+    if err != nil {
+        tenEnv.LogError(fmt.Sprintf("LockBuf failed, err: %v", err))
+        return
+    }
+
+    // copy data
+    copy(borrowedBuf, buf)
+
+    pcmFrame.UnlockBuf(&borrowedBuf)
+    return
+}
+
+func (p *pcm) getPcmFrameSize() int {
+    return int(p.config.SamplesPerChannel * p.config.Channel * p.config.BytesPerSample)
+}
+
+func (p *pcm) newBuf() []byte {
+    return make([]byte, p.getPcmFrameSize())
+}
+
+func (p *pcm) send(tenEnv ten.TenEnv, buf []byte) (err error) {
+    pcmFrame, err := p.getPcmFrame(tenEnv, buf)
+    if err != nil {
+        tenEnv.LogError(fmt.Sprintf("getPcmFrame failed, err: %v", err))
+        return
+    }
+
+    // send pcm
+    if err = tenEnv.SendAudioFrame(pcmFrame, nil); err != nil {
+        tenEnv.LogError(fmt.Sprintf("SendAudioFrame failed, err: %v", err))
+        return
+    }
+
+    return
+}
diff --git a/agents/ten_packages/extension/minimax_tts/property.json b/agents/ten_packages/extension/minimax_tts/property.json
new file mode 100644
index
0000000000000000000000000000000000000000..166d524aa76dee882a8e3ba1a3daddbee80023a6 --- /dev/null +++ b/agents/ten_packages/extension/minimax_tts/property.json @@ -0,0 +1,9 @@ +{ + "api_key": "${env:MINIMAX_TTS_API_KEY}", + "group_id": "${env:MINIMAX_TTS_GROUP_ID}", + "model": "speech-01-turbo", + "request_timeout_seconds": 10, + "sample_rate": 32000, + "url": "https://api.minimax.chat/v1/t2a_v2", + "voice_id": "male-qn-qingse" +} \ No newline at end of file diff --git a/agents/ten_packages/extension/minimax_tts_python/README.md b/agents/ten_packages/extension/minimax_tts_python/README.md new file mode 100644 index 0000000000000000000000000000000000000000..013a4631e8daa7209c7665b43c51d3de785ec6b9 --- /dev/null +++ b/agents/ten_packages/extension/minimax_tts_python/README.md @@ -0,0 +1,29 @@ +# minimax_tts_python + + + +## Features + + + +- xxx feature + +## API + +Refer to `api` definition in [manifest.json] and default values in [property.json](property.json). + + + +## Development + +### Build + + + +### Unit test + + + +## Misc + + diff --git a/agents/ten_packages/extension/minimax_tts_python/__init__.py b/agents/ten_packages/extension/minimax_tts_python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..72593ab2259f95627bdd500fe3d062984e7f44c6 --- /dev/null +++ b/agents/ten_packages/extension/minimax_tts_python/__init__.py @@ -0,0 +1,6 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. +# +from . import addon diff --git a/agents/ten_packages/extension/minimax_tts_python/addon.py b/agents/ten_packages/extension/minimax_tts_python/addon.py new file mode 100644 index 0000000000000000000000000000000000000000..6bdf4ec5cb8838928d91984ae696f2b6475d6bf2 --- /dev/null +++ b/agents/ten_packages/extension/minimax_tts_python/addon.py @@ -0,0 +1,19 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. +# +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + + +@register_addon_as_extension("minimax_tts_python") +class MinimaxTTSExtensionAddon(Addon): + + def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None: + from .extension import MinimaxTTSExtension + ten_env.log_info("MinimaxTTSExtensionAddon on_create_instance") + ten_env.on_create_instance_done(MinimaxTTSExtension(name), context) diff --git a/agents/ten_packages/extension/minimax_tts_python/extension.py b/agents/ten_packages/extension/minimax_tts_python/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..41ee8a4f5617dee66fd0dcdf7db1e71af4ea946b --- /dev/null +++ b/agents/ten_packages/extension/minimax_tts_python/extension.py @@ -0,0 +1,57 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. 
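+#
+# This extension plugs MiniMax TTS into the shared AsyncTTSBaseExtension
+# lifecycle: each on_request_tts call streams PCM frames from the client
+# defined in minimax_tts.py to the audio output.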
+# +import traceback +from ten_ai_base.tts import AsyncTTSBaseExtension +from .minimax_tts import MinimaxTTS, MinimaxTTSConfig +from ten import ( + AsyncTenEnv, +) + + +class MinimaxTTSExtension(AsyncTTSBaseExtension): + def __init__(self, name: str): + super().__init__(name) + self.client = None + + async def on_init(self, ten_env: AsyncTenEnv) -> None: + await super().on_init(ten_env) + ten_env.log_debug("on_init") + + async def on_start(self, ten_env: AsyncTenEnv) -> None: + await super().on_start(ten_env) + ten_env.log_debug("on_start") + + config = await MinimaxTTSConfig.create_async(ten_env=ten_env) + + ten_env.log_info(f"config: {config.api_key}, {config.group_id}") + + if not config.api_key or not config.group_id: + raise ValueError("api_key and group_id are required") + + self.client = MinimaxTTS(config) + + async def on_stop(self, ten_env: AsyncTenEnv) -> None: + await super().on_stop(ten_env) + ten_env.log_debug("on_stop") + + async def on_deinit(self, ten_env: AsyncTenEnv) -> None: + await super().on_deinit(ten_env) + ten_env.log_debug("on_deinit") + + async def on_request_tts( + self, ten_env: AsyncTenEnv, input_text: str, end_of_segment: bool + ) -> None: + try: + data = self.client.get(ten_env, input_text) + async for frame in data: + await self.send_audio_out( + ten_env, frame, sample_rate=self.client.config.sample_rate + ) + except Exception: + ten_env.log_error(f"on_request_tts failed: {traceback.format_exc()}") + + async def on_cancel_tts(self, ten_env: AsyncTenEnv) -> None: + return await super().on_cancel_tts(ten_env) diff --git a/agents/ten_packages/extension/minimax_tts_python/manifest.json b/agents/ten_packages/extension/minimax_tts_python/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..4a4dfbcb1ebb70cc97f1cd14df7d5e851fb70d79 --- /dev/null +++ b/agents/ten_packages/extension/minimax_tts_python/manifest.json @@ -0,0 +1,73 @@ +{ + "type": "extension", + "name": "minimax_tts_python", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "package": { + "include": [ + "manifest.json", + "property.json", + "BUILD.gn", + "**.tent", + "**.py", + "README.md", + "tests/**" + ] + }, + "api": { + "property": { + "api_key": { + "type": "string" + }, + "group_id": { + "type": "string" + }, + "model": { + "type": "string" + }, + "request_timeout_seconds": { + "type": "int64" + }, + "sample_rate": { + "type": "int64" + }, + "url": { + "type": "string" + }, + "voice_id": { + "type": "string" + } + }, + "data_in": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + } + } + } + ], + "cmd_in": [ + { + "name": "flush" + } + ], + "cmd_out": [ + { + "name": "flush" + } + ], + "audio_frame_out": [ + { + "name": "pcm_frame" + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/minimax_tts_python/minimax_tts.py b/agents/ten_packages/extension/minimax_tts_python/minimax_tts.py new file mode 100644 index 0000000000000000000000000000000000000000..d4715bd112bb165c5f824b04cd82712452d45edf --- /dev/null +++ b/agents/ten_packages/extension/minimax_tts_python/minimax_tts.py @@ -0,0 +1,129 @@ +import asyncio +from dataclasses import dataclass +import aiohttp +import json +from datetime import datetime +from typing import AsyncIterator + +from ten.async_ten_env import AsyncTenEnv +from ten_ai_base.config import BaseConfig + + +@dataclass +class MinimaxTTSConfig(BaseConfig): + api_key: str = "" + model: str = "speech-01-turbo" + 
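+    # The defaults below mirror the Go minimax_tts extension's configuration;
+    # any field can be overridden via property.json.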
voice_id: str = "male-qn-qingse" + sample_rate: int = 32000 + url: str = "https://api.minimax.chat/v1/t2a_v2" + group_id: str = "" + request_timeout_seconds: int = 10 + + +class MinimaxTTS: + def __init__(self, config: MinimaxTTSConfig): + self.config = config + + async def get(self, ten_env: AsyncTenEnv, text: str) -> AsyncIterator[bytes]: + payload = json.dumps( + { + "model": self.config.model, + "text": text, + "stream": True, + "voice_setting": { + "voice_id": self.config.voice_id, + "speed": 1.0, + "vol": 1.0, + "pitch": 0, + }, + "pronunciation_dict": {"tone": []}, + "audio_setting": { + "sample_rate": self.config.sample_rate, + "format": "pcm", + "channel": 1, + }, + } + ) + + url = f"{self.config.url}?GroupId={self.config.group_id}" + headers = { + "accept": "application/json, text/plain, */*", + "Authorization": f"Bearer {self.config.api_key}", + "Content-Type": "application/json", + } + + start_time = datetime.now() + ten_env.log_info(f"Start request, url: {self.config.url}, text: {text}") + ttfb = None + + async with aiohttp.ClientSession() as session: + try: + async with session.post(url, headers=headers, data=payload) as response: + trace_id = "" + alb_receive_time = "" + + try: + trace_id = response.headers.get("Trace-Id") + except Exception: + ten_env.log_warn("get response, no Trace-Id") + try: + alb_receive_time = response.headers.get("alb_receive_time") + except Exception: + ten_env.log_warn("get response, no alb_receive_time") + + ten_env.log_info( + f"get response trace-id: {trace_id}, alb_receive_time: {alb_receive_time}, cost_time {self._duration_in_ms_since(start_time)}ms" + ) + + if response.status != 200: + raise RuntimeError( + f"Request failed with status {response.status}" + ) + + buffer = b"" + async for chunk in response.content.iter_chunked( + 1024 + ): # Read in 1024 byte chunks + buffer += chunk + + # Split the buffer into lines based on newline character + while b"\n" in buffer: + line, buffer = buffer.split(b"\n", 1) + + # Process only lines that start with "data:" + if line.startswith(b"data:"): + try: + json_data = json.loads( + line[5:].decode("utf-8").strip() + ) + + # Check for the required keys in the JSON data + if ( + "data" in json_data + and "extra_info" not in json_data + ): + audio = json_data["data"].get("audio") + if audio: + decoded_hex = bytes.fromhex(audio) + yield decoded_hex + except (json.JSONDecodeError, UnicodeDecodeError) as e: + # Handle malformed JSON or decoding errors + ten_env.log_warn(f"Error decoding line: {e}") + continue + if not ttfb: + ttfb = self._duration_in_ms_since(start_time) + ten_env.log_info(f"trace-id: {trace_id}, ttfb {ttfb}ms") + except aiohttp.ClientError as e: + ten_env.log_error(f"Client error occurred: {e}") + except asyncio.TimeoutError: + ten_env.log_error("Request timed out") + finally: + ten_env.log_info( + f"http loop done, cost_time {self._duration_in_ms_since(start_time)}ms" + ) + + def _duration_in_ms(self, start: datetime, end: datetime) -> int: + return int((end - start).total_seconds() * 1000) + + def _duration_in_ms_since(self, start: datetime) -> int: + return self._duration_in_ms(start, datetime.now()) diff --git a/agents/ten_packages/extension/minimax_tts_python/property.json b/agents/ten_packages/extension/minimax_tts_python/property.json new file mode 100644 index 0000000000000000000000000000000000000000..166d524aa76dee882a8e3ba1a3daddbee80023a6 --- /dev/null +++ b/agents/ten_packages/extension/minimax_tts_python/property.json @@ -0,0 +1,9 @@ +{ + "api_key": "${env:MINIMAX_TTS_API_KEY}", + 
"group_id": "${env:MINIMAX_TTS_GROUP_ID}", + "model": "speech-01-turbo", + "request_timeout_seconds": 10, + "sample_rate": 32000, + "url": "https://api.minimax.chat/v1/t2a_v2", + "voice_id": "male-qn-qingse" +} \ No newline at end of file diff --git a/agents/ten_packages/extension/minimax_tts_python/requirements.txt b/agents/ten_packages/extension/minimax_tts_python/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..ce2357185aba0b35ffd5d50fb0f6500161ec0293 --- /dev/null +++ b/agents/ten_packages/extension/minimax_tts_python/requirements.txt @@ -0,0 +1 @@ +aiohttp \ No newline at end of file diff --git a/agents/ten_packages/extension/minimax_tts_python/tests/bin/start b/agents/ten_packages/extension/minimax_tts_python/tests/bin/start new file mode 100644 index 0000000000000000000000000000000000000000..04d784ea179c32ded5fc50565fb28b4ae0585c6b --- /dev/null +++ b/agents/ten_packages/extension/minimax_tts_python/tests/bin/start @@ -0,0 +1,21 @@ +#!/bin/bash + +set -e + +cd "$(dirname "${BASH_SOURCE[0]}")/../.." + +export PYTHONPATH=.ten/app:.ten/app/ten_packages/system/ten_runtime_python/lib:.ten/app/ten_packages/system/ten_runtime_python/interface:.ten/app/ten_packages/system/ten_ai_base/interface:$PYTHONPATH + +# If the Python app imports some modules that are compiled with a different +# version of libstdc++ (ex: PyTorch), the Python app may encounter confusing +# errors. To solve this problem, we can preload the correct version of +# libstdc++. +# +# export LD_PRELOAD=/lib/x86_64-linux-gnu/libstdc++.so.6 +# +# Another solution is to make sure the module 'ten_runtime_python' is imported +# _after_ the module that requires another version of libstdc++ is imported. +# +# Refer to https://github.com/pytorch/pytorch/issues/102360?from_wecom=1#issuecomment-1708989096 + +pytest tests/ "$@" \ No newline at end of file diff --git a/agents/ten_packages/extension/minimax_tts_python/tests/conftest.py b/agents/ten_packages/extension/minimax_tts_python/tests/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..9a2175e36e06ea1b6b40e07c5cf1e134ee1aec17 --- /dev/null +++ b/agents/ten_packages/extension/minimax_tts_python/tests/conftest.py @@ -0,0 +1,36 @@ +# +# Copyright © 2025 Agora +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0, with certain conditions. +# Refer to the "LICENSE" file in the root directory for more information. +# +import pytest +import sys +import os +from ten import ( + unregister_all_addons_and_cleanup, +) + + +@pytest.fixture(scope="session", autouse=True) +def global_setup_and_teardown(): + # Set the environment variable. + os.environ["TEN_DISABLE_ADDON_UNREGISTER_AFTER_APP_CLOSE"] = "true" + + # Verify the environment variable is correctly set. + if ( + "TEN_DISABLE_ADDON_UNREGISTER_AFTER_APP_CLOSE" not in os.environ + or os.environ["TEN_DISABLE_ADDON_UNREGISTER_AFTER_APP_CLOSE"] != "true" + ): + print( + "Failed to set TEN_DISABLE_ADDON_UNREGISTER_AFTER_APP_CLOSE", + file=sys.stderr, + ) + sys.exit(1) + + # Yield control to the test; after the test execution is complete, continue + # with the teardown process. + yield + + # Teardown part. 
+    unregister_all_addons_and_cleanup()
\ No newline at end of file
diff --git a/agents/ten_packages/extension/minimax_tts_python/tests/test_basic.py b/agents/ten_packages/extension/minimax_tts_python/tests/test_basic.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb639c32281f906013513afbf833db37125036c7
--- /dev/null
+++ b/agents/ten_packages/extension/minimax_tts_python/tests/test_basic.py
@@ -0,0 +1,34 @@
+#
+# Copyright © 2024 Agora
+# This file is part of TEN Framework, an open source project.
+# Licensed under the Apache License, Version 2.0, with certain conditions.
+# Refer to the "LICENSE" file in the root directory for more information.
+#
+from ten import ExtensionTester, TenEnvTester, Cmd, CmdResult, StatusCode
+
+
+class ExtensionTesterBasic(ExtensionTester):
+    def check_hello(self, ten_env: TenEnvTester, result: CmdResult):
+        statusCode = result.get_status_code()
+        print("receive hello_world, status:" + str(statusCode))
+
+        if statusCode == StatusCode.OK:
+            ten_env.stop_test()
+
+    def on_start(self, ten_env: TenEnvTester) -> None:
+        new_cmd = Cmd.create("hello_world")
+
+        print("send hello_world")
+        ten_env.send_cmd(
+            new_cmd,
+            lambda ten_env, result, _: self.check_hello(ten_env, result),
+        )
+
+        print("tester on_start_done")
+        ten_env.on_start_done()
+
+
+def test_basic():
+    tester = ExtensionTesterBasic()
+    tester.set_test_mode_single("minimax_tts_python")
+    tester.run()
diff --git a/agents/ten_packages/extension/minimax_v2v_python/README.md b/agents/ten_packages/extension/minimax_v2v_python/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..c73a53c1eb2e15ecf1842f5900bdfce02b15bab0
--- /dev/null
+++ b/agents/ten_packages/extension/minimax_v2v_python/README.md
@@ -0,0 +1,36 @@
+# MiniMax Voice-to-Voice Extension
+
+A TEN extension that implements voice-to-voice conversation capabilities using MiniMax's API services.
+
+## Features
+
+- Real-time voice-to-voice conversation
+- Support for streaming responses, including the assistant's voice, the assistant's transcript, and the user's transcript
+- Configurable voice settings
+- Memory management for conversation context
+- Asynchronous processing based on asyncio
+
+
+## API
+
+Refer to the `api` definition in [manifest.json](manifest.json) and the default values in [property.json](property.json).
+`token` is mandatory for using MiniMax's API; the other properties are optional.
+
+
+
+## Development
+
+### Build
+
+
+
+### Unit test
+
+
+
+## Misc
+
+
+
+## References
+- [ChatCompletion v2](https://platform.minimaxi.com/document/ChatCompletion%20v2?key=66701d281d57f38758d581d0#ww1u9KZvwrgnF2EfpPrnHHGd)
diff --git a/agents/ten_packages/extension/minimax_v2v_python/__init__.py b/agents/ten_packages/extension/minimax_v2v_python/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..72593ab2259f95627bdd500fe3d062984e7f44c6
--- /dev/null
+++ b/agents/ten_packages/extension/minimax_v2v_python/__init__.py
@@ -0,0 +1,6 @@
+#
+# This file is part of TEN Framework, an open source project.
+# Licensed under the Apache License, Version 2.0.
+# See the LICENSE file for more information.
+#
+from .
import addon diff --git a/agents/ten_packages/extension/minimax_v2v_python/addon.py b/agents/ten_packages/extension/minimax_v2v_python/addon.py new file mode 100644 index 0000000000000000000000000000000000000000..248c8a12ec660d3953a9b2666e4d41b1fb250aa5 --- /dev/null +++ b/agents/ten_packages/extension/minimax_v2v_python/addon.py @@ -0,0 +1,18 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. +# +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + + +@register_addon_as_extension("minimax_v2v_python") +class MinimaxV2VExtensionAddon(Addon): + def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None: + from .extension import MinimaxV2VExtension + ten_env.log_info("on_create_instance") + ten_env.on_create_instance_done(MinimaxV2VExtension(name), context) diff --git a/agents/ten_packages/extension/minimax_v2v_python/chat_memory.py b/agents/ten_packages/extension/minimax_v2v_python/chat_memory.py new file mode 100644 index 0000000000000000000000000000000000000000..8ef98b1006167a53601689a2977e9c5366d25602 --- /dev/null +++ b/agents/ten_packages/extension/minimax_v2v_python/chat_memory.py @@ -0,0 +1,42 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. +# +import threading + + +class ChatMemory: + def __init__(self, max_history_length): + self.max_history_length = max_history_length + self.history = [] + self.mutex = threading.Lock() # TODO: no need lock for asyncio + + def put(self, message): + with self.mutex: + self.history.append(message) + + while True: + history_count = len(self.history) + if history_count > 0 and history_count > self.max_history_length: + self.history.pop(0) + continue + if history_count > 0 and self.history[0]["role"] == "assistant": + # we cannot have an assistant message at the start of the chat history + # if after removal of the first, we have an assistant message, + # we need to remove the assistant message too + self.history.pop(0) + continue + break + + def get(self): + with self.mutex: + return self.history + + def count(self): + with self.mutex: + return len(self.history) + + def clear(self): + with self.mutex: + self.history = [] diff --git a/agents/ten_packages/extension/minimax_v2v_python/extension.py b/agents/ten_packages/extension/minimax_v2v_python/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..000801f63e08f0a98c1e284ebc778f9690e8b317 --- /dev/null +++ b/agents/ten_packages/extension/minimax_v2v_python/extension.py @@ -0,0 +1,483 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. 
+# +from ten import ( + AudioFrame, + VideoFrame, + AudioFrameDataFmt, + AsyncExtension, + AsyncTenEnv, + Cmd, + StatusCode, + CmdResult, + Data, +) +from .util import duration_in_ms, duration_in_ms_since, Role +from .chat_memory import ChatMemory +from dataclasses import dataclass, fields +import builtins +import httpx +from datetime import datetime +import aiofiles +import asyncio +from typing import List, Dict, Tuple, Any +import base64 +import json + + +@dataclass +class MinimaxV2VConfig: + token: str = "" + max_tokens: int = 1024 + model: str = "abab6.5s-chat" + voice_model: str = "speech-01-turbo-240228" + voice_id: str = "female-tianmei" + in_sample_rate: int = 16000 + out_sample_rate: int = 32000 + prompt: str = ( + "You are a voice assistant who talks in a conversational way and can chat with me like my friends. I will speak to you in English or Chinese, and you will answer in the corrected and improved version of my text with the language I use. Don’t talk like a robot, instead I would like you to talk like a real human with emotions. I will use your answer for text-to-speech, so don’t return me any meaningless characters. I want you to be helpful, when I’m asking you for advice, give me precise, practical and useful advice instead of being vague. When giving me a list of options, express the options in a narrative way instead of bullet points." + ) + greeting: str = "" + max_memory_length: int = 10 + dump: bool = False + + async def read_from_property(self, ten_env: AsyncTenEnv): + for field in fields(self): + # 'is_property_exist' has a bug that can not be used in async extension currently, use it instead of try .. except once fixed + # if not ten_env.is_property_exist(field.name): + # continue + try: + match field.type: + case builtins.str: + val = await ten_env.get_property_string(field.name) + if val: + setattr(self, field.name, val) + ten_env.log_info(f"{field.name}={val}") + case builtins.int: + val = await ten_env.get_property_int(field.name) + setattr(self, field.name, val) + ten_env.log_info(f"{field.name}={val}") + case builtins.bool: + val = await ten_env.get_property_bool(field.name) + setattr(self, field.name, val) + ten_env.log_info(f"{field.name}={val}") + case _: + pass + except Exception as e: + ten_env.log_warn(f"get property for {field.name} failed, err {e}") + + +class MinimaxV2VExtension(AsyncExtension): + def __init__(self, name: str) -> None: + super().__init__(name) + + self.config = MinimaxV2VConfig() + self.client = httpx.AsyncClient(timeout=httpx.Timeout(5)) + self.memory = ChatMemory(self.config.max_memory_length) + self.remote_stream_id = 0 + self.ten_env = None + + # able to cancel + self.curr_task = None + + # make sure tasks processing in order + self.process_input_task = None + self.queue = asyncio.Queue() + + async def on_init(self, ten_env: AsyncTenEnv) -> None: + await self.config.read_from_property(ten_env=ten_env) + ten_env.log_info(f"config: {self.config}") + + self.memory = ChatMemory(self.config.max_memory_length) + self.ten_env = ten_env + + async def on_start(self, ten_env: AsyncTenEnv) -> None: + self.process_input_task = asyncio.create_task( + self._process_input(ten_env=ten_env, queue=self.queue), name="process_input" + ) + + async def on_stop(self, ten_env: AsyncTenEnv) -> None: + + await self._flush(ten_env=ten_env) + self.queue.put_nowait(None) + if self.process_input_task: + self.process_input_task.cancel() + await asyncio.gather(self.process_input_task, return_exceptions=True) + self.process_input_task = None + + async def 
on_deinit(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_debug("on_deinit") + + if self.client: + await self.client.aclose() + self.client = None + self.ten_env = None + + async def on_cmd(self, ten_env: AsyncTenEnv, cmd: Cmd) -> None: + try: + cmd_name = cmd.get_name() + ten_env.log_debug("on_cmd name {}".format(cmd_name)) + + # process cmd + match cmd_name: + case "flush": + await self._flush(ten_env=ten_env) + await ten_env.send_cmd(Cmd.create("flush")) + ten_env.log_debug("flush done") + case _: + pass + await ten_env.return_result(CmdResult.create(StatusCode.OK), cmd) + except asyncio.CancelledError: + ten_env.log_warn(f"cmd {cmd_name} cancelled") + await ten_env.return_result(CmdResult.create(StatusCode.ERROR), cmd) + raise + except Exception as e: + ten_env.log_warn(f"cmd {cmd_name} failed, err {e}") + finally: + pass + + async def on_data(self, ten_env: AsyncTenEnv, data: Data) -> None: + pass + + async def on_audio_frame( + self, ten_env: AsyncTenEnv, audio_frame: AudioFrame + ) -> None: + + try: + ts = datetime.now() + stream_id = audio_frame.get_property_int("stream_id") + if not self.remote_stream_id: + self.remote_stream_id = stream_id + + frame_buf = audio_frame.get_buf() + ten_env.log_debug(f"on audio frame {len(frame_buf)} {stream_id}") + + # process audio frame, must be after vad + # put_nowait to make sure put in_order + self.queue.put_nowait((ts, frame_buf)) + # await self._complete_with_history(ts, frame_buf) + + # dump input audio if need + await self._dump_audio_if_need(frame_buf, "in") + + # ten_env.log_debug(f"on audio frame {len(frame_buf)} {stream_id} put done") + except asyncio.CancelledError: + ten_env.log_warn("on audio frame cancelled") + raise + except Exception as e: + ten_env.log_error(f"on audio frame failed, err {e}") + + async def on_video_frame( + self, ten_env: AsyncTenEnv, video_frame: VideoFrame + ) -> None: + pass + + async def _process_input(self, ten_env: AsyncTenEnv, queue: asyncio.Queue): + ten_env.log_info("process_input started") + + while True: + item = await queue.get() + if not item: + break + + (ts, frame_buf) = item + ten_env.log_debug(f"start process task {ts} {len(frame_buf)}") + + try: + self.curr_task = asyncio.create_task( + self._complete_with_history(ts, frame_buf) + ) + await self.curr_task + self.curr_task = None + except asyncio.CancelledError: + ten_env.log_warn("task cancelled") + except Exception as e: + ten_env.log_warn(f"task failed, err {e}") + finally: + queue.task_done() + + ten_env.log_info("process_input exit") + + async def _complete_with_history(self, ts: datetime, buff: bytearray): + start_time = datetime.now() + ten_env = self.ten_env + ten_env.log_debug( + f"start request, buff len {len(buff)}, queued_time {duration_in_ms(ts, start_time)}ms" + ) + + # prepare messages with prompt and history + messages = [] + if self.config.prompt: + messages.append({"role": Role.System, "content": self.config.prompt}) + messages.extend(self.memory.get()) + ten_env.log_debug(f"messages without audio: [{messages}]") + messages.append( + self._create_input_audio_message(buff=buff) + ) # don't print audio message + + # prepare request + url = "https://api.minimax.chat/v1/text/chatcompletion_v2" + (headers, payload) = self._create_request(messages) + + # vars to calculate Time to first byte + user_transcript_ttfb = None + assistant_transcript_ttfb = None + assistant_audio_ttfb = None + + # vars for transcript + user_transcript = "" + assistant_transcript = "" + + try: + # send POST request + async with self.client.stream( + 
"POST", url, headers=headers, json=payload + ) as response: + trace_id = response.headers.get("Trace-Id", "") + alb_receive_time = response.headers.get("alb_receive_time", "") + ten_env.log_info( + f"Get response trace-id: {trace_id}, alb_receive_time: {alb_receive_time}, cost_time {duration_in_ms_since(start_time)}ms" + ) + + response.raise_for_status() # check response + + i = 0 + async for line in response.aiter_lines(): + # ten_env.log_info(f"-> line {line}") + # if self._need_interrupt(ts): + # ten_env.log_warn(f"trace-id: {trace_id}, interrupted") + # if self.transcript: + # self.transcript += "[interrupted]" + # self._append_message("assistant", self.transcript) + # self._send_transcript("", "assistant", True) + # break + + if not line.startswith("data:"): + ten_env.log_debug(f"ignore line {len(line)}") + continue + i += 1 + + resp = json.loads(line.strip("data:")) + if resp.get("choices") and resp["choices"][0].get("delta"): + delta = resp["choices"][0]["delta"] + if delta.get("role") == "assistant": + # text content + if delta.get("content"): + content = delta["content"] + assistant_transcript += content + if not assistant_transcript_ttfb: + assistant_transcript_ttfb = duration_in_ms_since( + start_time + ) + ten_env.log_info( + f"trace-id {trace_id} chunck-{i} get assistant_transcript_ttfb {assistant_transcript_ttfb}ms, assistant transcript [{content}]" + ) + else: + ten_env.log_info( + f"trace-id {trace_id} chunck-{i} get assistant transcript [{content}]" + ) + + # send out for transcript display + self._send_transcript( + ten_env=ten_env, + content=content, + role=Role.Assistant, + end_of_segment=False, + ) + + # audio content + if ( + delta.get("audio_content") + and delta["audio_content"] != "" + ): + ten_env.log_info( + f"trace-id {trace_id} chunck-{i} get audio_content" + ) + if not assistant_audio_ttfb: + assistant_audio_ttfb = duration_in_ms_since( + start_time + ) + ten_env.log_info( + f"trace-id {trace_id} chunck-{i} get assistant_audio_ttfb {assistant_audio_ttfb}ms" + ) + + # send out + base64_str = delta["audio_content"] + buff = base64.b64decode(base64_str) + await self._dump_audio_if_need(buff, "out") + await self._send_audio_frame( + ten_env=ten_env, audio_data=buff + ) + + # tool calls + if delta.get("tool_calls"): + ten_env.log_warn(f"ignore tool call {delta}") + # TODO: add tool calls + continue + + if delta.get("role") == "user": + if delta.get("content"): + content = delta["content"] + user_transcript += content + if not user_transcript_ttfb: + user_transcript_ttfb = duration_in_ms_since( + start_time + ) + ten_env.log_info( + f"trace-id: {trace_id} chunck-{i} get user_transcript_ttfb {user_transcript_ttfb}ms, user transcript [{content}]" + ) + else: + ten_env.log_info( + f"trace-id {trace_id} chunck-{i} get user transcript [{content}]" + ) + + # send out for transcript display + self._send_transcript( + ten_env=ten_env, + content=content, + role=Role.User, + end_of_segment=True, + ) + + except httpx.TimeoutException: + ten_env.log_warn("http timeout") + except httpx.HTTPStatusError as e: + ten_env.log_warn(f"http status error: {e}") + except httpx.RequestError as e: + ten_env.log_warn(f"http request error: {e}") + finally: + ten_env.log_info( + f"http loop done, cost_time {duration_in_ms_since(start_time)}ms" + ) + if user_transcript: + self.memory.put({"role": Role.User, "content": user_transcript}) + if assistant_transcript: + self.memory.put( + {"role": Role.Assistant, "content": assistant_transcript} + ) + self._send_transcript( + ten_env=ten_env, + 
content="", + role=Role.Assistant, + end_of_segment=True, + ) + + def _create_input_audio_message(self, buff: bytearray) -> Dict[str, Any]: + message = { + "role": "user", + "content": [ + { + "type": "input_audio", + "input_audio": { + "data": base64.b64encode(buff).decode("utf-8"), + "format": "pcm", + "sample_rate": self.config.in_sample_rate, + "bit_depth": 16, + "channel": 1, + "encode": "base64", + }, + } + ], + } + return message + + def _create_request( + self, messages: List[Any] + ) -> Tuple[Dict[str, Any], Dict[str, Any]]: + config = self.config + + headers = { + "Authorization": f"Bearer {config.token}", + "Content-Type": "application/json", + } + + payload = { + "model": config.model, + "messages": messages, + "tool_choice": "none", + "stream": True, + "stream_options": {"speech_output": True}, # 开启语音输出 + "voice_setting": { + "model": config.voice_model, + "voice_id": config.voice_id, + }, + "audio_setting": { + "sample_rate": config.out_sample_rate, + "format": "pcm", + "channel": 1, + "encode": "base64", + }, + "tools": [{"type": "web_search"}], + "max_tokens": config.max_tokens, + "temperature": 0.8, + "top_p": 0.95, + } + + return (headers, payload) + + async def _send_audio_frame( + self, ten_env: AsyncTenEnv, audio_data: bytearray + ) -> None: + try: + f = AudioFrame.create("pcm_frame") + f.set_sample_rate(self.config.out_sample_rate) + f.set_bytes_per_sample(2) + f.set_number_of_channels(1) + f.set_data_fmt(AudioFrameDataFmt.INTERLEAVE) + f.set_samples_per_channel(len(audio_data) // 2) + f.alloc_buf(len(audio_data)) + buff = f.lock_buf() + buff[:] = audio_data + f.unlock_buf(buff) + await ten_env.send_audio_frame(f) + except Exception as e: + ten_env.log_error(f"send audio frame failed, err {e}") + + def _send_transcript( + self, + ten_env: AsyncTenEnv, + content: str, + role: str, + end_of_segment: bool, + ) -> None: + stream_id = self.remote_stream_id if role == "user" else 0 + + try: + d = Data.create("text_data") + d.set_property_string("text", content) + d.set_property_bool("is_final", True) + d.set_property_bool("end_of_segment", end_of_segment) + d.set_property_string("role", role) + d.set_property_int("stream_id", stream_id) + ten_env.log_info( + f"send transcript text [{content}] {stream_id} end_of_segment {end_of_segment} role {role}" + ) + asyncio.create_task(self.ten_env.send_data(d)) + except Exception as e: + ten_env.log_warn( + f"send transcript text [{content}] {stream_id} end_of_segment {end_of_segment} role {role} failed, err {e}" + ) + + async def _flush(self, ten_env: AsyncTenEnv) -> None: + # clear queue + while not self.queue.empty(): + try: + self.queue.get_nowait() + self.queue.task_done() + except Exception as e: + ten_env.log_warn(f"flush queue error {e}") + + # cancel current task + if self.curr_task: + self.curr_task.cancel() + await asyncio.gather(self.curr_task, return_exceptions=True) + self.curr_task = None + + async def _dump_audio_if_need(self, buf: bytearray, suffix: str) -> None: + if not self.config.dump: + return + + async with aiofiles.open(f"minimax_v2v_{suffix}.pcm", "ab") as f: + await f.write(buf) diff --git a/agents/ten_packages/extension/minimax_v2v_python/manifest.json b/agents/ten_packages/extension/minimax_v2v_python/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..7c8f51226bd98a546d4facfa07a998a4cb8324fa --- /dev/null +++ b/agents/ten_packages/extension/minimax_v2v_python/manifest.json @@ -0,0 +1,104 @@ +{ + "type": "extension", + "name": "minimax_v2v_python", + "version": "0.1.0", + 
"dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "package": { + "include": [ + "manifest.json", + "property.json", + "**.py", + "README.md" + ] + }, + "api": { + "property": { + "token": { + "type": "string" + }, + "max_tokens": { + "type": "int32" + }, + "model": { + "type": "string" + }, + "voice_model": { + "type": "string" + }, + "voice_id": { + "type": "string" + }, + "in_sample_rate": { + "type": "int32" + }, + "out_sample_rate": { + "type": "int32" + }, + "prompt": { + "type": "string" + }, + "greeting": { + "type": "string" + }, + "max_memory_length": { + "type": "int32" + }, + "dump": { + "type": "bool" + } + }, + "cmd_in": [ + { + "name": "flush" + } + ], + "cmd_out": [ + { + "name": "flush" + } + ], + "audio_frame_in": [ + { + "name": "pcm_frame", + "property": { + "stream_id": { + "type": "uint32" + } + } + } + ], + "audio_frame_out": [ + { + "name": "pcm_frame" + } + ], + "data_out": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + }, + "is_final": { + "type": "bool" + }, + "end_of_segment": { + "type": "bool" + }, + "role": { + "type": "string" + }, + "stream_id": { + "type": "uint32" + } + } + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/minimax_v2v_python/property.json b/agents/ten_packages/extension/minimax_v2v_python/property.json new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/agents/ten_packages/extension/minimax_v2v_python/property.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/agents/ten_packages/extension/minimax_v2v_python/requirements.txt b/agents/ten_packages/extension/minimax_v2v_python/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..73f987013d0a3566769d30a7cb3fd788bc430d70 --- /dev/null +++ b/agents/ten_packages/extension/minimax_v2v_python/requirements.txt @@ -0,0 +1,2 @@ +aiofiles +httpx \ No newline at end of file diff --git a/agents/ten_packages/extension/minimax_v2v_python/util.py b/agents/ten_packages/extension/minimax_v2v_python/util.py new file mode 100644 index 0000000000000000000000000000000000000000..f04119106b74353460933f1eeb67c7c31464961e --- /dev/null +++ b/agents/ten_packages/extension/minimax_v2v_python/util.py @@ -0,0 +1,15 @@ +from datetime import datetime + + +def duration_in_ms(start: datetime, end: datetime) -> int: + return int((end - start).total_seconds() * 1000) + + +def duration_in_ms_since(start: datetime) -> int: + return duration_in_ms(start, datetime.now()) + + +class Role(str): + System = "system" + User = "user" + Assistant = "assistant" diff --git a/agents/ten_packages/extension/neuphonic_tts/README.md b/agents/ten_packages/extension/neuphonic_tts/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1a07dd7c42982c0301a3b9ce2802e383d005f94b --- /dev/null +++ b/agents/ten_packages/extension/neuphonic_tts/README.md @@ -0,0 +1,29 @@ +# neuphonic_tts + + + +## Features + + + +- xxx feature + +## API + +Refer to `api` definition in [manifest.json] and default values in [property.json](property.json). 
+ + + +## Development + +### Build + + + +### Unit test + + + +## Misc + + diff --git a/agents/ten_packages/extension/neuphonic_tts/__init__.py b/agents/ten_packages/extension/neuphonic_tts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..72593ab2259f95627bdd500fe3d062984e7f44c6 --- /dev/null +++ b/agents/ten_packages/extension/neuphonic_tts/__init__.py @@ -0,0 +1,6 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. +# +from . import addon diff --git a/agents/ten_packages/extension/neuphonic_tts/addon.py b/agents/ten_packages/extension/neuphonic_tts/addon.py new file mode 100644 index 0000000000000000000000000000000000000000..0632939f9c96a9803a88e14503ef2998e7291efe --- /dev/null +++ b/agents/ten_packages/extension/neuphonic_tts/addon.py @@ -0,0 +1,19 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. +# +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + + +@register_addon_as_extension("neuphonic_tts") +class NeuphonicTTSExtensionAddon(Addon): + + def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None: + from .extension import NeuphonicTTSExtension + ten_env.log_info("NeuphonicTTSExtensionAddon on_create_instance") + ten_env.on_create_instance_done(NeuphonicTTSExtension(name), context) diff --git a/agents/ten_packages/extension/neuphonic_tts/extension.py b/agents/ten_packages/extension/neuphonic_tts/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..6f30bc6dbbacf502f29dc49dafaca13f27c05b82 --- /dev/null +++ b/agents/ten_packages/extension/neuphonic_tts/extension.py @@ -0,0 +1,55 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. 
+# +import traceback + +from .neuphonic_tts import NeuphonicTTS, NeuphonicTTSConfig +from ten import ( + AsyncTenEnv, +) +from ten_ai_base.tts import AsyncTTSBaseExtension + + +class NeuphonicTTSExtension(AsyncTTSBaseExtension): + def __init__(self, name: str) -> None: + super().__init__(name) + self.config = None + self.client = None + + async def on_init(self, ten_env: AsyncTenEnv) -> None: + await super().on_init(ten_env) + ten_env.log_debug("on_init") + + async def on_start(self, ten_env: AsyncTenEnv) -> None: + try: + await super().on_start(ten_env) + ten_env.log_debug("on_start") + self.config = await NeuphonicTTSConfig.create_async(ten_env=ten_env) + + if not self.config.api_key: + raise ValueError("api_key is required") + + self.client = NeuphonicTTS(self.config) + except Exception: + ten_env.log_error(f"on_start failed: {traceback.format_exc()}") + + async def on_stop(self, ten_env: AsyncTenEnv) -> None: + await super().on_stop(ten_env) + ten_env.log_debug("on_stop") + + async def on_deinit(self, ten_env: AsyncTenEnv) -> None: + await super().on_deinit(ten_env) + ten_env.log_debug("on_deinit") + + async def on_request_tts( + self, ten_env: AsyncTenEnv, input_text: str, end_of_segment: bool + ) -> None: + response = self.client.text_to_speech_stream(input_text) + + async for message in response: + await self.send_audio_out(ten_env, message.data.audio) + + async def on_cancel_tts(self, ten_env: AsyncTenEnv) -> None: + return await super().on_cancel_tts(ten_env) diff --git a/agents/ten_packages/extension/neuphonic_tts/manifest.json b/agents/ten_packages/extension/neuphonic_tts/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..e466f9930d521bce8f07983a09145e62ae5c11cc --- /dev/null +++ b/agents/ten_packages/extension/neuphonic_tts/manifest.json @@ -0,0 +1,67 @@ +{ + "type": "extension", + "name": "neuphonic_tts", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "package": { + "include": [ + "manifest.json", + "property.json", + "BUILD.gn", + "**.tent", + "**.py", + "README.md", + "tests/**" + ] + }, + "api": { + "property": { + "api_key": { + "type": "string" + }, + "lang_code": { + "type": "string" + }, + "sample_rate": { + "type": "int64" + }, + "voice_id": { + "type": "string" + }, + "encoding": { + "type": "string" + } + }, + "data_in": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + } + } + } + ], + "cmd_in": [ + { + "name": "flush" + } + ], + "cmd_out": [ + { + "name": "flush" + } + ], + "audio_frame_out": [ + { + "name": "pcm_frame" + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/neuphonic_tts/neuphonic_tts.py b/agents/ten_packages/extension/neuphonic_tts/neuphonic_tts.py new file mode 100644 index 0000000000000000000000000000000000000000..10e09c95981443c0d457e65567db4b38ffaf2739 --- /dev/null +++ b/agents/ten_packages/extension/neuphonic_tts/neuphonic_tts.py @@ -0,0 +1,42 @@ +# +# +# Agora Real Time Engagement +# Created by XinHui Li in 2024. +# Copyright (c) 2024 Agora IO. All rights reserved. 
+# +# + +from dataclasses import dataclass +from typing import AsyncIterator, Optional, Literal +from pyneuphonic import Neuphonic, TTSConfig +from pyneuphonic.models import APIResponse, TTSResponse + +from ten_ai_base.config import BaseConfig + + +@dataclass +class NeuphonicTTSConfig(BaseConfig): + api_key: str = "" + lang_code: str = "en" + sample_rate: int = 16000 + voice_id: Optional[str] = None + speed: float = 1.0 + encoding: Literal["pcm_linear", "pcm_mulaw"] = "pcm_linear" + request_timeout_seconds: int = 10 + +class NeuphonicTTS: + def __init__(self, config: NeuphonicTTSConfig) -> None: + self.config = config + self.client = Neuphonic(api_key=config.api_key) + + def text_to_speech_stream(self, text: str) -> AsyncIterator[APIResponse[TTSResponse]]: + sse_client = self.client.tts.AsyncSSEClient(timeout=self.config.request_timeout_seconds) + tts_config = TTSConfig( + lang_code=self.config.lang_code, + sampling_rate=self.config.sample_rate, + voice_id=self.config.voice_id, + speed=self.config.speed, + encoding=self.config.encoding, + ) + + return sse_client.send(text, tts_config=tts_config) diff --git a/agents/ten_packages/extension/neuphonic_tts/property.json b/agents/ten_packages/extension/neuphonic_tts/property.json new file mode 100644 index 0000000000000000000000000000000000000000..d13b2c4ee96e56f84ed6dbfb328a82634eead740 --- /dev/null +++ b/agents/ten_packages/extension/neuphonic_tts/property.json @@ -0,0 +1,5 @@ +{ + "api_key": "${env:NEUPHONIC_API_KEY}", + "lang_code": "en", + "sample_rate": 16000 +} \ No newline at end of file diff --git a/agents/ten_packages/extension/neuphonic_tts/requirements.txt b/agents/ten_packages/extension/neuphonic_tts/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f9d833f438be6458c56126b7bcfe909614b2e77e --- /dev/null +++ b/agents/ten_packages/extension/neuphonic_tts/requirements.txt @@ -0,0 +1 @@ +pyneuphonic~=1.5.13 diff --git a/agents/ten_packages/extension/neuphonic_tts/tests/bin/start b/agents/ten_packages/extension/neuphonic_tts/tests/bin/start new file mode 100644 index 0000000000000000000000000000000000000000..04d784ea179c32ded5fc50565fb28b4ae0585c6b --- /dev/null +++ b/agents/ten_packages/extension/neuphonic_tts/tests/bin/start @@ -0,0 +1,21 @@ +#!/bin/bash + +set -e + +cd "$(dirname "${BASH_SOURCE[0]}")/../.." + +export PYTHONPATH=.ten/app:.ten/app/ten_packages/system/ten_runtime_python/lib:.ten/app/ten_packages/system/ten_runtime_python/interface:.ten/app/ten_packages/system/ten_ai_base/interface:$PYTHONPATH + +# If the Python app imports some modules that are compiled with a different +# version of libstdc++ (ex: PyTorch), the Python app may encounter confusing +# errors. To solve this problem, we can preload the correct version of +# libstdc++. +# +# export LD_PRELOAD=/lib/x86_64-linux-gnu/libstdc++.so.6 +# +# Another solution is to make sure the module 'ten_runtime_python' is imported +# _after_ the module that requires another version of libstdc++ is imported. 
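+#
+# (In practice that means the test code should import the conflicting module,
+# e.g. torch, before importing 'ten_runtime_python'.)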
+# +# Refer to https://github.com/pytorch/pytorch/issues/102360?from_wecom=1#issuecomment-1708989096 + +pytest tests/ "$@" \ No newline at end of file diff --git a/agents/ten_packages/extension/neuphonic_tts/tests/conftest.py b/agents/ten_packages/extension/neuphonic_tts/tests/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..9a2175e36e06ea1b6b40e07c5cf1e134ee1aec17 --- /dev/null +++ b/agents/ten_packages/extension/neuphonic_tts/tests/conftest.py @@ -0,0 +1,36 @@ +# +# Copyright © 2025 Agora +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0, with certain conditions. +# Refer to the "LICENSE" file in the root directory for more information. +# +import pytest +import sys +import os +from ten import ( + unregister_all_addons_and_cleanup, +) + + +@pytest.fixture(scope="session", autouse=True) +def global_setup_and_teardown(): + # Set the environment variable. + os.environ["TEN_DISABLE_ADDON_UNREGISTER_AFTER_APP_CLOSE"] = "true" + + # Verify the environment variable is correctly set. + if ( + "TEN_DISABLE_ADDON_UNREGISTER_AFTER_APP_CLOSE" not in os.environ + or os.environ["TEN_DISABLE_ADDON_UNREGISTER_AFTER_APP_CLOSE"] != "true" + ): + print( + "Failed to set TEN_DISABLE_ADDON_UNREGISTER_AFTER_APP_CLOSE", + file=sys.stderr, + ) + sys.exit(1) + + # Yield control to the test; after the test execution is complete, continue + # with the teardown process. + yield + + # Teardown part. + unregister_all_addons_and_cleanup() \ No newline at end of file diff --git a/agents/ten_packages/extension/neuphonic_tts/tests/test_basic.py b/agents/ten_packages/extension/neuphonic_tts/tests/test_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..273d2798b3297342422267e1c6495e9d4e523b71 --- /dev/null +++ b/agents/ten_packages/extension/neuphonic_tts/tests/test_basic.py @@ -0,0 +1,35 @@ +# +# Copyright © 2024 Agora +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0, with certain conditions. +# Refer to the "LICENSE" file in the root directory for more information. 
+# +from pathlib import Path +from ten import ExtensionTester, TenEnvTester, Cmd, CmdResult, StatusCode + + +class ExtensionTesterBasic(ExtensionTester): + def check_hello(self, ten_env: TenEnvTester, result: CmdResult): + statusCode = result.get_status_code() + print("receive hello_world, status:" + str(statusCode)) + + if statusCode == StatusCode.OK: + ten_env.stop_test() + + def on_start(self, ten_env: TenEnvTester) -> None: + new_cmd = Cmd.create("hello_world") + + print("send hello_world") + ten_env.send_cmd( + new_cmd, + lambda ten_env, result, _: self.check_hello(ten_env, result), + ) + + print("tester on_start_done") + ten_env.on_start_done() + + +def test_basic(): + tester = ExtensionTesterBasic() + tester.set_test_mode_single("neuphonic_tts") + tester.run() diff --git a/agents/ten_packages/extension/openai_chatgpt/README.md b/agents/ten_packages/extension/openai_chatgpt/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/agents/ten_packages/extension/openai_chatgpt/go.mod b/agents/ten_packages/extension/openai_chatgpt/go.mod new file mode 100644 index 0000000000000000000000000000000000000000..02a0985657695d2cb2c46820670f7dae20df5bb2 --- /dev/null +++ b/agents/ten_packages/extension/openai_chatgpt/go.mod @@ -0,0 +1,17 @@ +module openai_chatgpt + +go 1.20 + +replace ten_framework => ../../system/ten_runtime_go/interface + +require ( + github.com/sashabaranov/go-openai v1.24.1 + github.com/stretchr/testify v1.9.0 + ten_framework v0.0.0-00010101000000-000000000000 +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/agents/ten_packages/extension/openai_chatgpt/go.sum b/agents/ten_packages/extension/openai_chatgpt/go.sum new file mode 100644 index 0000000000000000000000000000000000000000..64a09f354de102e5fd9ddacd9dbb8d520f370f94 --- /dev/null +++ b/agents/ten_packages/extension/openai_chatgpt/go.sum @@ -0,0 +1,12 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sashabaranov/go-openai v1.24.1 h1:DWK95XViNb+agQtuzsn+FyHhn3HQJ7Va8z04DQDJ1MI= +github.com/sashabaranov/go-openai v1.24.1/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/agents/ten_packages/extension/openai_chatgpt/manifest.json b/agents/ten_packages/extension/openai_chatgpt/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..e5b5b8d57ac635890e15568f09415132d04cb890 --- /dev/null +++ b/agents/ten_packages/extension/openai_chatgpt/manifest.json @@ -0,0 +1,76 @@ +{ + "type": "extension", + "name": "openai_chatgpt", + "version": "0.1.0", + 
"dependencies": [ + { + "type": "system", + "name": "ten_runtime_go", + "version": "0.8" + } + ], + "api": { + "property": { + "api_key": { + "type": "string" + }, + "frequency_penalty": { + "type": "float64" + }, + "presence_penalty": { + "type": "float64" + }, + "model": { + "type": "string" + }, + "max_tokens": { + "type": "int64" + }, + "prompt": { + "type": "string" + }, + "greeting": { + "type": "string" + }, + "max_memory_length": { + "type": "int64" + } + }, + "data_in": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + }, + "is_final": { + "type": "bool" + } + } + } + ], + "data_out": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + }, + "end_of_segment": { + "type": "bool" + } + } + } + ], + "cmd_in": [ + { + "name": "flush" + } + ], + "cmd_out": [ + { + "name": "flush" + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/openai_chatgpt/openai_chatgpt.go b/agents/ten_packages/extension/openai_chatgpt/openai_chatgpt.go new file mode 100644 index 0000000000000000000000000000000000000000..1a09ed14138993f88d1afbea0df86b3e7c14a8d2 --- /dev/null +++ b/agents/ten_packages/extension/openai_chatgpt/openai_chatgpt.go @@ -0,0 +1,111 @@ +/** + * + * Agora Real Time Engagement + * Created by lixinhui in 2024. + * Copyright (c) 2024 Agora IO. All rights reserved. + * + */ +// Note that this is just an example extension written in the GO programming +// language, so the package name does not equal to the containing directory +// name. However, it is not common in Go. +package extension + +import ( + "context" + "fmt" + "math/rand" + "net/http" + "net/url" + + openai "github.com/sashabaranov/go-openai" +) + +type openaiChatGPT struct { + client *openai.Client + config openaiChatGPTConfig +} + +type openaiChatGPTConfig struct { + BaseUrl string + ApiKey string + + Model string + Prompt string + + FrequencyPenalty float32 + PresencePenalty float32 + TopP float32 + Temperature float32 + MaxTokens int + Seed int + + ProxyUrl string +} + +func defaultOpenaiChatGPTConfig() openaiChatGPTConfig { + return openaiChatGPTConfig{ + BaseUrl: "https://api.openai.com/v1", + ApiKey: "", + + Model: openai.GPT4o, + Prompt: "You are a voice assistant who talks in a conversational way and can chat with me like my friends. i will speak to you in english or chinese, and you will answer in the corrected and improved version of my text with the language i use. Don't talk like a robot, instead i would like you to talk like real human with emotions. i will use your answer for text-to-speech, so don't return me any meaningless characters. I want you to be helpful, when i'm asking you for advices, give me precise, practical and useful advices instead of being vague. 
When giving me list of options, express the options in a narrative way instead of bullet points.", + + FrequencyPenalty: 0.9, + PresencePenalty: 0.9, + TopP: 1.0, + Temperature: 0.1, + MaxTokens: 512, + Seed: rand.Int(), + + ProxyUrl: "", + } +} + +func newOpenaiChatGPT(config openaiChatGPTConfig) (*openaiChatGPT, error) { + conf := openai.DefaultConfig(config.ApiKey) + + if config.BaseUrl != "" { + conf.BaseURL = config.BaseUrl + } + + if config.ProxyUrl != "" { + proxyUrl, err := url.Parse(config.ProxyUrl) + if err != nil { + return nil, fmt.Errorf("newOpenaiChatGPT failed on parsing proxy url, err: %v", err) + } + conf.HTTPClient = &http.Client{Transport: &http.Transport{Proxy: http.ProxyURL(proxyUrl)}} + } + + return &openaiChatGPT{ + config: config, + client: openai.NewClientWithConfig(conf), + }, nil +} + +func (c *openaiChatGPT) getChatCompletionsStream(messages []openai.ChatCompletionMessage) (*openai.ChatCompletionStream, error) { + req := openai.ChatCompletionRequest{ + Temperature: c.config.Temperature, + TopP: c.config.TopP, + PresencePenalty: c.config.PresencePenalty, + FrequencyPenalty: c.config.FrequencyPenalty, + MaxTokens: c.config.MaxTokens, + Seed: &c.config.Seed, + Messages: append( + []openai.ChatCompletionMessage{ + { + Role: openai.ChatMessageRoleSystem, + Content: c.config.Prompt, + }, + }, + messages..., + ), + Model: c.config.Model, + Stream: true, + } + + resp, err := c.client.CreateChatCompletionStream(context.Background(), req) + if err != nil { + return nil, fmt.Errorf("CreateChatCompletionStream failed,err: %v", err) + } + return resp, nil +} diff --git a/agents/ten_packages/extension/openai_chatgpt/openai_chatgpt_extension.go b/agents/ten_packages/extension/openai_chatgpt/openai_chatgpt_extension.go new file mode 100644 index 0000000000000000000000000000000000000000..97ac2b9fece819b0acc33ca68807e3eba7863646 --- /dev/null +++ b/agents/ten_packages/extension/openai_chatgpt/openai_chatgpt_extension.go @@ -0,0 +1,389 @@ +/** + * + * Agora Real Time Engagement + * Created by lixinhui in 2024. + * Copyright (c) 2024 Agora IO. All rights reserved. + * + */ +// Note that this is just an example extension written in the GO programming +// language, so the package name does not equal to the containing directory +// name. However, it is not common in Go. 
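+//
+// At runtime the extension reads its properties in OnStart, appends each
+// final "text_data" input to an in-memory chat history, streams a completion
+// from OpenAI, and forwards the reply downstream sentence by sentence; a
+// "flush" cmd marks any in-flight stream as outdated so it stops early.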
+package extension + +import ( + "errors" + "fmt" + "io" + "sync" + "sync/atomic" + "time" + + "ten_framework/ten" + + openai "github.com/sashabaranov/go-openai" +) + +type openaiChatGPTExtension struct { + ten.DefaultExtension + openaiChatGPT *openaiChatGPT +} + +const ( + cmdInFlush = "flush" + cmdOutFlush = "flush" + dataInTextDataPropertyText = "text" + dataInTextDataPropertyIsFinal = "is_final" + dataOutTextDataPropertyText = "text" + dataOutTextDataPropertyTextEndOfSegment = "end_of_segment" + + propertyBaseUrl = "base_url" // Optional + propertyApiKey = "api_key" // Required + propertyModel = "model" // Optional + propertyPrompt = "prompt" // Optional + propertyFrequencyPenalty = "frequency_penalty" // Optional + propertyPresencePenalty = "presence_penalty" // Optional + propertyTemperature = "temperature" // Optional + propertyTopP = "top_p" // Optional + propertyMaxTokens = "max_tokens" // Optional + propertyGreeting = "greeting" // Optional + propertyProxyUrl = "proxy_url" // Optional + propertyMaxMemoryLength = "max_memory_length" // Optional +) + +var ( + memory []openai.ChatCompletionMessage + memoryChan chan openai.ChatCompletionMessage + maxMemoryLength = 10 + + outdateTs atomic.Int64 + wg sync.WaitGroup +) + +func newChatGPTExtension(name string) ten.Extension { + return &openaiChatGPTExtension{} +} + +// OnStart will be called when the extension is starting, +// properies can be read here to initialize and start the extension. +// current supported properties: +// - api_key (required) +// - model +// - prompt +// - frequency_penalty +// - presence_penalty +// - temperature +// - top_p +// - max_tokens +// - greeting +// - proxy_url +func (p *openaiChatGPTExtension) OnStart(tenEnv ten.TenEnv) { + tenEnv.LogInfo("OnStart") + + // prepare configuration + openaiChatGPTConfig := defaultOpenaiChatGPTConfig() + + if baseUrl, err := tenEnv.GetPropertyString(propertyBaseUrl); err != nil { + tenEnv.LogError(fmt.Sprintf("GetProperty required %s failed, err: %v", propertyBaseUrl, err)) + } else { + if len(baseUrl) > 0 { + openaiChatGPTConfig.BaseUrl = baseUrl + } + } + + if apiKey, err := tenEnv.GetPropertyString(propertyApiKey); err != nil { + tenEnv.LogError(fmt.Sprintf("GetProperty required %s failed, err: %v", propertyApiKey, err)) + return + } else { + openaiChatGPTConfig.ApiKey = apiKey + } + + if model, err := tenEnv.GetPropertyString(propertyModel); err != nil { + tenEnv.LogWarn(fmt.Sprintf("GetProperty optional %s error:%v", propertyModel, err)) + } else { + if len(model) > 0 { + openaiChatGPTConfig.Model = model + } + } + + if prompt, err := tenEnv.GetPropertyString(propertyPrompt); err != nil { + tenEnv.LogWarn(fmt.Sprintf("GetProperty optional %s error:%v", propertyPrompt, err)) + } else { + if len(prompt) > 0 { + openaiChatGPTConfig.Prompt = prompt + } + } + + if frequencyPenalty, err := tenEnv.GetPropertyFloat64(propertyFrequencyPenalty); err != nil { + tenEnv.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", propertyFrequencyPenalty, err)) + } else { + openaiChatGPTConfig.FrequencyPenalty = float32(frequencyPenalty) + } + + if presencePenalty, err := tenEnv.GetPropertyFloat64(propertyPresencePenalty); err != nil { + tenEnv.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", propertyPresencePenalty, err)) + } else { + openaiChatGPTConfig.PresencePenalty = float32(presencePenalty) + } + + if temperature, err := tenEnv.GetPropertyFloat64(propertyTemperature); err != nil { + tenEnv.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", 
propertyTemperature, err)) + } else { + openaiChatGPTConfig.Temperature = float32(temperature) + } + + if topP, err := tenEnv.GetPropertyFloat64(propertyTopP); err != nil { + tenEnv.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", propertyTopP, err)) + } else { + openaiChatGPTConfig.TopP = float32(topP) + } + + if maxTokens, err := tenEnv.GetPropertyInt64(propertyMaxTokens); err != nil { + tenEnv.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", propertyMaxTokens, err)) + } else { + if maxTokens > 0 { + openaiChatGPTConfig.MaxTokens = int(maxTokens) + } + } + + if proxyUrl, err := tenEnv.GetPropertyString(propertyProxyUrl); err != nil { + tenEnv.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", propertyProxyUrl, err)) + } else { + openaiChatGPTConfig.ProxyUrl = proxyUrl + } + + greeting, err := tenEnv.GetPropertyString(propertyGreeting) + if err != nil { + tenEnv.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", propertyGreeting, err)) + } + + if propMaxMemoryLength, err := tenEnv.GetPropertyInt64(propertyMaxMemoryLength); err != nil { + tenEnv.LogWarn(fmt.Sprintf("GetProperty optional %s failed, err: %v", propertyMaxMemoryLength, err)) + } else { + if propMaxMemoryLength > 0 { + maxMemoryLength = int(propMaxMemoryLength) + } + } + + // create openaiChatGPT instance + openaiChatgpt, err := newOpenaiChatGPT(openaiChatGPTConfig) + if err != nil { + tenEnv.LogError(fmt.Sprintf("newOpenaiChatGPT failed, err: %v", err)) + return + } + tenEnv.LogInfo(fmt.Sprintf("newOpenaiChatGPT succeed with max_tokens: %d, model: %s", + openaiChatGPTConfig.MaxTokens, openaiChatGPTConfig.Model)) + + p.openaiChatGPT = openaiChatgpt + + memoryChan = make(chan openai.ChatCompletionMessage, maxMemoryLength*2) + + // send greeting if available + if len(greeting) > 0 { + outputData, _ := ten.NewData("text_data") + outputData.SetProperty(dataOutTextDataPropertyText, greeting) + outputData.SetProperty(dataOutTextDataPropertyTextEndOfSegment, true) + if err := tenEnv.SendData(outputData, nil); err != nil { + tenEnv.LogError(fmt.Sprintf("greeting [%s] send failed, err: %v", greeting, err)) + } else { + tenEnv.LogInfo(fmt.Sprintf("greeting [%s] sent", greeting)) + } + } + + tenEnv.OnStartDone() +} + +// OnCmd receives cmd from ten graph. 
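+// A flush stores the current timestamp into outdateTs, which makes the
+// streaming goroutine started in OnData break out of its receive loop; OnCmd
+// then waits for that goroutine and forwards a "flush" cmd downstream.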
+// current supported cmd: +// - name: flush +// example: +// {"name": "flush"} +func (p *openaiChatGPTExtension) OnCmd( + tenEnv ten.TenEnv, + cmd ten.Cmd, +) { + cmdName, err := cmd.GetName() + if err != nil { + tenEnv.LogError(fmt.Sprintf("OnCmd get name failed, err: %v", err)) + cmdResult, _ := ten.NewCmdResult(ten.StatusCodeError) + tenEnv.ReturnResult(cmdResult, cmd, nil) + return + } + tenEnv.LogInfo(fmt.Sprintf("OnCmd %s", cmdInFlush)) + + switch cmdName { + case cmdInFlush: + outdateTs.Store(time.Now().UnixMicro()) + + wg.Wait() // wait for chat completion stream to finish + + // send out + outCmd, err := ten.NewCmd(cmdOutFlush) + if err != nil { + tenEnv.LogError(fmt.Sprintf("new cmd %s failed, err: %v", cmdOutFlush, err)) + cmdResult, _ := ten.NewCmdResult(ten.StatusCodeError) + tenEnv.ReturnResult(cmdResult, cmd, nil) + return + } + if err := tenEnv.SendCmd(outCmd, nil); err != nil { + tenEnv.LogError(fmt.Sprintf("send cmd %s failed, err: %v", cmdOutFlush, err)) + cmdResult, _ := ten.NewCmdResult(ten.StatusCodeError) + tenEnv.ReturnResult(cmdResult, cmd, nil) + return + } else { + tenEnv.LogInfo(fmt.Sprintf("cmd %s sent", cmdOutFlush)) + } + } + cmdResult, _ := ten.NewCmdResult(ten.StatusCodeOk) + tenEnv.ReturnResult(cmdResult, cmd, nil) +} + +// OnData receives data from ten graph. +// current supported data: +// - name: text_data +// example: +// {"name": "text_data", "properties": {"text": "hello", "is_final": true} +func (p *openaiChatGPTExtension) OnData( + tenEnv ten.TenEnv, + data ten.Data, +) { + // Get isFinal + isFinal, err := data.GetPropertyBool(dataInTextDataPropertyIsFinal) + if err != nil { + tenEnv.LogWarn(fmt.Sprintf("OnData GetProperty %s failed, err: %v", dataInTextDataPropertyIsFinal, err)) + return + } + if !isFinal { // ignore non-final + tenEnv.LogDebug("ignore non-final input") + return + } + + // Get input text + inputText, err := data.GetPropertyString(dataInTextDataPropertyText) + if err != nil { + tenEnv.LogError(fmt.Sprintf("OnData GetProperty %s failed, err: %v", dataInTextDataPropertyText, err)) + return + } + if len(inputText) == 0 { + tenEnv.LogDebug("ignore empty text") + return + } + tenEnv.LogInfo(fmt.Sprintf("OnData input text: [%s]", inputText)) + + // prepare memory + for len(memoryChan) > 0 { + m, ok := <-memoryChan + if !ok { + break + } + memory = append(memory, m) + if len(memory) > maxMemoryLength { + memory = memory[1:] + } + } + memory = append(memory, openai.ChatCompletionMessage{ + Role: openai.ChatMessageRoleUser, + Content: inputText, + }) + if len(memory) > maxMemoryLength { + memory = memory[1:] + } + + // start goroutine to request and read responses from openai + wg.Add(1) + go func(startTime time.Time, inputText string, memory []openai.ChatCompletionMessage) { + defer wg.Done() + tenEnv.LogInfo(fmt.Sprintf("GetChatCompletionsStream for input text: [%s] memory: %v", inputText, memory)) + + // Get result from ai + resp, err := p.openaiChatGPT.getChatCompletionsStream(memory) + if err != nil { + tenEnv.LogError(fmt.Sprintf("GetChatCompletionsStream for input text: [%s] failed, err: %v", inputText, err)) + return + } + defer func() { + if resp != nil { // Close stream object + resp.Close() + } + }() + tenEnv.LogDebug(fmt.Sprintf("GetChatCompletionsStream start to recv for input text: [%s]", inputText)) + + var sentence, fullContent string + var firstSentenceSent bool + for { + if startTime.UnixMicro() < outdateTs.Load() { // Check whether to interrupt + tenEnv.LogInfo(fmt.Sprintf("GetChatCompletionsStream recv interrupt and 
flushing for input text: [%s], startTs: %d, outdateTs: %d", + inputText, startTime.UnixMicro(), outdateTs.Load())) + break + } + + chatCompletions, err := resp.Recv() + if errors.Is(err, io.EOF) { + tenEnv.LogDebug(fmt.Sprintf("GetChatCompletionsStream recv for input text: [%s], io.EOF break", inputText)) + break + } + + var content string + if len(chatCompletions.Choices) > 0 && chatCompletions.Choices[0].Delta.Content != "" { + content = chatCompletions.Choices[0].Delta.Content + } + fullContent += content + + for { + // feed content and check whether sentence is available + var sentenceIsFinal bool + sentence, content, sentenceIsFinal = parseSentence(sentence, content) + if len(sentence) == 0 || !sentenceIsFinal { + tenEnv.LogDebug(fmt.Sprintf("sentence %s is empty or not final", sentence)) + break + } + tenEnv.LogDebug(fmt.Sprintf("GetChatCompletionsStream recv for input text: [%s] got sentence: [%s]", inputText, sentence)) + + // send sentence + outputData, err := ten.NewData("text_data") + if err != nil { + tenEnv.LogError(fmt.Sprintf("NewData failed, err: %v", err)) + break + } + outputData.SetProperty(dataOutTextDataPropertyText, sentence) + outputData.SetProperty(dataOutTextDataPropertyTextEndOfSegment, false) + if err := tenEnv.SendData(outputData, nil); err != nil { + tenEnv.LogError(fmt.Sprintf("GetChatCompletionsStream recv for input text: [%s] send sentence [%s] failed, err: %v", inputText, sentence, err)) + break + } else { + tenEnv.LogInfo(fmt.Sprintf("GetChatCompletionsStream recv for input text: [%s] sent sentence [%s]", inputText, sentence)) + } + sentence = "" + + if !firstSentenceSent { + firstSentenceSent = true + tenEnv.LogInfo(fmt.Sprintf("GetChatCompletionsStream recv for input text: [%s] first sentence sent, first_sentency_latency %dms", + inputText, time.Since(startTime).Milliseconds())) + } + } + } + + // remember response as assistant content in memory + memoryChan <- openai.ChatCompletionMessage{ + Role: openai.ChatMessageRoleAssistant, + Content: fullContent, + } + + // send end of segment + outputData, _ := ten.NewData("text_data") + outputData.SetProperty(dataOutTextDataPropertyText, sentence) + outputData.SetProperty(dataOutTextDataPropertyTextEndOfSegment, true) + if err := tenEnv.SendData(outputData, nil); err != nil { + tenEnv.LogError(fmt.Sprintf("GetChatCompletionsStream for input text: [%s] end of segment with sentence [%s] send failed, err: %v", inputText, sentence, err)) + } else { + tenEnv.LogInfo(fmt.Sprintf("GetChatCompletionsStream for input text: [%s] end of segment with sentence [%s] sent", inputText, sentence)) + } + }(time.Now(), inputText, append([]openai.ChatCompletionMessage{}, memory...)) +} + +func init() { + // Register addon + ten.RegisterAddonAsExtension( + "openai_chatgpt", + ten.NewDefaultExtensionAddon(newChatGPTExtension), + ) +} diff --git a/agents/ten_packages/extension/openai_chatgpt/property.json b/agents/ten_packages/extension/openai_chatgpt/property.json new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/agents/ten_packages/extension/openai_chatgpt/property.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/agents/ten_packages/extension/openai_chatgpt/sentence.go b/agents/ten_packages/extension/openai_chatgpt/sentence.go new file mode 100644 index 0000000000000000000000000000000000000000..e9b9d31045af859d6ef91516dc8b54e1b01161ef --- /dev/null +++ b/agents/ten_packages/extension/openai_chatgpt/sentence.go @@ -0,0 +1,30 @@ +package 
extension + +func isPunctuation(r rune) bool { + if r == ',' || r == ',' || + r == '.' || r == '。' || + r == '?' || r == '?' || + r == '!' || r == '!' { + return true + } + return false +} + +func parseSentence(sentence, content string) (string, string, bool) { + var remain string + var foundPunc bool + + for _, r := range content { + if !foundPunc { + sentence += string(r) + } else { + remain += string(r) + } + + if !foundPunc && isPunctuation(r) { + foundPunc = true + } + } + + return sentence, remain, foundPunc +} diff --git a/agents/ten_packages/extension/openai_chatgpt/sentence_test.go b/agents/ten_packages/extension/openai_chatgpt/sentence_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b09fe30764534061471440c5c54467cafcbfb3f6 --- /dev/null +++ b/agents/ten_packages/extension/openai_chatgpt/sentence_test.go @@ -0,0 +1,150 @@ +package extension + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestIsPunctuation(t *testing.T) { + cases := []struct { + r rune + expect bool + }{ + {',', true}, + {',', true}, + {'.', true}, + {'。', true}, + {'?', true}, + {'?', true}, + {'!', true}, + {'!', true}, + + {'a', false}, + {'0', false}, + } + + for i, c := range cases { + require.Equal(t, c.expect, isPunctuation(c.r), "case %d", i) + } +} + +func TestSplitByPunctuation(t *testing.T) { + cases := []struct { + s string + expect []string + }{ + {"Hello world!", []string{"Hello world"}}, + {"Hey, there!", []string{"Hey", " there"}}, + } + + for i, c := range cases { + out := strings.FieldsFunc(c.s, isPunctuation) + require.Equal(t, c.expect, out, "case %d", i) + } +} + +func TestParseSentence_Should_NoFinalSentence(t *testing.T) { + cases := []struct { + sentence string + content string + + expectSentence string + expectContent string + }{ + { + sentence: "", + content: "", + expectSentence: "", + expectContent: "", + }, + { + sentence: "a", + content: "", + expectSentence: "a", + expectContent: "", + }, + { + sentence: "", + content: "a", + expectSentence: "a", + expectContent: "", + }, + { + sentence: "abc", + content: "ddd", + expectSentence: "abcddd", + expectContent: "", + }, + } + + for i, c := range cases { + sentence, content, final := parseSentence(c.sentence, c.content) + require.False(t, final, "case %d", i) + + require.Equal(t, c.expectSentence, sentence, "case %d", i) + require.Equal(t, c.expectContent, content, "case %d", i) + } +} + +func TestParseSentence_Should_FinalSentence(t *testing.T) { + cases := []struct { + sentence string + content string + + expectSentence string + expectContent string + }{ + { + sentence: "", + content: ",", + expectSentence: ",", + expectContent: "", + }, + { + sentence: "", + content: ",ddd", + expectSentence: ",", + expectContent: "ddd", + }, + { + sentence: "abc", + content: ",ddd", + expectSentence: "abc,", + expectContent: "ddd", + }, + { + sentence: "abc", + content: "dd,d", + expectSentence: "abcdd,", + expectContent: "d", + }, + { + sentence: "abc", + content: "ddd,", + expectSentence: "abcddd,", + expectContent: "", + }, + { + sentence: "abc", + content: "ddd,eee,fff,", + expectSentence: "abcddd,", + expectContent: "eee,fff,", + }, + { + sentence: "我的", + content: "你好,啊!", + expectSentence: "我的你好,", + expectContent: "啊!", + }, + } + + for i, c := range cases { + sentence, content, final := parseSentence(c.sentence, c.content) + require.True(t, final, "case %d", i) + + require.Equal(t, c.expectSentence, sentence, "case %d", i) + require.Equal(t, c.expectContent, content, "case 
%d", i) + } +} diff --git a/agents/ten_packages/extension/openai_chatgpt_python/README.md b/agents/ten_packages/extension/openai_chatgpt_python/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2a9b1c8214048ef3a0df141602778bc7105cd43c --- /dev/null +++ b/agents/ten_packages/extension/openai_chatgpt_python/README.md @@ -0,0 +1,60 @@ +# openai_chatgpt_python + +An extension for integrating OpenAI's GPT models (e.g., GPT-4) into your application, providing configurable AI-driven features such as conversational agents, task automation, and tool integration. + +## Features + + + +- OpenAI GPT Integration: Leverage GPT models for text processing and conversational tasks. +- Configurable: Easily customize API keys, model settings, prompts, temperature, etc. +- Async Queue Processing: Supports real-time message processing with task cancellation and prioritization. +- Tool Support: Integrate external tools like image recognition via OpenAI's API. + +## API + +Refer to `api` definition in [manifest.json] and default values in [property.json](property.json). + + + +| **Property** | **Type** | **Description** | +|----------------------------|------------|-------------------------------------------| +| `api_key` | `string` | API key for authenticating with OpenAI | +| `frequency_penalty` | `float64` | Controls how much to penalize new tokens based on their existing frequency in the text so far | +| `presence_penalty` | `float64` | Controls how much to penalize new tokens based on whether they appear in the text so far | +| `temperature` | `float64` | Sampling temperature, higher values mean more randomness | +| `top_p` | `float64` | Nucleus sampling, chooses tokens with cumulative probability `p` | +| `model` | `string` | Model identifier (e.g., GPT-3.5, GPT-4) | +| `max_tokens` | `int64` | Maximum number of tokens to generate | +| `base_url` | `string` | API base URL | +| `prompt` | `string` | Default prompt to send to the model | +| `greeting` | `string` | Greeting message to be used | +| `checking_vision_text_items`| `string` | Items for checking vision-based text responses | +| `proxy_url` | `string` | URL of the proxy server | +| `max_memory_length` | `int64` | Maximum memory length for processing | +| `enable_tools` | `bool` | Flag to enable or disable external tools | + +### Data In: +| **Name** | **Property** | **Type** | **Description** | +|----------------|--------------|------------|-------------------------------| +| `text_data` | `text` | `string` | Incoming text data | + +### Data Out: +| **Name** | **Property** | **Type** | **Description** | +|----------------|--------------|------------|-------------------------------| +| `text_data` | `text` | `string` | Outgoing text data | + +### Command In: +| **Name** | **Description** | +|----------------|---------------------------------------------| +| `flush` | Command to flush the current processing state | + +### Command Out: +| **Name** | **Description** | +|----------------|---------------------------------------------| +| `flush` | Response after flushing the current state | + +### Video Frame In: +| **Name** | **Description** | +|------------------|-------------------------------------------| +| `video_frame` | Video frame input for vision processing | diff --git a/agents/ten_packages/extension/openai_chatgpt_python/__init__.py b/agents/ten_packages/extension/openai_chatgpt_python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8cd75ddef4ae8e15366d6ed94ee557e6481a4989 --- 
/dev/null +++ b/agents/ten_packages/extension/openai_chatgpt_python/__init__.py @@ -0,0 +1,8 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved. +# +# +from . import addon diff --git a/agents/ten_packages/extension/openai_chatgpt_python/addon.py b/agents/ten_packages/extension/openai_chatgpt_python/addon.py new file mode 100644 index 0000000000000000000000000000000000000000..18fed90cc06853ef4cb75adcdea38b2ee5969ea6 --- /dev/null +++ b/agents/ten_packages/extension/openai_chatgpt_python/addon.py @@ -0,0 +1,21 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved. +# +# +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + + +@register_addon_as_extension("openai_chatgpt_python") +class OpenAIChatGPTExtensionAddon(Addon): + + def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None: + from .extension import OpenAIChatGPTExtension + ten_env.log_info("OpenAIChatGPTExtensionAddon on_create_instance") + ten_env.on_create_instance_done(OpenAIChatGPTExtension(name), context) diff --git a/agents/ten_packages/extension/openai_chatgpt_python/extension.py b/agents/ten_packages/extension/openai_chatgpt_python/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..a853f6e5a05c1c49d82673cdfc70e4da3ad20c48 --- /dev/null +++ b/agents/ten_packages/extension/openai_chatgpt_python/extension.py @@ -0,0 +1,426 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved. +# +# +import asyncio +import json +import time +import traceback +from typing import Iterable +import uuid + +from ten.async_ten_env import AsyncTenEnv +from ten_ai_base.const import CMD_PROPERTY_RESULT, CMD_TOOL_CALL, CONTENT_DATA_OUT_NAME, DATA_OUT_PROPERTY_END_OF_SEGMENT, DATA_OUT_PROPERTY_TEXT +from ten_ai_base.helper import ( + AsyncEventEmitter, + get_property_bool, + get_property_string, +) +from ten_ai_base.types import ( + LLMCallCompletionArgs, + LLMChatCompletionContentPartParam, + LLMChatCompletionUserMessageParam, + LLMChatCompletionMessageParam, + LLMDataCompletionArgs, + LLMToolMetadata, + LLMToolResult, +) +from ten_ai_base.llm import AsyncLLMBaseExtension + +from .helper import parse_sentences +from .openai import OpenAIChatGPT, OpenAIChatGPTConfig +from ten import ( + Cmd, + StatusCode, + CmdResult, + Data, +) + +CMD_IN_FLUSH = "flush" +CMD_IN_ON_USER_JOINED = "on_user_joined" +CMD_IN_ON_USER_LEFT = "on_user_left" +CMD_OUT_FLUSH = "flush" +DATA_IN_TEXT_DATA_PROPERTY_TEXT = "text" +DATA_IN_TEXT_DATA_PROPERTY_IS_FINAL = "is_final" +DATA_OUT_TEXT_DATA_PROPERTY_TEXT = "text" +DATA_OUT_TEXT_DATA_PROPERTY_TEXT_END_OF_SEGMENT = "end_of_segment" + + +class OpenAIChatGPTExtension(AsyncLLMBaseExtension): + def __init__(self, name: str): + super().__init__(name) + self.memory = [] + self.memory_cache = [] + self.config = None + self.client = None + self.sentence_fragment = "" + self.tool_task_future: asyncio.Future | None = None + self.users_count = 0 + self.last_reasoning_ts = 0 + + async def on_init(self, async_ten_env: AsyncTenEnv) -> None: + async_ten_env.log_info("on_init") + await super().on_init(async_ten_env) + + async def on_start(self, async_ten_env: AsyncTenEnv) -> None: + async_ten_env.log_info("on_start") + await super().on_start(async_ten_env) + + self.config = await OpenAIChatGPTConfig.create_async(ten_env=async_ten_env) + + # Mandatory properties + if not 
self.config.api_key: + async_ten_env.log_info("API key is missing, exiting on_start") + return + + # Create instance + try: + self.client = OpenAIChatGPT(async_ten_env, self.config) + async_ten_env.log_info( + f"initialized with max_tokens: {self.config.max_tokens}, model: {self.config.model}, vendor: {self.config.vendor}" + ) + except Exception as err: + async_ten_env.log_info(f"Failed to initialize OpenAIChatGPT: {err}") + + async def on_stop(self, async_ten_env: AsyncTenEnv) -> None: + async_ten_env.log_info("on_stop") + await super().on_stop(async_ten_env) + + async def on_deinit(self, async_ten_env: AsyncTenEnv) -> None: + async_ten_env.log_info("on_deinit") + await super().on_deinit(async_ten_env) + + async def on_cmd(self, async_ten_env: AsyncTenEnv, cmd: Cmd) -> None: + cmd_name = cmd.get_name() + async_ten_env.log_info(f"on_cmd name: {cmd_name}") + + if cmd_name == CMD_IN_FLUSH: + await self.flush_input_items(async_ten_env) + await async_ten_env.send_cmd(Cmd.create(CMD_OUT_FLUSH)) + async_ten_env.log_info("on_cmd sent flush") + status_code, detail = StatusCode.OK, "success" + cmd_result = CmdResult.create(status_code) + cmd_result.set_property_string("detail", detail) + await async_ten_env.return_result(cmd_result, cmd) + elif cmd_name == CMD_IN_ON_USER_JOINED: + self.users_count += 1 + # Send greeting when first user joined + if self.config.greeting and self.users_count == 1: + self.send_text_output(async_ten_env, self.config.greeting, True) + + status_code, detail = StatusCode.OK, "success" + cmd_result = CmdResult.create(status_code) + cmd_result.set_property_string("detail", detail) + await async_ten_env.return_result(cmd_result, cmd) + elif cmd_name == CMD_IN_ON_USER_LEFT: + self.users_count -= 1 + status_code, detail = StatusCode.OK, "success" + cmd_result = CmdResult.create(status_code) + cmd_result.set_property_string("detail", detail) + await async_ten_env.return_result(cmd_result, cmd) + else: + await super().on_cmd(async_ten_env, cmd) + + async def on_data(self, async_ten_env: AsyncTenEnv, data: Data) -> None: + data_name = data.get_name() + async_ten_env.log_debug("on_data name {}".format(data_name)) + + # Get the necessary properties + is_final = get_property_bool(data, "is_final") + input_text = get_property_string(data, "text") + + if not is_final: + async_ten_env.log_debug("ignore non-final input") + return + if not input_text: + async_ten_env.log_warn("ignore empty text") + return + + async_ten_env.log_info(f"OnData input text: [{input_text}]") + + # Start an asynchronous task for handling chat completion + message = LLMChatCompletionUserMessageParam(role="user", content=input_text) + await self.queue_input_item(False, messages=[message]) + + async def on_tools_update( + self, async_ten_env: AsyncTenEnv, tool: LLMToolMetadata + ) -> None: + return await super().on_tools_update(async_ten_env, tool) + + async def on_call_chat_completion( + self, async_ten_env: AsyncTenEnv, **kargs: LLMCallCompletionArgs + ) -> any: + kmessages: LLMChatCompletionUserMessageParam = kargs.get("messages", []) + + async_ten_env.log_info(f"on_call_chat_completion: {kmessages}") + response = await self.client.get_chat_completions(kmessages, None) + return response.to_json() + + async def on_data_chat_completion( + self, async_ten_env: AsyncTenEnv, **kargs: LLMDataCompletionArgs + ) -> None: + """Run the chatflow asynchronously.""" + kmessages: Iterable[LLMChatCompletionUserMessageParam] = kargs.get( + "messages", [] + ) + + if len(kmessages) == 0: + async_ten_env.log_error("No message in 
data") + return + + messages = [] + for message in kmessages: + messages = messages + [self.message_to_dict(message)] + + self.memory_cache = [] + memory = self.memory + try: + async_ten_env.log_info(f"for input text: [{messages}] memory: {memory}") + tools = None + no_tool = kargs.get("no_tool", False) + + for message in messages: + if ( + not isinstance(message.get("content"), str) + and message.get("role") == "user" + ): + non_artifact_content = [ + item + for item in message.get("content", []) + if item.get("type") == "text" + ] + non_artifact_message = { + "role": message.get("role"), + "content": non_artifact_content, + } + self.memory_cache = self.memory_cache + [ + non_artifact_message, + ] + else: + self.memory_cache = self.memory_cache + [ + message, + ] + self.memory_cache = self.memory_cache + [{"role": "assistant", "content": ""}] + + tools = None + if not no_tool and len(self.available_tools) > 0: + tools = [] + for tool in self.available_tools: + tools.append(self._convert_tools_to_dict(tool)) + async_ten_env.log_info(f"tool: {tool}") + + self.sentence_fragment = "" + + # Create an asyncio.Event to signal when content is finished + content_finished_event = asyncio.Event() + # Create a future to track the single tool call task + self.tool_task_future = None + + message_id = str(uuid.uuid4())[:8] + self.last_reasoning_ts = int(time.time() * 1000) + + # Create an async listener to handle tool calls and content updates + async def handle_tool_call(tool_call): + self.tool_task_future = asyncio.get_event_loop().create_future() + async_ten_env.log_info(f"tool_call: {tool_call}") + for tool in self.available_tools: + if tool_call["function"]["name"] == tool.name: + cmd: Cmd = Cmd.create(CMD_TOOL_CALL) + cmd.set_property_string("name", tool.name) + cmd.set_property_from_json( + "arguments", tool_call["function"]["arguments"] + ) + # cmd.set_property_from_json("arguments", json.dumps([])) + + # Send the command and handle the result through the future + [result, _] = await async_ten_env.send_cmd(cmd) + if result.get_status_code() == StatusCode.OK: + tool_result: LLMToolResult = json.loads( + result.get_property_to_json(CMD_PROPERTY_RESULT) + ) + + async_ten_env.log_info(f"tool_result: {tool_result}") + + + if tool_result["type"] == "llmresult": + result_content = tool_result["content"] + if isinstance(result_content, str): + tool_message = { + "role": "assistant", + "tool_calls": [tool_call], + } + new_message = { + "role": "tool", + "content": result_content, + "tool_call_id": tool_call["id"], + } + await self.queue_input_item( + True, messages=[tool_message, new_message], no_tool=True + ) + else: + async_ten_env.log_error( + f"Unknown tool result content: {result_content}" + ) + elif tool_result["type"] == "requery": + # self.memory_cache = [] + self.memory_cache.pop() + result_content = tool_result["content"] + nonlocal message + new_message = { + "role": "user", + "content": self._convert_to_content_parts( + message["content"] + ), + } + new_message["content"] = new_message[ + "content" + ] + self._convert_to_content_parts(result_content) + await self.queue_input_item( + True, messages=[new_message], no_tool=True + ) + else: + async_ten_env.log_error( + f"Unknown tool result type: {tool_result}" + ) + else: + async_ten_env.log_error("Tool call failed") + self.tool_task_future.set_result(None) + + async def handle_content_update(content: str): + # Append the content to the last assistant message + for item in reversed(self.memory_cache): + if item.get("role") == "assistant": + 
item["content"] = item["content"] + content + break + sentences, self.sentence_fragment = parse_sentences( + self.sentence_fragment, content + ) + for s in sentences: + self.send_text_output(async_ten_env, s, False) + + async def handle_reasoning_update(think: str): + ts = int(time.time() * 1000) + if ts - self.last_reasoning_ts >= 200: + self.last_reasoning_ts = ts + self.send_reasoning_text_output(async_ten_env, message_id, think, False) + + + async def handle_reasoning_update_finish(think: str): + self.last_reasoning_ts = int(time.time() * 1000) + self.send_reasoning_text_output(async_ten_env, message_id, think, True) + + async def handle_content_finished(_: str): + # Wait for the single tool task to complete (if any) + if self.tool_task_future: + await self.tool_task_future + content_finished_event.set() + + listener = AsyncEventEmitter() + listener.on("tool_call", handle_tool_call) + listener.on("content_update", handle_content_update) + listener.on("reasoning_update", handle_reasoning_update) + listener.on("reasoning_update_finish", handle_reasoning_update_finish) + listener.on("content_finished", handle_content_finished) + + # Make an async API call to get chat completions + await self.client.get_chat_completions_stream( + memory + messages, tools, listener + ) + + # Wait for the content to be finished + await content_finished_event.wait() + + async_ten_env.log_info( + f"Chat completion finished for input text: {messages}" + ) + except asyncio.CancelledError: + async_ten_env.log_info(f"Task cancelled: {messages}") + except Exception: + async_ten_env.log_error( + f"Error in chat_completion: {traceback.format_exc()} for input text: {messages}" + ) + finally: + self.send_text_output(async_ten_env, "", True) + # always append the memory + for m in self.memory_cache: + self._append_memory(m) + + def _convert_to_content_parts( + self, content: Iterable[LLMChatCompletionContentPartParam] + ): + content_parts = [] + + if isinstance(content, str): + content_parts.append({"type": "text", "text": content}) + else: + for part in content: + content_parts.append(part) + return content_parts + + def _convert_tools_to_dict(self, tool: LLMToolMetadata): + json_dict = { + "type": "function", + "function": { + "name": tool.name, + "description": tool.description, + "parameters": { + "type": "object", + "properties": {}, + "required": [], + "additionalProperties": False, + }, + }, + "strict": True, + } + + for param in tool.parameters: + json_dict["function"]["parameters"]["properties"][param.name] = { + "type": param.type, + "description": param.description, + } + if param.required: + json_dict["function"]["parameters"]["required"].append(param.name) + + return json_dict + + def message_to_dict(self, message: LLMChatCompletionMessageParam): + if message.get("content") is not None: + if isinstance(message["content"], str): + message["content"] = str(message["content"]) + else: + message["content"] = list(message["content"]) + return message + + def _append_memory(self, message: str): + if len(self.memory) > self.config.max_memory_length: + removed_item = self.memory.pop(0) + # Remove tool calls from memory + if removed_item.get("tool_calls") and self.memory[0].get("role") == "tool": + self.memory.pop(0) + self.memory.append(message) + + def send_reasoning_text_output( + self, async_ten_env: AsyncTenEnv, msg_id:str, sentence: str, end_of_segment: bool + ): + try: + output_data = Data.create(CONTENT_DATA_OUT_NAME) + output_data.set_property_string(DATA_OUT_PROPERTY_TEXT, json.dumps({ + "id":msg_id, + 
"data": { + "text": sentence + }, + "type": "reasoning" + })) + output_data.set_property_bool( + DATA_OUT_PROPERTY_END_OF_SEGMENT, end_of_segment + ) + asyncio.create_task(async_ten_env.send_data(output_data)) + # async_ten_env.log_info( + # f"{'end of segment ' if end_of_segment else ''}sent sentence [{sentence}]" + # ) + except Exception: + async_ten_env.log_warn( + f"send sentence [{sentence}] failed, err: {traceback.format_exc()}") \ No newline at end of file diff --git a/agents/ten_packages/extension/openai_chatgpt_python/helper.py b/agents/ten_packages/extension/openai_chatgpt_python/helper.py new file mode 100644 index 0000000000000000000000000000000000000000..4c1cb3a6722834829a540f39ea97e02b46d21117 --- /dev/null +++ b/agents/ten_packages/extension/openai_chatgpt_python/helper.py @@ -0,0 +1,99 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved. +# +# +from PIL import Image +from datetime import datetime +from io import BytesIO +from base64 import b64encode + + +def get_current_time(): + # Get the current time + start_time = datetime.now() + # Get the number of microseconds since the Unix epoch + unix_microseconds = int(start_time.timestamp() * 1_000_000) + return unix_microseconds + + +def is_punctuation(char): + if char in [",", ",", ".", "。", "?", "?", "!", "!"]: + return True + return False + + +def parse_sentences(sentence_fragment, content): + sentences = [] + current_sentence = sentence_fragment + for char in content: + current_sentence += char + if is_punctuation(char): + # Check if the current sentence contains non-punctuation characters + stripped_sentence = current_sentence + if any(c.isalnum() for c in stripped_sentence): + sentences.append(stripped_sentence) + current_sentence = "" # Reset for the next sentence + + remain = current_sentence # Any remaining characters form the incomplete sentence + return sentences, remain + + +def rgb2base64jpeg(rgb_data, width, height): + # Convert the RGB image to a PIL Image + pil_image = Image.frombytes("RGBA", (width, height), bytes(rgb_data)) + pil_image = pil_image.convert("RGB") + + # Resize the image while maintaining its aspect ratio + pil_image = resize_image_keep_aspect(pil_image, 320) + + # Save the image to a BytesIO object in JPEG format + buffered = BytesIO() + pil_image.save(buffered, format="JPEG") + # pil_image.save("test.jpg", format="JPEG") + + # Get the byte data of the JPEG image + jpeg_image_data = buffered.getvalue() + + # Convert the JPEG byte data to a Base64 encoded string + base64_encoded_image = b64encode(jpeg_image_data).decode("utf-8") + + # Create the data URL + mime_type = "image/jpeg" + base64_url = f"data:{mime_type};base64,{base64_encoded_image}" + return base64_url + + +def resize_image_keep_aspect(image, max_size=512): + """ + Resize an image while maintaining its aspect ratio, ensuring the larger dimension is max_size. + If both dimensions are smaller than max_size, the image is not resized. 
+ + :param image: A PIL Image object + :param max_size: The maximum size for the larger dimension (width or height) + :return: A PIL Image object (resized or original) + """ + # Get current width and height + width, height = image.size + + # If both dimensions are already smaller than max_size, return the original image + if width <= max_size and height <= max_size: + return image + + # Calculate the aspect ratio + aspect_ratio = width / height + + # Determine the new dimensions + if width > height: + new_width = max_size + new_height = int(max_size / aspect_ratio) + else: + new_height = max_size + new_width = int(max_size * aspect_ratio) + + # Resize the image with the new dimensions + resized_image = image.resize((new_width, new_height)) + + return resized_image diff --git a/agents/ten_packages/extension/openai_chatgpt_python/manifest.json b/agents/ten_packages/extension/openai_chatgpt_python/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..eb8754451d75009ea98dd6f06a2913b65a2dcebc --- /dev/null +++ b/agents/ten_packages/extension/openai_chatgpt_python/manifest.json @@ -0,0 +1,163 @@ +{ + "type": "extension", + "name": "openai_chatgpt_python", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "package": { + "include": [ + "manifest.json", + "property.json", + "BUILD.gn", + "**.tent", + "**.py", + "README.md" + ] + }, + "api": { + "property": { + "api_key": { + "type": "string" + }, + "frequency_penalty": { + "type": "float64" + }, + "presence_penalty": { + "type": "float64" + }, + "temperature": { + "type": "float64" + }, + "top_p": { + "type": "float64" + }, + "model": { + "type": "string" + }, + "max_tokens": { + "type": "int64" + }, + "base_url": { + "type": "string" + }, + "prompt": { + "type": "string" + }, + "greeting": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "max_memory_length": { + "type": "int64" + }, + "vendor": { + "type": "string" + }, + "azure_endpoint": { + "type": "string" + }, + "azure_api_version": { + "type": "string" + } + }, + "data_in": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + } + } + } + ], + "data_out": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + } + } + } + ], + "cmd_in": [ + { + "name": "flush" + }, + { + "name": "on_user_joined", + "property": {} + }, + { + "name": "on_user_left", + "property": {} + }, + { + "name": "tool_register", + "property": { + "tool": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "parameters": { + "type": "array", + "items": { + "type": "object", + "properties": {} + } + } + }, + "required": [ + "name", + "description", + "parameters" + ] + } + }, + "result": { + "property": { + "response": { + "type": "string" + } + } + } + } + ], + "cmd_out": [ + { + "name": "flush" + }, + { + "name": "tool_call", + "property": { + "name": { + "type": "string" + }, + "args": { + "type": "string" + } + }, + "required": [ + "name" + ] + } + ], + "video_frame_in": [ + { + "name": "video_frame" + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/openai_chatgpt_python/openai.py b/agents/ten_packages/extension/openai_chatgpt_python/openai.py new file mode 100644 index 0000000000000000000000000000000000000000..cc6f9e0c69da1b575e65dcebe9542d0c3c54468f --- /dev/null +++ b/agents/ten_packages/extension/openai_chatgpt_python/openai.py @@ -0,0 +1,240 
@@
+#
+#
+# Agora Real Time Engagement
+# Created by Wei Hu in 2024-08.
+# Copyright (c) 2024 Agora IO. All rights reserved.
+#
+#
+from collections import defaultdict
+from dataclasses import dataclass
+from enum import Enum
+import random
+import requests
+from openai import AsyncOpenAI, AsyncAzureOpenAI
+from openai.types.chat.chat_completion import ChatCompletion
+
+from ten.async_ten_env import AsyncTenEnv
+from ten_ai_base.config import BaseConfig
+
+
+@dataclass
+class OpenAIChatGPTConfig(BaseConfig):
+    api_key: str = ""
+    base_url: str = "https://api.openai.com/v1"
+    model: str = (
+        "gpt-4o"  # Adjust this to match the equivalent of `openai.GPT4o` in the Python library
+    )
+    prompt: str = (
+        "You are a voice assistant who talks in a conversational way and can chat with me like my friends. I will speak to you in English or Chinese, and you will answer in the corrected and improved version of my text with the language I use. Don’t talk like a robot, instead I would like you to talk like a real human with emotions. I will use your answer for text-to-speech, so don’t return me any meaningless characters. I want you to be helpful, when I’m asking you for advice, give me precise, practical and useful advice instead of being vague. When giving me a list of options, express the options in a narrative way instead of bullet points."
+    )
+    frequency_penalty: float = 0.9
+    presence_penalty: float = 0.9
+    top_p: float = 1.0
+    temperature: float = 0.1
+    max_tokens: int = 512
+    seed: int = random.randint(0, 10000)
+    proxy_url: str = ""
+    greeting: str = "Hello, how can I help you today?"
+    max_memory_length: int = 10
+    vendor: str = "openai"
+    azure_endpoint: str = ""
+    azure_api_version: str = ""
+
+
+class ReasoningMode(str, Enum):
+    ModeV1 = "v1"
+
+
+class ThinkParser:
+    def __init__(self):
+        self.state = 'NORMAL'  # States: 'NORMAL', 'THINK'
+        self.think_content = ""
+        self.content = ""
+
+    def process(self, new_chars):
+        if new_chars == "<think>":
+            self.state = 'THINK'
+            return True
+        elif new_chars == "</think>":
+            self.state = 'NORMAL'
+            return True
+        else:
+            if self.state == "THINK":
+                self.think_content += new_chars
+        return False
+
+    def process_by_reasoning_content(self, reasoning_content):
+        state_changed = False
+        if reasoning_content:
+            if self.state == 'NORMAL':
+                self.state = 'THINK'
+                state_changed = True
+            self.think_content += reasoning_content
+        elif self.state == 'THINK':
+            self.state = 'NORMAL'
+            state_changed = True
+        return state_changed
+
+
+class OpenAIChatGPT:
+    client = None
+
+    def __init__(self, ten_env: AsyncTenEnv, config: OpenAIChatGPTConfig):
+        self.config = config
+        self.ten_env = ten_env
+        ten_env.log_info(f"OpenAIChatGPT initialized with config: {config.api_key}")
+        if self.config.vendor == "azure":
+            self.client = AsyncAzureOpenAI(
+                api_key=config.api_key,
+                api_version=self.config.azure_api_version,
+                azure_endpoint=config.azure_endpoint,
+            )
+            ten_env.log_info(
+                f"Using Azure OpenAI with endpoint: {config.azure_endpoint}, api_version: {config.azure_api_version}"
+            )
+        else:
+            self.client = AsyncOpenAI(api_key=config.api_key, base_url=config.base_url, default_headers={
+                "api-key": config.api_key,
+                "Authorization": f"Bearer {config.api_key}"
+            })
+        self.session = requests.Session()
+        if config.proxy_url:
+            proxies = {
+                "http": config.proxy_url,
+                "https": config.proxy_url,
+            }
+            ten_env.log_info(f"Setting proxies: {proxies}")
+            self.session.proxies.update(proxies)
+        self.client.session = self.session
+
+    async def get_chat_completions(self, messages, tools=None) -> 
ChatCompletion: + req = { + "model": self.config.model, + "messages": [ + { + "role": "system", + "content": self.config.prompt, + }, + *messages, + ], + "tools": tools, + "temperature": self.config.temperature, + "top_p": self.config.top_p, + "presence_penalty": self.config.presence_penalty, + "frequency_penalty": self.config.frequency_penalty, + "max_tokens": self.config.max_tokens, + "seed": self.config.seed, + } + + try: + response = await self.client.chat.completions.create(**req) + except Exception as e: + raise RuntimeError(f"CreateChatCompletion failed, err: {e}") from e + + return response + + async def get_chat_completions_stream(self, messages, tools=None, listener=None): + req = { + "model": self.config.model, + "messages": [ + { + "role": "system", + "content": self.config.prompt, + }, + *messages, + ], + "tools": tools, + "temperature": self.config.temperature, + "top_p": self.config.top_p, + "presence_penalty": self.config.presence_penalty, + "frequency_penalty": self.config.frequency_penalty, + "max_tokens": self.config.max_tokens, + "seed": self.config.seed, + "stream": True, + } + + try: + response = await self.client.chat.completions.create(**req) + except Exception as e: + raise RuntimeError(f"CreateChatCompletionStream failed, err: {e}") from e + + full_content = "" + # Check for tool calls + tool_calls_dict = defaultdict( + lambda: { + "id": None, + "function": {"arguments": "", "name": None}, + "type": None, + } + ) + + # Example usage + parser = ThinkParser() + reasoning_mode = None + + async for chat_completion in response: + # self.ten_env.log_info(f"Chat completion: {chat_completion}") + if len(chat_completion.choices) == 0: + continue + choice = chat_completion.choices[0] + delta = choice.delta + + content = delta.content if delta and delta.content else "" + reasoning_content = delta.reasoning_content if delta and hasattr(delta, "reasoning_content") and delta.reasoning_content else "" + + if reasoning_mode is None and reasoning_content is not None: + reasoning_mode = ReasoningMode.ModeV1 + + # Emit content update event (fire-and-forget) + if listener and (content or reasoning_mode == ReasoningMode.ModeV1): + prev_state = parser.state + + if reasoning_mode == ReasoningMode.ModeV1: + self.ten_env.log_info("process_by_reasoning_content") + think_state_changed = parser.process_by_reasoning_content(reasoning_content) + else: + think_state_changed = parser.process(content) + + if not think_state_changed: + # self.ten_env.log_info(f"state: {parser.state}, content: {content}, think: {parser.think_content}") + if parser.state == "THINK": + listener.emit("reasoning_update", parser.think_content) + elif parser.state == "NORMAL": + listener.emit("content_update", content) + + if prev_state == "THINK" and parser.state == "NORMAL": + listener.emit("reasoning_update_finish", parser.think_content) + parser.think_content = "" + + full_content += content + + if delta.tool_calls: + for tool_call in delta.tool_calls: + if tool_call.id is not None: + tool_calls_dict[tool_call.index]["id"] = tool_call.id + + # If the function name is not None, set it + if tool_call.function.name is not None: + tool_calls_dict[tool_call.index]["function"][ + "name" + ] = tool_call.function.name + + # Append the arguments + tool_calls_dict[tool_call.index]["function"][ + "arguments" + ] += tool_call.function.arguments + + # If the type is not None, set it + if tool_call.type is not None: + tool_calls_dict[tool_call.index]["type"] = tool_call.type + + # Convert the dictionary to a list + tool_calls_list 
= list(tool_calls_dict.values()) + + # Emit tool calls event (fire-and-forget) + if listener and tool_calls_list: + for tool_call in tool_calls_list: + listener.emit("tool_call", tool_call) + + # Emit content finished event after the loop completes + if listener: + listener.emit("content_finished", full_content) diff --git a/agents/ten_packages/extension/openai_chatgpt_python/property.json b/agents/ten_packages/extension/openai_chatgpt_python/property.json new file mode 100644 index 0000000000000000000000000000000000000000..b7d95f6f73ec86e61f185d565537e607c13f33ec --- /dev/null +++ b/agents/ten_packages/extension/openai_chatgpt_python/property.json @@ -0,0 +1,11 @@ +{ + "base_url": "", + "api_key": "${env:OPENAI_API_KEY}", + "frequency_penalty": 0.9, + "model": "${env:OPENAI_MODEL}", + "max_tokens": 512, + "prompt": "", + "proxy_url": "${env:OPENAI_PROXY_URL}", + "greeting": "TEN Agent connected. How can I help you today?", + "max_memory_length": 10 +} \ No newline at end of file diff --git a/agents/ten_packages/extension/openai_chatgpt_python/requirements.txt b/agents/ten_packages/extension/openai_chatgpt_python/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..2ad22ea8b82e35b163d99a51e5cffdc73c521f8a --- /dev/null +++ b/agents/ten_packages/extension/openai_chatgpt_python/requirements.txt @@ -0,0 +1,4 @@ +openai +numpy +requests[socks] +pillow \ No newline at end of file diff --git a/agents/ten_packages/extension/openai_image_generate_tool/README.md b/agents/ten_packages/extension/openai_image_generate_tool/README.md new file mode 100644 index 0000000000000000000000000000000000000000..73c27cb295238c98eefd40dcae36d3871467a85c --- /dev/null +++ b/agents/ten_packages/extension/openai_image_generate_tool/README.md @@ -0,0 +1,29 @@ +# image_generate_tool + + + +## Features + + + +- xxx feature + +## API + +Refer to `api` definition in [manifest.json] and default values in [property.json](property.json). + + + +## Development + +### Build + + + +### Unit test + + + +## Misc + + diff --git a/agents/ten_packages/extension/openai_image_generate_tool/__init__.py b/agents/ten_packages/extension/openai_image_generate_tool/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..72593ab2259f95627bdd500fe3d062984e7f44c6 --- /dev/null +++ b/agents/ten_packages/extension/openai_image_generate_tool/__init__.py @@ -0,0 +1,6 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. +# +from . import addon diff --git a/agents/ten_packages/extension/openai_image_generate_tool/addon.py b/agents/ten_packages/extension/openai_image_generate_tool/addon.py new file mode 100644 index 0000000000000000000000000000000000000000..b203478332e5c2f12adeea9d309e250eb657d113 --- /dev/null +++ b/agents/ten_packages/extension/openai_image_generate_tool/addon.py @@ -0,0 +1,20 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. 
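The `get_chat_completions_stream` helper above communicates with `extension.py` purely through emitted events (`content_update`, `reasoning_update`, `reasoning_update_finish`, `tool_call`, `content_finished`). A minimal, self-contained sketch of that contract; `MiniEmitter` is a hypothetical stand-in for `ten_ai_base.helper.AsyncEventEmitter`, whose interface is assumed from the `.on(...)`/`.emit(...)` calls used in this diff:

```python
import asyncio


class MiniEmitter:
    # Assumed interface: .on(event, coroutine) to register, .emit(event, *args)
    # to schedule handlers fire-and-forget on the running loop.
    def __init__(self):
        self._handlers = {}

    def on(self, event, handler):
        self._handlers.setdefault(event, []).append(handler)

    def emit(self, event, *args):
        for handler in self._handlers.get(event, []):
            asyncio.ensure_future(handler(*args))


async def main():
    listener = MiniEmitter()
    received = []

    async def on_content(delta: str):
        received.append(delta)

    async def on_finished(full: str):
        print("finished:", full)

    listener.on("content_update", on_content)
    listener.on("content_finished", on_finished)

    # Simulate the producer side: it accumulates full_content itself and
    # emits one content_update per streamed delta, then content_finished.
    full_content = ""
    for delta in ["Hello,", " world", "."]:
        full_content += delta
        listener.emit("content_update", delta)
    listener.emit("content_finished", full_content)
    await asyncio.sleep(0)  # let the scheduled handler tasks run


asyncio.run(main())
```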
+# +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) +from .extension import OpenAIImageGenerateToolExtension + + +@register_addon_as_extension("openai_image_generate_tool") +class OpenAIImageGenerateToolExtensionAddon(Addon): + + def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None: + ten_env.log_info("on_create_instance") + ten_env.on_create_instance_done( + OpenAIImageGenerateToolExtension(name), context) diff --git a/agents/ten_packages/extension/openai_image_generate_tool/extension.py b/agents/ten_packages/extension/openai_image_generate_tool/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..2e56b9432fa7a475ab2e6a5215f852e6a9f850e8 --- /dev/null +++ b/agents/ten_packages/extension/openai_image_generate_tool/extension.py @@ -0,0 +1,94 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. +# +import asyncio +import json +from ten import ( + Data, + TenEnv, + AsyncTenEnv, +) +from ten_ai_base.const import DATA_OUT_PROPERTY_END_OF_SEGMENT, DATA_OUT_PROPERTY_TEXT, CONTENT_DATA_OUT_NAME +from ten_ai_base.types import LLMToolMetadataParameter, LLMToolResultLLMResult +from ten_ai_base.llm_tool import ( + AsyncLLMToolBaseExtension, LLMToolMetadata, LLMToolResult +) +from .openai import OpenAIImageGenerateClient, OpenAIImageGenerateToolConfig + +class OpenAIImageGenerateToolExtension(AsyncLLMToolBaseExtension): + def __init__(self, name: str): + super().__init__(name) + self.config = None + self.client = None + + async def on_start(self, ten_env: AsyncTenEnv) -> None: + await super().on_start(ten_env) + + # initialize configuration + self.config = await OpenAIImageGenerateToolConfig.create_async(ten_env=ten_env) + ten_env.log_info(f"config: {self.config}") + + if not self.config.api_key: + ten_env.log_error("API key is not set") + return + + # initialize OpenAIImageGenerateClient + self.client = OpenAIImageGenerateClient(ten_env, self.config) + + async def on_stop(self, ten_env: AsyncTenEnv) -> None: + await super().on_stop(ten_env) + + def get_tool_metadata(self, ten_env: TenEnv) -> list[LLMToolMetadata]: + return [ + LLMToolMetadata( + name="image_generate", + description="Generate image by prompt query", + parameters=[ + LLMToolMetadataParameter( + name="prompt", + type="string", + description="Prompt to generate images in the language you use", + ), + ], + ) + ] + + async def send_image(self, async_ten_env: AsyncTenEnv, image_url: str) -> None: + # Implement this method to send the image to the chat. + async_ten_env.log_info(f"Sending image: {image_url}") + try: + sentence = json.dumps({"data":{"image_url": image_url}, "type": "image_url"}) + output_data = Data.create(CONTENT_DATA_OUT_NAME) + output_data.set_property_string( + DATA_OUT_PROPERTY_TEXT, + sentence + ) + output_data.set_property_bool( + DATA_OUT_PROPERTY_END_OF_SEGMENT, True + ) + asyncio.create_task(async_ten_env.send_data(output_data)) + async_ten_env.log_info( + f"sent sentence [{sentence}]" + ) + except Exception as err: + async_ten_env.log_warn(f"send sentence [{sentence}] failed, err: {err}") + + + async def run_tool(self, ten_env: AsyncTenEnv, name: str, args: dict) -> LLMToolResult | None: + ten_env.log_info(f"run_tool {name} {args}") + if name == "image_generate": + prompt = args.get("prompt") + if prompt: + # Implement this method to run your tool with the given arguments. 
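+                # The happy path below returns an LLMToolResultLLMResult of type
+                # "llmresult"; the image itself reaches the user out-of-band via
+                # send_image, so the tool result only needs to signal success.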
+ ten_env.log_info(f"Generating image with prompt: {prompt}") + # call OpenAIImageGenerateClient to generate images + response_url = await self.client.generate_images(prompt) + ten_env.log_info(f"Generated image: {response_url}") + await self.send_image(ten_env, response_url) + result = LLMToolResultLLMResult( + type="llmresult", + content=json.dumps({"success": True}), + ) + return result diff --git a/agents/ten_packages/extension/openai_image_generate_tool/manifest.json b/agents/ten_packages/extension/openai_image_generate_tool/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..b3c4b84afa089a54f6dd1b17907d8a10af6a05f4 --- /dev/null +++ b/agents/ten_packages/extension/openai_image_generate_tool/manifest.json @@ -0,0 +1,117 @@ +{ + "type": "extension", + "name": "openai_image_generate_tool", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "package": { + "include": [ + "manifest.json", + "property.json", + "requirements.txt", + "**.tent", + "**.py", + "README.md" + ] + }, + "api": { + "property": { + "api_key": { + "type": "string" + }, + "model": { + "type": "string" + }, + "base_url": { + "type": "string" + }, + "size": { + "type": "string" + }, + "n": { + "type": "int64" + }, + "proxy_url": { + "type": "string" + }, + "vendor": { + "type": "string" + }, + "azure_endpoint": { + "type": "string" + }, + "azure_api_version": { + "type": "string" + } + }, + "cmd_in": [ + { + "name": "tool_call", + "property": { + "name": { + "type": "string" + }, + "arguments": { + "type": "object", + "properties": {} + } + }, + "required": [ + "name" + ] + } + ], + "cmd_out": [ + { + "name": "tool_register", + "property": { + "tool": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "parameters": { + "type": "array", + "items": { + "type": "object", + "properties": {} + } + } + }, + "required": [ + "name", + "description", + "parameters" + ] + } + }, + "result": { + "property": { + "response": { + "type": "string" + } + } + } + } + ], + "data_out": [ + { + "name": "content_data", + "property": { + "text": { + "type": "string" + } + } + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/openai_image_generate_tool/openai.py b/agents/ten_packages/extension/openai_image_generate_tool/openai.py new file mode 100644 index 0000000000000000000000000000000000000000..36bb74f72a30fe2fb2c4d7ec07b13690f176d6b7 --- /dev/null +++ b/agents/ten_packages/extension/openai_image_generate_tool/openai.py @@ -0,0 +1,70 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved. 
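Stripped of the Azure branch and proxy wiring, the request that `OpenAIImageGenerateClient.generate_images` (defined in the file below) sends reduces to a single `images.generate` call. A runnable sketch with the config defaults inlined; the API key and prompt are placeholders:

```python
import asyncio

from openai import AsyncOpenAI


async def generate_image_url(prompt: str) -> str:
    # Mirrors the request built from OpenAIImageGenerateToolConfig defaults:
    # model="dall-e-3", size="1024x1024", quality="standard", n=1.
    client = AsyncOpenAI(api_key="sk-...")  # placeholder key
    response = await client.images.generate(
        model="dall-e-3",
        prompt=prompt,
        size="1024x1024",
        quality="standard",
        n=1,
    )
    # The extension only uses the first image's hosted URL.
    return response.data[0].url


print(asyncio.run(generate_image_url("a watercolor lighthouse at dusk")))
```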
+# +# +from dataclasses import dataclass +import requests +from openai import AsyncOpenAI, AsyncAzureOpenAI + +from ten.async_ten_env import AsyncTenEnv +from ten_ai_base.config import BaseConfig + + +@dataclass +class OpenAIImageGenerateToolConfig(BaseConfig): + api_key: str = "" + base_url: str = "https://api.openai.com/v1" + model: str = "dall-e-3" + size: str = "1024x1024" + quality: str = "standard" + n: int = 1 + proxy_url: str = "" + vendor: str = "openai" + azure_endpoint: str = "" + azure_api_version: str = "" + +class OpenAIImageGenerateClient: + client = None + + def __init__(self, ten_env: AsyncTenEnv, config: OpenAIImageGenerateToolConfig): + self.config = config + ten_env.log_info(f"OpenAIImageGenerateClient initialized with config: {config.api_key}") + if self.config.vendor == "azure": + self.client = AsyncAzureOpenAI( + api_key=config.api_key, + api_version=self.config.azure_api_version, + azure_endpoint=config.azure_endpoint, + ) + ten_env.log_info( + f"Using Azure OpenAI with endpoint: {config.azure_endpoint}, api_version: {config.azure_api_version}" + ) + else: + self.client = AsyncOpenAI(api_key=config.api_key, base_url=config.base_url) + self.session = requests.Session() + if config.proxy_url: + proxies = { + "http": config.proxy_url, + "https": config.proxy_url, + } + ten_env.log_info(f"Setting proxies: {proxies}") + self.session.proxies.update(proxies) + self.client.session = self.session + + + async def generate_images(self, prompt: str) -> str: + req = { + "model": self.config.model, + "prompt": prompt, + "size": self.config.size, + "quality": self.config.quality, + "n": self.config.n, + } + + try: + response = await self.client.images.generate(**req) + except Exception as e: + raise RuntimeError(f"GenerateImages failed, err: {e}") from e + return response.data[0].url \ No newline at end of file diff --git a/agents/ten_packages/extension/openai_image_generate_tool/property.json b/agents/ten_packages/extension/openai_image_generate_tool/property.json new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/agents/ten_packages/extension/openai_image_generate_tool/property.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/agents/ten_packages/extension/openai_image_generate_tool/requirements.txt b/agents/ten_packages/extension/openai_image_generate_tool/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..2fec3068446e2885055c595e911e87cae8a88c5a --- /dev/null +++ b/agents/ten_packages/extension/openai_image_generate_tool/requirements.txt @@ -0,0 +1,2 @@ +openai +requests[socks] \ No newline at end of file diff --git a/agents/ten_packages/extension/openai_v2v_python/README.md b/agents/ten_packages/extension/openai_v2v_python/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3cd294f3dd0de745df035921cf238de5fe3bb218 --- /dev/null +++ b/agents/ten_packages/extension/openai_v2v_python/README.md @@ -0,0 +1,65 @@ +# openai_v2v_python + +An extension for integrating OpenAI's Next Generation of **Multimodal** AI into your application, providing configurable AI-driven features such as conversational agents, task automation, and tool integration. + +## Features + + + +- OpenAI **Multimodal** Integration: Leverage GPT **Multimodal** models for voice to voice as well as text processing. +- Configurable: Easily customize API keys, model settings, prompts, temperature, etc. 
+- Async Queue Processing: Supports real-time message processing with task cancellation and prioritization.
+
+
+## API
+
+Refer to `api` definition in [manifest.json] and default values in [property.json](property.json).
+
+
+
+| **Property**               | **Type**   | **Description**                           |
+|----------------------------|------------|-------------------------------------------|
+| `api_key`                  | `string`   | API key for authenticating with OpenAI    |
+| `temperature`              | `float64`  | Sampling temperature, higher values mean more randomness |
+| `model`                    | `string`   | Model identifier (e.g., GPT-3.5, GPT-4)   |
+| `max_tokens`               | `int64`    | Maximum number of tokens to generate      |
+| `system_message`           | `string`   | Default system message to send to the model |
+| `voice`                    | `string`   | Voice the OpenAI model speaks with, such as `alloy`, `echo`, `shimmer`, etc. |
+| `server_vad`               | `bool`     | Flag to enable or disable OpenAI's server-side VAD |
+| `language`                 | `string`   | Language the OpenAI model responds in, such as `en-US`, `zh-CN`, etc. |
+| `dump`                     | `bool`     | Flag to enable or disable audio dumping for debugging purposes |
+
+### Data Out:
+| **Name**       | **Property** | **Type**   | **Description**               |
+|----------------|--------------|------------|-------------------------------|
+| `text_data`    | `text`       | `string`   | Outgoing text data            |
+
+### Command Out:
+| **Name**       | **Description**                             |
+|----------------|---------------------------------------------|
+| `flush`        | Response after flushing the current state   |
+
+### Audio Frame In:
+| **Name**         | **Description**                           |
+|------------------|-------------------------------------------|
+| `pcm_frame`      | Audio frame input for voice processing    |
+
+### Audio Frame Out:
+| **Name**         | **Description**                           |
+|------------------|-------------------------------------------|
+| `pcm_frame`      | Audio frame output after voice processing |
+
+
+### Azure Support
+
+This extension also supports the Azure OpenAI Service; the property settings are as follows:
+
+``` json
+{
+  "base_uri": "wss://xxx.openai.azure.com",
+  "path": "/openai/realtime?api-version=xxx&deployment=xxx",
+  "api_key": "xxx",
+  "model": "gpt-4o-realtime-preview",
+  "vendor": "azure"
+}
+```
\ No newline at end of file
diff --git a/agents/ten_packages/extension/openai_v2v_python/__init__.py b/agents/ten_packages/extension/openai_v2v_python/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8cd75ddef4ae8e15366d6ed94ee557e6481a4989
--- /dev/null
+++ b/agents/ten_packages/extension/openai_v2v_python/__init__.py
@@ -0,0 +1,8 @@
+#
+#
+# Agora Real Time Engagement
+# Created by Wei Hu in 2024-08.
+# Copyright (c) 2024 Agora IO. All rights reserved.
+#
+#
+from . import addon
diff --git a/agents/ten_packages/extension/openai_v2v_python/addon.py b/agents/ten_packages/extension/openai_v2v_python/addon.py
new file mode 100644
index 0000000000000000000000000000000000000000..be3aa46aeef9e13972f4b6ccb1145811cc57e4c3
--- /dev/null
+++ b/agents/ten_packages/extension/openai_v2v_python/addon.py
@@ -0,0 +1,22 @@
+#
+#
+# Agora Real Time Engagement
+# Created by Wei Hu in 2024-08.
+# Copyright (c) 2024 Agora IO. All rights reserved.
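For the plain OpenAI endpoint the equivalent property block is smaller. A sketch with illustrative values: the key names come from the table above, while the env-var name is hypothetical, following the `${env:...}` convention used elsewhere in this diff:

```json
{
  "api_key": "${env:OPENAI_REALTIME_API_KEY}",
  "model": "gpt-4o-realtime-preview",
  "voice": "alloy",
  "language": "en-US",
  "server_vad": true,
  "dump": false
}
```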
+# +# +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + + +@register_addon_as_extension("openai_v2v_python") +class OpenAIRealtimeExtensionAddon(Addon): + + def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None: + from .extension import OpenAIRealtimeExtension + + ten_env.log_info("OpenAIRealtimeExtensionAddon on_create_instance") + ten_env.on_create_instance_done(OpenAIRealtimeExtension(name), context) diff --git a/agents/ten_packages/extension/openai_v2v_python/extension.py b/agents/ten_packages/extension/openai_v2v_python/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..a0fb60ebe100446b787c15aa6750ed630ff9140d --- /dev/null +++ b/agents/ten_packages/extension/openai_v2v_python/extension.py @@ -0,0 +1,834 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved. +# +# +import asyncio +import base64 +import json +from enum import Enum +import traceback +import time +import numpy as np +from datetime import datetime +from typing import Iterable + +from ten import ( + AudioFrame, + AsyncTenEnv, + Cmd, + StatusCode, + CmdResult, + Data, +) +from ten.audio_frame import AudioFrameDataFmt +from ten_ai_base.const import CMD_PROPERTY_RESULT, CMD_TOOL_CALL +from dataclasses import dataclass +from ten_ai_base.config import BaseConfig +from ten_ai_base.chat_memory import ( + ChatMemory, + EVENT_MEMORY_EXPIRED, + EVENT_MEMORY_APPENDED, +) +from ten_ai_base.usage import ( + LLMUsage, + LLMCompletionTokensDetails, + LLMPromptTokensDetails, +) +from ten_ai_base.types import ( + LLMToolMetadata, + LLMToolResult, + LLMChatCompletionContentPartParam, +) +from ten_ai_base.llm import AsyncLLMBaseExtension +from .realtime.connection import RealtimeApiConnection +from .realtime.struct import ( + ItemCreate, + SessionCreated, + ItemCreated, + UserMessageItemParam, + AssistantMessageItemParam, + ItemInputAudioTranscriptionCompleted, + ItemInputAudioTranscriptionFailed, + ResponseCreated, + ResponseDone, + ResponseAudioTranscriptDelta, + ResponseTextDelta, + ResponseAudioTranscriptDone, + ResponseTextDone, + ResponseOutputItemDone, + ResponseOutputItemAdded, + ResponseAudioDelta, + ResponseAudioDone, + InputAudioBufferSpeechStarted, + InputAudioBufferSpeechStopped, + ResponseFunctionCallArgumentsDone, + ErrorMessage, + ItemDelete, + ItemTruncate, + SessionUpdate, + SessionUpdateParams, + InputAudioTranscription, + ContentType, + FunctionCallOutputItemParam, + ResponseCreate, +) + +CMD_IN_FLUSH = "flush" +CMD_IN_ON_USER_JOINED = "on_user_joined" +CMD_IN_ON_USER_LEFT = "on_user_left" +CMD_OUT_FLUSH = "flush" + + +class Role(str, Enum): + User = "user" + Assistant = "assistant" + + +@dataclass +class OpenAIRealtimeConfig(BaseConfig): + base_uri: str = "wss://api.openai.com" + api_key: str = "" + path: str = "/v1/realtime" + model: str = "gpt-4o-realtime-preview" + language: str = "en-US" + prompt: str = "" + temperature: float = 0.5 + max_tokens: int = 1024 + voice: str = "alloy" + server_vad: bool = True + audio_out: bool = True + input_transcript: bool = True + sample_rate: int = 24000 + + vendor: str = "" + stream_id: int = 0 + dump: bool = False + greeting: str = "" + max_history: int = 20 + enable_storage: bool = False + + def build_ctx(self) -> dict: + return { + "language": self.language, + "model": self.model, + } + + +class OpenAIRealtimeExtension(AsyncLLMBaseExtension): + + def __init__(self, name: str): + super().__init__(name) + self.ten_env: AsyncTenEnv = None + 
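+        # Realtime connection state: conn is created in on_start, while session
+        # and session_id are filled in once the server sends SessionCreated.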
self.conn = None + self.session = None + self.session_id = None + + self.config: OpenAIRealtimeConfig = None + self.stopped: bool = False + self.connected: bool = False + self.buffer: bytearray = b"" + self.memory: ChatMemory = None + self.total_usage: LLMUsage = LLMUsage() + self.users_count = 0 + + self.stream_id: int = 0 + self.remote_stream_id: int = 0 + self.channel_name: str = "" + self.audio_len_threshold: int = 5120 + + self.completion_times = [] + self.connect_times = [] + self.first_token_times = [] + + self.buff: bytearray = b"" + self.transcript: str = "" + self.ctx: dict = {} + self.input_end = time.time() + + async def on_init(self, ten_env: AsyncTenEnv) -> None: + await super().on_init(ten_env) + ten_env.log_debug("on_init") + + async def on_start(self, ten_env: AsyncTenEnv) -> None: + await super().on_start(ten_env) + ten_env.log_debug("on_start") + self.ten_env = ten_env + + self.loop = asyncio.get_event_loop() + + self.config = await OpenAIRealtimeConfig.create_async(ten_env=ten_env) + ten_env.log_info(f"config: {self.config}") + + if not self.config.api_key: + ten_env.log_error("api_key is required") + return + + try: + self.memory = ChatMemory(self.config.max_history) + + if self.config.enable_storage: + [result, _] = await ten_env.send_cmd(Cmd.create("retrieve")) + if result.get_status_code() == StatusCode.OK: + try: + history = json.loads(result.get_property_string("response")) + for i in history: + self.memory.put(i) + ten_env.log_info(f"on retrieve context {history}") + except Exception as e: + ten_env.log_error(f"Failed to handle retrieve result {e}") + else: + ten_env.log_warn("Failed to retrieve content") + + self.memory.on(EVENT_MEMORY_EXPIRED, self._on_memory_expired) + self.memory.on(EVENT_MEMORY_APPENDED, self._on_memory_appended) + + self.ctx = self.config.build_ctx() + self.ctx["greeting"] = self.config.greeting + + self.conn = RealtimeApiConnection( + ten_env=ten_env, + base_uri=self.config.base_uri, + path=self.config.path, + api_key=self.config.api_key, + model=self.config.model, + vendor=self.config.vendor, + ) + ten_env.log_info("Finish init client") + + self.loop.create_task(self._loop()) + except Exception as e: + traceback.print_exc() + self.ten_env.log_error(f"Failed to init client {e}") + + async def on_stop(self, ten_env: AsyncTenEnv) -> None: + await super().on_stop(ten_env) + ten_env.log_info("on_stop") + + self.stopped = True + + async def on_audio_frame(self, _: AsyncTenEnv, audio_frame: AudioFrame) -> None: + try: + stream_id = audio_frame.get_property_int("stream_id") + if self.channel_name == "": + self.channel_name = audio_frame.get_property_string("channel") + + if self.remote_stream_id == 0: + self.remote_stream_id = stream_id + + frame_buf = audio_frame.get_buf() + self._dump_audio_if_need(frame_buf, Role.User) + + await self._on_audio(frame_buf) + if not self.config.server_vad: + self.input_end = time.time() + except Exception as e: + traceback.print_exc() + self.ten_env.log_error(f"OpenAIV2VExtension on audio frame failed {e}") + + async def on_cmd(self, ten_env: AsyncTenEnv, cmd: Cmd) -> None: + cmd_name = cmd.get_name() + ten_env.log_debug("on_cmd name {}".format(cmd_name)) + + status = StatusCode.OK + detail = "success" + + if cmd_name == CMD_IN_FLUSH: + # Will only flush if it is client side vad + await self._flush() + await ten_env.send_cmd(Cmd.create(CMD_OUT_FLUSH)) + ten_env.log_info("on flush") + elif cmd_name == CMD_IN_ON_USER_JOINED: + self.users_count += 1 + # Send greeting when first user joined + if self.users_count == 
1: + await self._greeting() + elif cmd_name == CMD_IN_ON_USER_LEFT: + self.users_count -= 1 + else: + # Register tool + await super().on_cmd(ten_env, cmd) + return + + cmd_result = CmdResult.create(status) + cmd_result.set_property_string("detail", detail) + await ten_env.return_result(cmd_result, cmd) + + # Not support for now + async def on_data(self, ten_env: AsyncTenEnv, data: Data) -> None: + pass + + async def _loop(self): + def get_time_ms() -> int: + current_time = datetime.now() + return current_time.microsecond // 1000 + + try: + start_time = time.time() + await self.conn.connect() + self.connect_times.append(time.time() - start_time) + item_id = "" # For truncate + response_id = "" + content_index = 0 + relative_start_ms = get_time_ms() + flushed = set() + + self.ten_env.log_info("Client loop started") + async for message in self.conn.listen(): + try: + # self.ten_env.log_info(f"Received message: {message.type}") + match message: + case SessionCreated(): + self.ten_env.log_info( + f"Session is created: {message.session}" + ) + self.session_id = message.session.id + self.session = message.session + await self._update_session() + + history = self.memory.get() + for h in history: + if h["role"] == "user": + await self.conn.send_request( + ItemCreate( + item=UserMessageItemParam( + content=[ + { + "type": ContentType.InputText, + "text": h["content"], + } + ] + ) + ) + ) + elif h["role"] == "assistant": + await self.conn.send_request( + ItemCreate( + item=AssistantMessageItemParam( + content=[ + { + "type": ContentType.InputText, + "text": h["content"], + } + ] + ) + ) + ) + self.ten_env.log_info(f"Finish send history {history}") + self.memory.clear() + + if not self.connected: + self.connected = True + await self._greeting() + case ItemInputAudioTranscriptionCompleted(): + self.ten_env.log_info( + f"On request transcript {message.transcript}" + ) + self._send_transcript(message.transcript, Role.User, True) + self.memory.put( + { + "role": "user", + "content": message.transcript, + "id": message.item_id, + } + ) + case ItemInputAudioTranscriptionFailed(): + self.ten_env.log_warn( + f"On request transcript failed {message.item_id} {message.error}" + ) + case ItemCreated(): + self.ten_env.log_info(f"On item created {message.item}") + case ResponseCreated(): + response_id = message.response.id + self.ten_env.log_info(f"On response created {response_id}") + case ResponseDone(): + msg_resp_id = message.response.id + status = message.response.status + if msg_resp_id == response_id: + response_id = "" + self.ten_env.log_info( + f"On response done {msg_resp_id} {status} {message.response.usage}" + ) + if message.response.usage: + pass + # await self._update_usage(message.response.usage) + case ResponseAudioTranscriptDelta(): + self.ten_env.log_info( + f"On response transcript delta {message.response_id} {message.output_index} {message.content_index} {message.delta}" + ) + if message.response_id in flushed: + self.ten_env.log_warn( + f"On flushed transcript delta {message.response_id} {message.output_index} {message.content_index} {message.delta}" + ) + continue + self._send_transcript(message.delta, Role.Assistant, False) + case ResponseTextDelta(): + self.ten_env.log_info( + f"On response text delta {message.response_id} {message.output_index} {message.content_index} {message.delta}" + ) + if message.response_id in flushed: + self.ten_env.log_warn( + f"On flushed text delta {message.response_id} {message.output_index} {message.content_index} {message.delta}" + ) + continue + if item_id != 
+                                item_id = message.item_id
+                                self.first_token_times.append(
+                                    time.time() - self.input_end
+                                )
+                            self._send_transcript(message.delta, Role.Assistant, False)
+                        case ResponseAudioTranscriptDone():
+                            self.ten_env.log_info(
+                                f"On response transcript done {message.output_index} {message.content_index} {message.transcript}"
+                            )
+                            if message.response_id in flushed:
+                                self.ten_env.log_warn(
+                                    f"On flushed transcript done {message.response_id}"
+                                )
+                                continue
+                            self.memory.put(
+                                {
+                                    "role": "assistant",
+                                    "content": message.transcript,
+                                    "id": message.item_id,
+                                }
+                            )
+                            self.transcript = ""
+                            self._send_transcript("", Role.Assistant, True)
+                        case ResponseTextDone():
+                            self.ten_env.log_info(
+                                f"On response text done {message.output_index} {message.content_index} {message.text}"
+                            )
+                            if message.response_id in flushed:
+                                self.ten_env.log_warn(
+                                    f"On flushed text done {message.response_id}"
+                                )
+                                continue
+                            self.completion_times.append(time.time() - self.input_end)
+                            self.transcript = ""
+                            self._send_transcript("", Role.Assistant, True)
+                        case ResponseOutputItemDone():
+                            self.ten_env.log_info(f"Output item done {message.item}")
+                        case ResponseOutputItemAdded():
+                            self.ten_env.log_info(
+                                f"Output item added {message.output_index} {message.item}"
+                            )
+                        case ResponseAudioDelta():
+                            if message.response_id in flushed:
+                                self.ten_env.log_warn(
+                                    f"On flushed audio delta {message.response_id} {message.item_id} {message.content_index}"
+                                )
+                                continue
+                            if item_id != message.item_id:
+                                item_id = message.item_id
+                                self.first_token_times.append(
+                                    time.time() - self.input_end
+                                )
+                            content_index = message.content_index
+                            await self._on_audio_delta(message.delta)
+                        case ResponseAudioDone():
+                            self.completion_times.append(time.time() - self.input_end)
+                        case InputAudioBufferSpeechStarted():
+                            self.ten_env.log_info(
+                                f"On server listening, in response {response_id}, last item {item_id}"
+                            )
+                            # Truncate the on-going audio stream
+                            end_ms = get_time_ms() - relative_start_ms
+                            if item_id:
+                                truncate = ItemTruncate(
+                                    item_id=item_id,
+                                    content_index=content_index,
+                                    audio_end_ms=end_ms,
+                                )
+                                await self.conn.send_request(truncate)
+                            if self.config.server_vad:
+                                await self._flush()
+                            if response_id and self.transcript:
+                                transcript = self.transcript + "[interrupted]"
+                                self._send_transcript(transcript, Role.Assistant, True)
+                                self.transcript = ""
+                                # grows without bound; change to an LRU structure later
+                                flushed.add(response_id)
+                            item_id = ""
+                        case InputAudioBufferSpeechStopped():
+                            # Only for server vad
+                            self.input_end = time.time()
+                            relative_start_ms = get_time_ms() - message.audio_end_ms
+                            self.ten_env.log_info(
+                                f"On server stop listening, {message.audio_end_ms}, relative {relative_start_ms}"
+                            )
+                        case ResponseFunctionCallArgumentsDone():
+                            tool_call_id = message.call_id
+                            name = message.name
+                            arguments = message.arguments
+                            self.ten_env.log_info(f"Need to call function {name}")
+                            self.loop.create_task(
+                                self._handle_tool_call(tool_call_id, name, arguments)
+                            )
+                        case ErrorMessage():
+                            self.ten_env.log_error(
+                                f"Error message received: {message.error}"
+                            )
+                        case _:
+                            self.ten_env.log_debug(f"Not handled message {message}")
+                except Exception as e:
+                    traceback.print_exc()
+                    self.ten_env.log_error(f"Error processing message: {message} {e}")
+
+            self.ten_env.log_info("Client loop finished")
+        except Exception as e:
+            traceback.print_exc()
+            self.ten_env.log_error(f"Failed to handle loop {e}")
+
+        # clear so that a new session can be triggered
+        self.connected = False
+        self.remote_stream_id = 0
+
+        if not self.stopped:
+            await self.conn.close()
+            await asyncio.sleep(0.5)
+            self.ten_env.log_info("Reconnect")
+
+            self.conn = RealtimeApiConnection(
+                ten_env=self.ten_env,
+                base_uri=self.config.base_uri,
+                path=self.config.path,
+                api_key=self.config.api_key,
+                model=self.config.model,
+                vendor=self.config.vendor,
+            )
+
+            self.loop.create_task(self._loop())
+
+    async def _on_memory_expired(self, message: dict) -> None:
+        self.ten_env.log_info(f"Memory expired: {message}")
+        item_id = message.get("item_id")
+        if item_id:
+            await self.conn.send_request(ItemDelete(item_id=item_id))
+
+    async def _on_memory_appended(self, message: dict) -> None:
+        self.ten_env.log_info(f"Memory appended: {message}")
+        if not self.config.enable_storage:
+            return
+
+        role = message.get("role")
+        stream_id = self.remote_stream_id if role == Role.User else 0
+        try:
+            d = Data.create("append")
+            d.set_property_string("text", message.get("content"))
+            d.set_property_string("role", role)
+            d.set_property_int("stream_id", stream_id)
+            asyncio.create_task(self.ten_env.send_data(d))
+        except Exception as e:
+            self.ten_env.log_error(f"Error sending append_context data {message} {e}")
+
+    # Direction: IN
+    async def _on_audio(self, buff: bytearray):
+        self.buff += buff
+        # Buffer audio
+        if self.connected and len(self.buff) >= self.audio_len_threshold:
+            await self.conn.send_audio_data(self.buff)
+            self.buff = b""
+
+    async def _update_session(self) -> None:
+        tools = []
+
+        def tool_dict(tool: LLMToolMetadata):
+            t = {
+                "type": "function",
+                "name": tool.name,
+                "description": tool.description,
+                "parameters": {
+                    "type": "object",
+                    "properties": {},
+                    "required": [],
+                    "additionalProperties": False,
+                },
+            }
+
+            for param in tool.parameters:
+                t["parameters"]["properties"][param.name] = {
+                    "type": param.type,
+                    "description": param.description,
+                }
+                if param.required:
+                    t["parameters"]["required"].append(param.name)
+
+            return t
+
+        if self.available_tools:
+            tool_prompt = "You have several tools that you can get help from:\n"
+            for t in self.available_tools:
+                tool_prompt += f"- ***{t.name}***: {t.description}\n"
+            self.ctx["tools"] = tool_prompt
+            tools = [tool_dict(t) for t in self.available_tools]
+        prompt = self._replace(self.config.prompt)
+
+        self.ten_env.log_info(f"update session {prompt} {tools}")
+        su = SessionUpdate(
+            session=SessionUpdateParams(
+                instructions=prompt,
+                model=self.config.model,
+                tool_choice="auto" if self.available_tools else "none",
+                tools=tools,
+            )
+        )
+        if self.config.audio_out:
+            su.session.voice = self.config.voice
+        else:
+            su.session.modalities = ["text"]
+
+        if self.config.input_transcript:
+            su.session.input_audio_transcription = InputAudioTranscription(
+                model="whisper-1"
+            )
+        await self.conn.send_request(su)
+
+    async def on_tools_update(self, _: AsyncTenEnv, tool: LLMToolMetadata) -> None:
+        """Called when a new tool is registered. 
Implement this method to process the new tool.""" + self.ten_env.log_info(f"on tools update {tool}") + # await self._update_session() + + def _replace(self, prompt: str) -> str: + result = prompt + for token, value in self.ctx.items(): + result = result.replace("{" + token + "}", value) + return result + + # Direction: OUT + async def _on_audio_delta(self, delta: bytes) -> None: + audio_data = base64.b64decode(delta) + self.ten_env.log_debug( + f"on_audio_delta audio_data len {len(audio_data)} samples {len(audio_data) // 2}" + ) + self._dump_audio_if_need(audio_data, Role.Assistant) + + f = AudioFrame.create("pcm_frame") + f.set_sample_rate(self.config.sample_rate) + f.set_bytes_per_sample(2) + f.set_number_of_channels(1) + f.set_data_fmt(AudioFrameDataFmt.INTERLEAVE) + f.set_samples_per_channel(len(audio_data) // 2) + f.alloc_buf(len(audio_data)) + buff = f.lock_buf() + buff[:] = audio_data + f.unlock_buf(buff) + await self.ten_env.send_audio_frame(f) + + def _send_transcript(self, content: str, role: Role, is_final: bool) -> None: + def is_punctuation(char): + if char in [",", ",", ".", "。", "?", "?", "!", "!"]: + return True + return False + + def parse_sentences(sentence_fragment, content): + sentences = [] + current_sentence = sentence_fragment + for char in content: + current_sentence += char + if is_punctuation(char): + # Check if the current sentence contains non-punctuation characters + stripped_sentence = current_sentence + if any(c.isalnum() for c in stripped_sentence): + sentences.append(stripped_sentence) + current_sentence = "" # Reset for the next sentence + + remain = current_sentence # Any remaining characters form the incomplete sentence + return sentences, remain + + def send_data( + ten_env: AsyncTenEnv, + sentence: str, + stream_id: int, + role: str, + is_final: bool, + ): + try: + d = Data.create("text_data") + d.set_property_string("text", sentence) + d.set_property_bool("end_of_segment", is_final) + d.set_property_string("role", role) + d.set_property_int("stream_id", stream_id) + ten_env.log_info( + f"send transcript text [{sentence}] stream_id {stream_id} is_final {is_final} end_of_segment {is_final} role {role}" + ) + asyncio.create_task(ten_env.send_data(d)) + except Exception as e: + ten_env.log_error( + f"Error send text data {role}: {sentence} {is_final} {e}" + ) + + stream_id = self.remote_stream_id if role == Role.User else 0 + try: + if role == Role.Assistant and not is_final: + sentences, self.transcript = parse_sentences(self.transcript, content) + for s in sentences: + send_data(self.ten_env, s, stream_id, role, is_final) + else: + send_data(self.ten_env, content, stream_id, role, is_final) + except Exception as e: + self.ten_env.log_error( + f"Error send text data {role}: {content} {is_final} {e}" + ) + + def _dump_audio_if_need(self, buf: bytearray, role: Role) -> None: + if not self.config.dump: + return + + with open("{}_{}.pcm".format(role, self.channel_name), "ab") as dump_file: + dump_file.write(buf) + + async def _handle_tool_call( + self, tool_call_id: str, name: str, arguments: str + ) -> None: + self.ten_env.log_info(f"_handle_tool_call {tool_call_id} {name} {arguments}") + cmd: Cmd = Cmd.create(CMD_TOOL_CALL) + cmd.set_property_string("name", name) + cmd.set_property_from_json("arguments", arguments) + [result, _] = await self.ten_env.send_cmd(cmd) + + tool_response = ItemCreate( + item=FunctionCallOutputItemParam( + call_id=tool_call_id, + output='{"success":false}', + ) + ) + if result.get_status_code() == StatusCode.OK: + tool_result: 
LLMToolResult = json.loads(
+                result.get_property_to_json(CMD_PROPERTY_RESULT)
+            )
+
+            result_content = tool_result["content"]
+            tool_response.item.output = json.dumps(
+                self._convert_to_content_parts(result_content)
+            )
+            self.ten_env.log_info(f"tool_result: {tool_call_id} {tool_result}")
+        else:
+            self.ten_env.log_error("Tool call failed")
+
+        await self.conn.send_request(tool_response)
+        await self.conn.send_request(ResponseCreate())
+        self.ten_env.log_info(f"_remote_tool_call finished {name} {arguments}")
+
+    def _greeting_text(self) -> str:
+        text = "Hi, there."
+        if self.config.language == "zh-CN":
+            text = "你好。"
+        elif self.config.language == "ja-JP":
+            text = "こんにちは"
+        elif self.config.language == "ko-KR":
+            text = "안녕하세요"
+        return text
+
+    def _convert_tool_params_to_dict(self, tool: LLMToolMetadata):
+        json_dict = {"type": "object", "properties": {}, "required": []}
+
+        for param in tool.parameters:
+            json_dict["properties"][param.name] = {
+                "type": param.type,
+                "description": param.description,
+            }
+            if param.required:
+                json_dict["required"].append(param.name)
+
+        return json_dict
+
+    def _convert_to_content_parts(
+        self, content: Iterable[LLMChatCompletionContentPartParam]
+    ):
+        content_parts = []
+
+        if isinstance(content, str):
+            content_parts.append({"type": "text", "text": content})
+        else:
+            for part in content:
+                # Only text content is supported currently for v2v model
+                if part["type"] == "text":
+                    content_parts.append(part)
+        return content_parts
+
+    async def _greeting(self) -> None:
+        if self.connected and self.users_count == 1:
+            text = self._greeting_text()
+            if self.config.greeting:
+                text = "Say '" + self.config.greeting + "' to me."
+            self.ten_env.log_info(f"send greeting {text}")
+            await self.conn.send_request(
+                ItemCreate(
+                    item=UserMessageItemParam(
+                        content=[{"type": ContentType.InputText, "text": text}]
+                    )
+                )
+            )
+            await self.conn.send_request(ResponseCreate())
+
+    async def _flush(self) -> None:
+        try:
+            c = Cmd.create("flush")
+            await self.ten_env.send_cmd(c)
+        except Exception:
+            self.ten_env.log_error("Error sending flush")
+
+    async def _update_usage(self, usage: dict) -> None:
+        self.total_usage.completion_tokens += usage.get("output_tokens") or 0
+        self.total_usage.prompt_tokens += usage.get("input_tokens") or 0
+        self.total_usage.total_tokens += usage.get("total_tokens") or 0
+        if not self.total_usage.completion_tokens_details:
+            self.total_usage.completion_tokens_details = LLMCompletionTokensDetails()
+        if not self.total_usage.prompt_tokens_details:
+            self.total_usage.prompt_tokens_details = LLMPromptTokensDetails()
+
+        if usage.get("output_token_details"):
+            self.total_usage.completion_tokens_details.accepted_prediction_tokens += (
+                usage["output_token_details"].get("text_tokens") or 0
+            )
+            self.total_usage.completion_tokens_details.audio_tokens += (
+                usage["output_token_details"].get("audio_tokens") or 0
+            )
+
+        if usage.get("input_token_details"):
+            self.total_usage.prompt_tokens_details.audio_tokens += (
+                usage["input_token_details"].get("audio_tokens") or 0
+            )
+            self.total_usage.prompt_tokens_details.cached_tokens += (
+                usage["input_token_details"].get("cached_tokens") or 0
+            )
+            self.total_usage.prompt_tokens_details.text_tokens += (
+                usage["input_token_details"].get("text_tokens") or 0
+            )
+
+        self.ten_env.log_info(f"total usage: {self.total_usage}")
+
+        data = Data.create("llm_stat")
+        data.set_property_from_json("usage", json.dumps(self.total_usage.model_dump()))
+        if self.connect_times and self.completion_times and self.first_token_times:
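The lines that follow attach p95/p99 latency aggregates to the `llm_stat` payload. As a standalone sketch of the numpy calls involved (the sample values here are made up; in the extension these lists are appended to inside `_loop`):

```python
import numpy as np

connect_times = [0.42, 0.39, 0.51, 0.47]    # seconds per (re)connect, illustrative
print(np.percentile(connect_times, 95))      # feeds "connection_latency_95"
print(np.percentile(connect_times, 99))      # feeds "connection_latency_99"
```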
data.set_property_from_json( + "latency", + json.dumps( + { + "connection_latency_95": np.percentile(self.connect_times, 95), + "completion_latency_95": np.percentile( + self.completion_times, 95 + ), + "first_token_latency_95": np.percentile( + self.first_token_times, 95 + ), + "connection_latency_99": np.percentile(self.connect_times, 99), + "completion_latency_99": np.percentile( + self.completion_times, 99 + ), + "first_token_latency_99": np.percentile( + self.first_token_times, 99 + ), + } + ), + ) + asyncio.create_task(self.ten_env.send_data(data)) + + async def on_call_chat_completion(self, async_ten_env, **kargs): + raise NotImplementedError + + async def on_data_chat_completion(self, async_ten_env, **kargs): + raise NotImplementedError diff --git a/agents/ten_packages/extension/openai_v2v_python/manifest.json b/agents/ten_packages/extension/openai_v2v_python/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..721d1aeeaa1b2bc741511921d8c44dd5c44e3031 --- /dev/null +++ b/agents/ten_packages/extension/openai_v2v_python/manifest.json @@ -0,0 +1,174 @@ +{ + "type": "extension", + "name": "openai_v2v_python", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "package": { + "include": [ + "manifest.json", + "property.json", + "BUILD.gn", + "**.tent", + "**.py", + "README.md", + "realtime/**.tent", + "realtime/**.py" + ] + }, + "api": { + "property": { + "base_uri": { + "type": "string" + }, + "api_key": { + "type": "string" + }, + "path": { + "type": "string" + }, + "model": { + "type": "string" + }, + "language": { + "type": "string" + }, + "prompt": { + "type": "string" + }, + "temperature": { + "type": "float32" + }, + "max_tokens": { + "type": "int32" + }, + "voice": { + "type": "string" + }, + "server_vad": { + "type": "bool" + }, + "audio_out": { + "type": "bool" + }, + "input_transcript": { + "type": "bool" + }, + "sample_rate": { + "type": "int32" + }, + "vendor": { + "type": "string" + }, + "stream_id": { + "type": "int32" + }, + "dump": { + "type": "bool" + }, + "greeting": { + "type": "string" + }, + "max_history": { + "type": "int32" + }, + "enable_storage": { + "type": "bool" + } + }, + "audio_frame_in": [ + { + "name": "pcm_frame", + "property": { + "stream_id": { + "type": "int64" + } + } + } + ], + "data_out": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + } + } + }, + { + "name": "append", + "property": { + "text": { + "type": "string" + } + } + } + ], + "cmd_in": [ + { + "name": "tool_register", + "property": { + "tool": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "parameters": { + "type": "array", + "items": { + "type": "object", + "properties": {} + } + } + }, + "required": [ + "name", + "description", + "parameters" + ] + } + }, + "result": { + "property": { + "response": { + "type": "string" + } + } + } + } + ], + "cmd_out": [ + { + "name": "flush" + }, + { + "name": "tool_call", + "property": { + "name": { + "type": "string" + }, + "args": { + "type": "string" + } + }, + "required": [ + "name" + ] + } + ], + "audio_frame_out": [ + { + "name": "pcm_frame" + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/openai_v2v_python/property.json b/agents/ten_packages/extension/openai_v2v_python/property.json new file mode 100644 index 0000000000000000000000000000000000000000..3bdbbbb91e0eda77302ee46725ea899b5c558f63 --- 
/dev/null +++ b/agents/ten_packages/extension/openai_v2v_python/property.json @@ -0,0 +1,11 @@ +{ + "api_key": "${env:OPENAI_REALTIME_API_KEY}", + "temperature": 0.9, + "model": "gpt-4o-realtime-preview", + "max_tokens": 2048, + "voice": "alloy", + "language": "en-US", + "server_vad": true, + "history": 10, + "enable_storage": false +} \ No newline at end of file diff --git a/agents/ten_packages/extension/openai_v2v_python/realtime/__init__.py b/agents/ten_packages/extension/openai_v2v_python/realtime/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/agents/ten_packages/extension/openai_v2v_python/realtime/connection.py b/agents/ten_packages/extension/openai_v2v_python/realtime/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..33d7810f29dc74e1fbb175052113e8c1aab2d4c7 --- /dev/null +++ b/agents/ten_packages/extension/openai_v2v_python/realtime/connection.py @@ -0,0 +1,118 @@ +import asyncio +import base64 +import json +import os +import aiohttp + +from ten import AsyncTenEnv + +from typing import Any, AsyncGenerator +from .struct import InputAudioBufferAppend, ClientToServerMessage, ServerToClientMessage, parse_server_message, to_json + +DEFAULT_VIRTUAL_MODEL = "gpt-4o-realtime-preview" + +VENDOR_AZURE = "azure" + +def smart_str(s: str, max_field_len: int = 128) -> str: + """parse string as json, truncate data field to 128 characters, reserialize""" + try: + data = json.loads(s) + if "delta" in data: + key = "delta" + elif "audio" in data: + key = "audio" + else: + return s + + if len(data[key]) > max_field_len: + data[key] = data[key][:max_field_len] + "..." + return json.dumps(data) + except json.JSONDecodeError: + return s + + +class RealtimeApiConnection: + def __init__( + self, + ten_env: AsyncTenEnv, + base_uri: str, + api_key: str | None = None, + path: str = "/v1/realtime", + model: str = DEFAULT_VIRTUAL_MODEL, + vendor: str = "", + verbose: bool = False + ): + self.ten_env = ten_env + self.vendor = vendor + self.url = f"{base_uri}{path}" + if not self.vendor and "model=" not in self.url: + self.url += f"?model={model}" + + self.api_key = api_key or os.environ.get("OPENAI_API_KEY") + self.websocket: aiohttp.ClientWebSocketResponse | None = None + self.verbose = verbose + self.session = aiohttp.ClientSession() + + async def __aenter__(self) -> "RealtimeApiConnection": + await self.connect() + return self + + async def __aexit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> bool: + await self.close() + return False + + async def connect(self): + headers = {} + auth = None + if self.vendor == VENDOR_AZURE: + headers = {"api-key": self.api_key} + elif not self.vendor: + auth = aiohttp.BasicAuth("", self.api_key) if self.api_key else None + headers = {"OpenAI-Beta": "realtime=v1"} + + self.websocket = await self.session.ws_connect( + url=self.url, + auth=auth, + headers=headers, + ) + + async def send_audio_data(self, audio_data: bytes): + """audio_data is assumed to be pcm16 24kHz mono little-endian""" + base64_audio_data = base64.b64encode(audio_data).decode("utf-8") + message = InputAudioBufferAppend(audio=base64_audio_data) + await self.send_request(message) + + async def send_request(self, message: ClientToServerMessage): + assert self.websocket is not None + message_str = to_json(message) + if self.verbose: + self.ten_env.log_info(f"-> {smart_str(message_str)}") + await self.websocket.send_str(message_str) + + async def listen(self) -> 
AsyncGenerator[ServerToClientMessage, None]:
+        assert self.websocket is not None
+        if self.verbose:
+            self.ten_env.log_info("Listening for realtimeapi messages")
+        try:
+            async for msg in self.websocket:
+                if msg.type == aiohttp.WSMsgType.TEXT:
+                    if self.verbose:
+                        self.ten_env.log_info(f"<- {smart_str(msg.data)}")
+                    yield self.handle_server_message(msg.data)
+                elif msg.type == aiohttp.WSMsgType.ERROR:
+                    self.ten_env.log_error(
+                        f"Error during receive: {self.websocket.exception()}"
+                    )
+                    break
+        except asyncio.CancelledError:
+            self.ten_env.log_info("Receive messages task cancelled")
+
+    def handle_server_message(self, message: str) -> ServerToClientMessage:
+        try:
+            return parse_server_message(message)
+        except Exception as e:
+            self.ten_env.log_error(f"Error handling message {e}")
+
+    async def close(self):
+        # Close the websocket connection if it exists
+        if self.websocket:
+            await self.websocket.close()
+            self.websocket = None
diff --git a/agents/ten_packages/extension/openai_v2v_python/realtime/struct.py b/agents/ten_packages/extension/openai_v2v_python/realtime/struct.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a9f04357d8f7071322f4d6ba733432416b28c9e
--- /dev/null
+++ b/agents/ten_packages/extension/openai_v2v_python/realtime/struct.py
@@ -0,0 +1,735 @@
+import json
+
+from dataclasses import dataclass, asdict, field, is_dataclass
+from typing import Any, Dict, Literal, Optional, List, Set, Union
+from enum import Enum
+import uuid
+
+
+def generate_event_id() -> str:
+    return str(uuid.uuid4())
+
+# Enums
+class Voices(str, Enum):
+    Alloy = "alloy"
+    Echo = "echo"
+    Fable = "fable"
+    Nova = "nova"
+    Nova_2 = "nova_2"
+    Nova_3 = "nova_3"
+    Nova_4 = "nova_4"
+    Nova_5 = "nova_5"
+    Onyx = "onyx"
+    Shimmer = "shimmer"
+
+class AudioFormats(str, Enum):
+    PCM16 = "pcm16"
+    G711_ULAW = "g711_ulaw"
+    G711_ALAW = "g711_alaw"
+
+class ItemType(str, Enum):
+    Message = "message"
+    FunctionCall = "function_call"
+    FunctionCallOutput = "function_call_output"
+
+class MessageRole(str, Enum):
+    System = "system"
+    User = "user"
+    Assistant = "assistant"
+
+class ContentType(str, Enum):
+    InputText = "input_text"
+    InputAudio = "input_audio"
+    Text = "text"
+    Audio = "audio"
+
+@dataclass
+class FunctionToolChoice:
+    name: str  # Name of the function
+    type: str = "function"  # Fixed value for type
+
+# ToolChoice can be either a literal string or FunctionToolChoice
+ToolChoice = Union[str, FunctionToolChoice]  # "none", "auto", "required", or FunctionToolChoice
+
+@dataclass
+class RealtimeError:
+    type: str  # The type of the error
+    message: str  # The error message
+    code: Optional[str] = None  # Optional error code
+    param: Optional[str] = None  # Optional parameter related to the error
+    event_id: Optional[str] = None  # Optional event ID for tracing
+
+@dataclass
+class InputAudioTranscription:
+    model: str = "whisper-1"  # Default transcription model is "whisper-1"
+
+@dataclass
+class ServerVADUpdateParams:
+    threshold: Optional[float] = None  # Threshold for voice activity detection
+    prefix_padding_ms: Optional[int] = None  # Amount of padding before the voice starts (in milliseconds)
+    silence_duration_ms: Optional[int] = None  # Duration of silence before considering speech stopped (in milliseconds)
+    type: str = "server_vad"  # Fixed value for VAD type
+
+@dataclass
+class Session:
+    id: str  # The unique identifier for the session
+    model: str  # The model associated with the session (e.g., "gpt-3")
+    expires_at: int  # Expiration time of the session in seconds since the epoch 
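A minimal usage sketch for the `RealtimeApiConnection` class above; the endpoint, key, and `ten_env` harness are placeholders, not part of the patch:

```python
import asyncio

async def demo(ten_env):
    conn = RealtimeApiConnection(
        ten_env=ten_env,
        base_uri="wss://api.openai.com",   # placeholder endpoint
        api_key="sk-placeholder",
        model="gpt-4o-realtime-preview",
    )
    async with conn:  # __aenter__ connects; __aexit__ closes the socket
        await conn.send_audio_data(b"\x00\x00" * 240)  # 10 ms of pcm16 silence @ 24 kHz
        async for message in conn.listen():
            print(type(message).__name__)  # e.g. SessionCreated, ResponseAudioDelta
            break
```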
(UNIX timestamp) + object: str = "realtime.session" # Fixed value indicating the object type + modalities: Set[str] = field(default_factory=lambda: {"text", "audio"}) # Set of allowed modalities (e.g., "text", "audio") + instructions: Optional[str] = None # Instructions or guidance for the session + voice: Voices = Voices.Alloy # Voice configuration for audio responses, defaulting to "Alloy" + turn_detection: Optional[ServerVADUpdateParams] = None # Voice activity detection (VAD) settings + input_audio_format: AudioFormats = AudioFormats.PCM16 # Audio format for input (e.g., "pcm16") + output_audio_format: AudioFormats = AudioFormats.PCM16 # Audio format for output (e.g., "pcm16") + input_audio_transcription: Optional[InputAudioTranscription] = None # Audio transcription model settings (e.g., "whisper-1") + tools: List[Dict[str, Union[str, Any]]] = field(default_factory=list) # List of tools available during the session + tool_choice: Literal["auto", "none", "required"] = "auto" # How tools should be used in the session + temperature: float = 0.8 # Temperature setting for model creativity + max_response_output_tokens: Union[int, Literal["inf"]] = "inf" # Maximum number of tokens in the response, or "inf" for unlimited + + +@dataclass +class SessionUpdateParams: + model: Optional[str] = None # Optional string to specify the model + modalities: Optional[Set[str]] = None # Set of allowed modalities (e.g., "text", "audio") + instructions: Optional[str] = None # Optional instructions string + voice: Optional[Voices] = None # Voice selection, can be `None` or from `Voices` Enum + turn_detection: Optional[ServerVADUpdateParams] = None # Server VAD update params + input_audio_format: Optional[AudioFormats] = None # Input audio format from `AudioFormats` Enum + output_audio_format: Optional[AudioFormats] = None # Output audio format from `AudioFormats` Enum + input_audio_transcription: Optional[InputAudioTranscription] = None # Optional transcription model + tools: Optional[List[Dict[str, Union[str, any]]]] = None # List of tools (e.g., dictionaries) + tool_choice: Optional[ToolChoice] = None # ToolChoice, either string or `FunctionToolChoice` + temperature: Optional[float] = None # Optional temperature for response generation + max_response_output_tokens: Optional[Union[int, str]] = None # Max response tokens, "inf" for infinite + + +# Define individual message item param types +@dataclass +class SystemMessageItemParam: + content: List[dict] # This can be more specific based on content structure + id: Optional[str] = None + status: Optional[str] = None + type: str = "message" + role: str = "system" + +@dataclass +class UserMessageItemParam: + content: List[dict] # Similarly, content can be more specific + id: Optional[str] = None + status: Optional[str] = None + type: str = "message" + role: str = "user" + +@dataclass +class AssistantMessageItemParam: + content: List[dict] # Content structure here depends on your schema + id: Optional[str] = None + status: Optional[str] = None + type: str = "message" + role: str = "assistant" + +@dataclass +class FunctionCallItemParam: + name: str + call_id: str + arguments: str + type: str = "function_call" + id: Optional[str] = None + status: Optional[str] = None + +@dataclass +class FunctionCallOutputItemParam: + call_id: str + output: str + id: Optional[str] = None + type: str = "function_call_output" + +# Union of all possible item types +ItemParam = Union[ + SystemMessageItemParam, + UserMessageItemParam, + AssistantMessageItemParam, + FunctionCallItemParam, 
+ FunctionCallOutputItemParam +] + + +# Assuming the EventType and other enums are already defined +# For reference: +class EventType(str, Enum): + SESSION_UPDATE = "session.update" + INPUT_AUDIO_BUFFER_APPEND = "input_audio_buffer.append" + INPUT_AUDIO_BUFFER_COMMIT = "input_audio_buffer.commit" + INPUT_AUDIO_BUFFER_CLEAR = "input_audio_buffer.clear" + UPDATE_CONVERSATION_CONFIG = "update_conversation_config" + ITEM_CREATE = "conversation.item.create" + ITEM_TRUNCATE = "conversation.item.truncate" + ITEM_DELETE = "conversation.item.delete" + RESPONSE_CREATE = "response.create" + RESPONSE_CANCEL = "response.cancel" + + ERROR = "error" + SESSION_CREATED = "session.created" + SESSION_UPDATED = "session.updated" + + INPUT_AUDIO_BUFFER_COMMITTED = "input_audio_buffer.committed" + INPUT_AUDIO_BUFFER_CLEARED = "input_audio_buffer.cleared" + INPUT_AUDIO_BUFFER_SPEECH_STARTED = "input_audio_buffer.speech_started" + INPUT_AUDIO_BUFFER_SPEECH_STOPPED = "input_audio_buffer.speech_stopped" + + ITEM_CREATED = "conversation.item.created" + ITEM_DELETED = "conversation.item.deleted" + ITEM_TRUNCATED = "conversation.item.truncated" + ITEM_INPUT_AUDIO_TRANSCRIPTION_COMPLETED = "conversation.item.input_audio_transcription.completed" + ITEM_INPUT_AUDIO_TRANSCRIPTION_FAILED = "conversation.item.input_audio_transcription.failed" + + RESPONSE_CREATED = "response.created" + RESPONSE_CANCELLED = "response.cancelled" + RESPONSE_DONE = "response.done" + RESPONSE_OUTPUT_ITEM_ADDED = "response.output_item.added" + RESPONSE_OUTPUT_ITEM_DONE = "response.output_item.done" + RESPONSE_CONTENT_PART_ADDED = "response.content_part.added" + RESPONSE_CONTENT_PART_DONE = "response.content_part.done" + RESPONSE_TEXT_DELTA = "response.text.delta" + RESPONSE_TEXT_DONE = "response.text.done" + RESPONSE_AUDIO_TRANSCRIPT_DELTA = "response.audio_transcript.delta" + RESPONSE_AUDIO_TRANSCRIPT_DONE = "response.audio_transcript.done" + RESPONSE_AUDIO_DELTA = "response.audio.delta" + RESPONSE_AUDIO_DONE = "response.audio.done" + RESPONSE_FUNCTION_CALL_ARGUMENTS_DELTA = "response.function_call_arguments.delta" + RESPONSE_FUNCTION_CALL_ARGUMENTS_DONE = "response.function_call_arguments.done" + RATE_LIMITS_UPDATED = "rate_limits.updated" + +# Base class for all ServerToClientMessages +@dataclass +class ServerToClientMessage: + event_id: str + + +@dataclass +class ErrorMessage(ServerToClientMessage): + error: RealtimeError + type: str = EventType.ERROR + + +@dataclass +class SessionCreated(ServerToClientMessage): + session: Session + type: str = EventType.SESSION_CREATED + + +@dataclass +class SessionUpdated(ServerToClientMessage): + session: Session + type: str = EventType.SESSION_UPDATED + + +@dataclass +class InputAudioBufferCommitted(ServerToClientMessage): + item_id: str + type: str = EventType.INPUT_AUDIO_BUFFER_COMMITTED + previous_item_id: Optional[str] = None + + +@dataclass +class InputAudioBufferCleared(ServerToClientMessage): + type: str = EventType.INPUT_AUDIO_BUFFER_CLEARED + + +@dataclass +class InputAudioBufferSpeechStarted(ServerToClientMessage): + audio_start_ms: int + item_id: str + type: str = EventType.INPUT_AUDIO_BUFFER_SPEECH_STARTED + + +@dataclass +class InputAudioBufferSpeechStopped(ServerToClientMessage): + audio_end_ms: int + type: str = EventType.INPUT_AUDIO_BUFFER_SPEECH_STOPPED + item_id: Optional[str] = None + + +@dataclass +class ItemCreated(ServerToClientMessage): + item: ItemParam + type: str = EventType.ITEM_CREATED + previous_item_id: Optional[str] = None + + +@dataclass +class 
ItemTruncated(ServerToClientMessage): + item_id: str + content_index: int + audio_end_ms: int + type: str = EventType.ITEM_TRUNCATED + + +@dataclass +class ItemDeleted(ServerToClientMessage): + item_id: str + type: str = EventType.ITEM_DELETED + + +# Assuming the necessary enums, ItemParam, and other classes are defined above +# ResponseStatus could be a string or an enum, depending on your schema + +# Enum or Literal for ResponseStatus (could be more extensive) +ResponseStatus = Union[str, Literal["in_progress", "completed", "cancelled", "incomplete", "failed"]] + +# Define status detail classes +@dataclass +class ResponseCancelledDetails: + reason: str # e.g., "turn_detected", "client_cancelled" + type: str = "cancelled" + +@dataclass +class ResponseIncompleteDetails: + reason: str # e.g., "max_output_tokens", "content_filter" + type: str = "incomplete" + +@dataclass +class ResponseError: + type: str # The type of the error, e.g., "validation_error", "server_error" + message: str # The error message describing what went wrong + code: Optional[str] = None # Optional error code, e.g., HTTP status code, API error code + +@dataclass +class ResponseFailedDetails: + error: ResponseError # Assuming ResponseError is already defined + type: str = "failed" + +# Union of possible status details +ResponseStatusDetails = Union[ResponseCancelledDetails, ResponseIncompleteDetails, ResponseFailedDetails] + +# Define Usage class to handle token usage +@dataclass +class InputTokenDetails: + cached_tokens: int + text_tokens: int + audio_tokens: int + +@dataclass +class OutputTokenDetails: + text_tokens: int + audio_tokens: int + +@dataclass +class Usage: + total_tokens: int + input_tokens: int + output_tokens: int + input_token_details: InputTokenDetails + output_token_details: OutputTokenDetails + +# The Response dataclass definition +@dataclass +class Response: + id: str # Unique ID for the response + output: List[ItemParam] = field(default_factory=list) # List of items in the response + object: str = "realtime.response" # Fixed value for object type + status: ResponseStatus = "in_progress" # Status of the response + status_details: Optional[ResponseStatusDetails] = None # Additional details based on status + usage: Optional[Usage] = None # Token usage information + metadata: Optional[Dict[str, Any]] = None # Additional metadata for the response + + + +@dataclass +class ResponseCreated(ServerToClientMessage): + response: Response + type: str = EventType.RESPONSE_CREATED + + +@dataclass +class ResponseDone(ServerToClientMessage): + response: Response + type: str = EventType.RESPONSE_DONE + + +@dataclass +class ResponseTextDelta(ServerToClientMessage): + response_id: str + item_id: str + output_index: int + content_index: int + delta: str + type: str = EventType.RESPONSE_TEXT_DELTA + + +@dataclass +class ResponseTextDone(ServerToClientMessage): + response_id: str + item_id: str + output_index: int + content_index: int + text: str + type: str = EventType.RESPONSE_TEXT_DONE + + +@dataclass +class ResponseAudioTranscriptDelta(ServerToClientMessage): + response_id: str + item_id: str + output_index: int + content_index: int + delta: str + type: str = EventType.RESPONSE_AUDIO_TRANSCRIPT_DELTA + + +@dataclass +class ResponseAudioTranscriptDone(ServerToClientMessage): + response_id: str + item_id: str + output_index: int + content_index: int + transcript: str + type: str = EventType.RESPONSE_AUDIO_TRANSCRIPT_DONE + + +@dataclass +class ResponseAudioDelta(ServerToClientMessage): + response_id: str + item_id: str + 
output_index: int
+    content_index: int
+    delta: str
+    type: str = EventType.RESPONSE_AUDIO_DELTA
+
+
+@dataclass
+class ResponseAudioDone(ServerToClientMessage):
+    response_id: str
+    item_id: str
+    output_index: int
+    content_index: int
+    type: str = EventType.RESPONSE_AUDIO_DONE
+
+
+@dataclass
+class ResponseFunctionCallArgumentsDelta(ServerToClientMessage):
+    response_id: str
+    item_id: str
+    output_index: int
+    call_id: str
+    delta: str
+    type: str = EventType.RESPONSE_FUNCTION_CALL_ARGUMENTS_DELTA
+
+
+@dataclass
+class ResponseFunctionCallArgumentsDone(ServerToClientMessage):
+    response_id: str
+    item_id: str
+    output_index: int
+    call_id: str
+    name: str
+    arguments: str
+    type: str = EventType.RESPONSE_FUNCTION_CALL_ARGUMENTS_DONE
+
+
+@dataclass
+class RateLimitDetails:
+    name: str  # Name of the rate limit, e.g., "api_requests", "message_generation"
+    limit: int  # The maximum number of allowed requests in the current time window
+    remaining: int  # The number of requests remaining in the current time window
+    reset_seconds: float  # The number of seconds until the rate limit resets
+
+@dataclass
+class RateLimitsUpdated(ServerToClientMessage):
+    rate_limits: List[RateLimitDetails]
+    type: str = EventType.RATE_LIMITS_UPDATED
+
+
+@dataclass
+class ResponseOutputItemAdded(ServerToClientMessage):
+    response_id: str  # The ID of the response
+    output_index: int  # Index of the output item in the response
+    item: Union[ItemParam, None]  # The added item (can be a message, function call, etc.)
+    type: str = EventType.RESPONSE_OUTPUT_ITEM_ADDED  # Fixed event type
+
+@dataclass
+class ResponseContentPartAdded(ServerToClientMessage):
+    response_id: str  # The ID of the response
+    item_id: str  # The ID of the item to which the content part was added
+    output_index: int  # Index of the output item in the response
+    content_index: int  # Index of the content part in the output
+    part: Union[ItemParam, None]  # The added content part
+    content: Union[ItemParam, None] = None  # The added content part for azure
+    type: str = EventType.RESPONSE_CONTENT_PART_ADDED  # Fixed event type
+
+@dataclass
+class ResponseContentPartDone(ServerToClientMessage):
+    response_id: str  # The ID of the response
+    item_id: str  # The ID of the item to which the content part belongs
+    output_index: int  # Index of the output item in the response
+    content_index: int  # Index of the content part in the output
+    part: Union[ItemParam, None]  # The content part that was completed
+    content: Union[ItemParam, None] = None  # The added content part for azure
+    type: str = EventType.RESPONSE_CONTENT_PART_DONE  # Fixed event type
+
+@dataclass
+class ResponseOutputItemDone(ServerToClientMessage):
+    response_id: str  # The ID of the response
+    output_index: int  # Index of the output item in the response
+    item: Union[ItemParam, None]  # The output item that was completed
+    type: str = EventType.RESPONSE_OUTPUT_ITEM_DONE  # Fixed event type
+
+@dataclass
+class ItemInputAudioTranscriptionCompleted(ServerToClientMessage):
+    item_id: str  # The ID of the item for which transcription was completed
+    content_index: int  # Index of the content part that was transcribed
+    transcript: str  # The transcribed text
+    type: str = EventType.ITEM_INPUT_AUDIO_TRANSCRIPTION_COMPLETED  # Fixed event type
+
+@dataclass
+class ItemInputAudioTranscriptionFailed(ServerToClientMessage):
+    item_id: str  # The ID of the item for which transcription failed
+    content_index: int  # Index of the content part that failed to transcribe
+    error: ResponseError  # Error details 
explaining the failure + type: str = EventType.ITEM_INPUT_AUDIO_TRANSCRIPTION_FAILED # Fixed event type + +# Union of all server-to-client message types +ServerToClientMessages = Union[ + ErrorMessage, + SessionCreated, + SessionUpdated, + InputAudioBufferCommitted, + InputAudioBufferCleared, + InputAudioBufferSpeechStarted, + InputAudioBufferSpeechStopped, + ItemCreated, + ItemTruncated, + ItemDeleted, + ResponseCreated, + ResponseDone, + ResponseTextDelta, + ResponseTextDone, + ResponseAudioTranscriptDelta, + ResponseAudioTranscriptDone, + ResponseAudioDelta, + ResponseAudioDone, + ResponseFunctionCallArgumentsDelta, + ResponseFunctionCallArgumentsDone, + RateLimitsUpdated, + ResponseOutputItemAdded, + ResponseContentPartAdded, + ResponseContentPartDone, + ResponseOutputItemDone, + ItemInputAudioTranscriptionCompleted, + ItemInputAudioTranscriptionFailed +] + + + +# Base class for all ClientToServerMessages +@dataclass +class ClientToServerMessage: + event_id: str = field(default_factory=generate_event_id) + + +@dataclass +class InputAudioBufferAppend(ClientToServerMessage): + audio: Optional[str] = field(default=None) + type: str = EventType.INPUT_AUDIO_BUFFER_APPEND # Default argument (has a default value) + +@dataclass +class InputAudioBufferCommit(ClientToServerMessage): + type: str = EventType.INPUT_AUDIO_BUFFER_COMMIT + + +@dataclass +class InputAudioBufferClear(ClientToServerMessage): + type: str = EventType.INPUT_AUDIO_BUFFER_CLEAR + + +@dataclass +class ItemCreate(ClientToServerMessage): + item: Optional[ItemParam] = field(default=None) # Assuming `ItemParam` is already defined + type: str = EventType.ITEM_CREATE + previous_item_id: Optional[str] = None + + +@dataclass +class ItemTruncate(ClientToServerMessage): + item_id: Optional[str] = field(default=None) + content_index: Optional[int] = field(default=None) + audio_end_ms: Optional[int] = field(default=None) + type: str = EventType.ITEM_TRUNCATE + + +@dataclass +class ItemDelete(ClientToServerMessage): + item_id: Optional[str] = field(default=None) + type: str = EventType.ITEM_DELETE + +@dataclass +class ResponseCreateParams: + commit: bool = True # Whether the generated messages should be appended to the conversation + cancel_previous: bool = True # Whether to cancel the previous pending generation + append_input_items: Optional[List[ItemParam]] = None # Messages to append before response generation + input_items: Optional[List[ItemParam]] = None # Initial messages to use for generation + modalities: Optional[Set[str]] = None # Allowed modalities (e.g., "text", "audio") + instructions: Optional[str] = None # Instructions or guidance for the model + voice: Optional[Voices] = None # Voice setting for audio output + output_audio_format: Optional[AudioFormats] = None # Format for the audio output + tools: Optional[List[Dict[str, Any]]] = None # Tools available for this response + tool_choice: Optional[ToolChoice] = None # How to choose the tool ("auto", "required", etc.) 
+ temperature: Optional[float] = None # The randomness of the model's responses + max_response_output_tokens: Optional[Union[int, str]] = None # Max number of tokens for the output, "inf" for infinite + + +@dataclass +class ResponseCreate(ClientToServerMessage): + type: str = EventType.RESPONSE_CREATE + response: Optional[ResponseCreateParams] = None # Assuming `ResponseCreateParams` is defined + + +@dataclass +class ResponseCancel(ClientToServerMessage): + type: str = EventType.RESPONSE_CANCEL + +DEFAULT_CONVERSATION = "default" + +@dataclass +class UpdateConversationConfig(ClientToServerMessage): + type: str = EventType.UPDATE_CONVERSATION_CONFIG + label: str = DEFAULT_CONVERSATION + subscribe_to_user_audio: Optional[bool] = None + voice: Optional[Voices] = None + system_message: Optional[str] = None + temperature: Optional[float] = None + max_tokens: Optional[int] = None + tools: Optional[List[dict]] = None + tool_choice: Optional[ToolChoice] = None + disable_audio: Optional[bool] = None + output_audio_format: Optional[AudioFormats] = None + + +@dataclass +class SessionUpdate(ClientToServerMessage): + session: Optional[SessionUpdateParams] = field(default=None) # Assuming `SessionUpdateParams` is defined + type: str = EventType.SESSION_UPDATE + + +# Union of all client-to-server message types +ClientToServerMessages = Union[ + InputAudioBufferAppend, + InputAudioBufferCommit, + InputAudioBufferClear, + ItemCreate, + ItemTruncate, + ItemDelete, + ResponseCreate, + ResponseCancel, + UpdateConversationConfig, + SessionUpdate +] + +def from_dict(data_class, data): + """Recursively convert a dictionary to a dataclass instance.""" + if is_dataclass(data_class): # Check if the target class is a dataclass + fieldtypes = {f.name: f.type for f in data_class.__dataclass_fields__.values()} + # Filter out keys that are not in the dataclass fields + valid_data = {f: data[f] for f in fieldtypes if f in data} + return data_class(**{f: from_dict(fieldtypes[f], valid_data[f]) for f in valid_data}) + elif isinstance(data, list): # Handle lists of nested dataclass objects + return [from_dict(data_class.__args__[0], item) for item in data] + else: # For primitive types (str, int, float, etc.), return the value as-is + return data + +def parse_client_message(unparsed_string: str) -> ClientToServerMessage: + data = json.loads(unparsed_string) + + # Dynamically select the correct message class based on the `type` field, using from_dict + if data["type"] == EventType.INPUT_AUDIO_BUFFER_APPEND: + return from_dict(InputAudioBufferAppend, data) + elif data["type"] == EventType.INPUT_AUDIO_BUFFER_COMMIT: + return from_dict(InputAudioBufferCommit, data) + elif data["type"] == EventType.INPUT_AUDIO_BUFFER_CLEAR: + return from_dict(InputAudioBufferClear, data) + elif data["type"] == EventType.ITEM_CREATE: + return from_dict(ItemCreate, data) + elif data["type"] == EventType.ITEM_TRUNCATE: + return from_dict(ItemTruncate, data) + elif data["type"] == EventType.ITEM_DELETE: + return from_dict(ItemDelete, data) + elif data["type"] == EventType.RESPONSE_CREATE: + return from_dict(ResponseCreate, data) + elif data["type"] == EventType.RESPONSE_CANCEL: + return from_dict(ResponseCancel, data) + elif data["type"] == EventType.UPDATE_CONVERSATION_CONFIG: + return from_dict(UpdateConversationConfig, data) + elif data["type"] == EventType.SESSION_UPDATE: + return from_dict(SessionUpdate, data) + + raise ValueError(f"Unknown message type: {data['type']}") + + +# Assuming all necessary classes and enums (EventType, 
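A quick round-trip sketch for the parser above, paired with `to_json` (defined just below); the payload is illustrative:

```python
msg = ItemCreate(
    item=UserMessageItemParam(content=[{"type": "input_text", "text": "hi"}])
)
wire = to_json(msg)              # None-valued fields are dropped on serialization
parsed = parse_client_message(wire)
assert isinstance(parsed, ItemCreate)
# Caveat: from_dict cannot resolve the ItemParam union, so parsed.item comes
# back as a plain dict rather than a UserMessageItemParam instance.
```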
ServerToClientMessages, etc.) are imported +# Here’s how you can dynamically parse a server-to-client message based on the `type` field: + +def parse_server_message(unparsed_string: str) -> ServerToClientMessage: + data = json.loads(unparsed_string) + + # Dynamically select the correct message class based on the `type` field, using from_dict + if data["type"] == EventType.ERROR: + return from_dict(ErrorMessage, data) + elif data["type"] == EventType.SESSION_CREATED: + return from_dict(SessionCreated, data) + elif data["type"] == EventType.SESSION_UPDATED: + return from_dict(SessionUpdated, data) + elif data["type"] == EventType.INPUT_AUDIO_BUFFER_COMMITTED: + return from_dict(InputAudioBufferCommitted, data) + elif data["type"] == EventType.INPUT_AUDIO_BUFFER_CLEARED: + return from_dict(InputAudioBufferCleared, data) + elif data["type"] == EventType.INPUT_AUDIO_BUFFER_SPEECH_STARTED: + return from_dict(InputAudioBufferSpeechStarted, data) + elif data["type"] == EventType.INPUT_AUDIO_BUFFER_SPEECH_STOPPED: + return from_dict(InputAudioBufferSpeechStopped, data) + elif data["type"] == EventType.ITEM_CREATED: + return from_dict(ItemCreated, data) + elif data["type"] == EventType.ITEM_TRUNCATED: + return from_dict(ItemTruncated, data) + elif data["type"] == EventType.ITEM_DELETED: + return from_dict(ItemDeleted, data) + elif data["type"] == EventType.RESPONSE_CREATED: + return from_dict(ResponseCreated, data) + elif data["type"] == EventType.RESPONSE_DONE: + return from_dict(ResponseDone, data) + elif data["type"] == EventType.RESPONSE_TEXT_DELTA: + return from_dict(ResponseTextDelta, data) + elif data["type"] == EventType.RESPONSE_TEXT_DONE: + return from_dict(ResponseTextDone, data) + elif data["type"] == EventType.RESPONSE_AUDIO_TRANSCRIPT_DELTA: + return from_dict(ResponseAudioTranscriptDelta, data) + elif data["type"] == EventType.RESPONSE_AUDIO_TRANSCRIPT_DONE: + return from_dict(ResponseAudioTranscriptDone, data) + elif data["type"] == EventType.RESPONSE_AUDIO_DELTA: + return from_dict(ResponseAudioDelta, data) + elif data["type"] == EventType.RESPONSE_AUDIO_DONE: + return from_dict(ResponseAudioDone, data) + elif data["type"] == EventType.RESPONSE_FUNCTION_CALL_ARGUMENTS_DELTA: + return from_dict(ResponseFunctionCallArgumentsDelta, data) + elif data["type"] == EventType.RESPONSE_FUNCTION_CALL_ARGUMENTS_DONE: + return from_dict(ResponseFunctionCallArgumentsDone, data) + elif data["type"] == EventType.RATE_LIMITS_UPDATED: + return from_dict(RateLimitsUpdated, data) + elif data["type"] == EventType.RESPONSE_OUTPUT_ITEM_ADDED: + return from_dict(ResponseOutputItemAdded, data) + elif data["type"] == EventType.RESPONSE_CONTENT_PART_ADDED: + return from_dict(ResponseContentPartAdded, data) + elif data["type"] == EventType.RESPONSE_CONTENT_PART_DONE: + return from_dict(ResponseContentPartDone, data) + elif data["type"] == EventType.RESPONSE_OUTPUT_ITEM_DONE: + return from_dict(ResponseOutputItemDone, data) + elif data["type"] == EventType.ITEM_INPUT_AUDIO_TRANSCRIPTION_COMPLETED: + return from_dict(ItemInputAudioTranscriptionCompleted, data) + elif data["type"] == EventType.ITEM_INPUT_AUDIO_TRANSCRIPTION_FAILED: + return from_dict(ItemInputAudioTranscriptionFailed, data) + + raise ValueError(f"Unknown message type: {data['type']}") + +def to_json(obj: Union[ClientToServerMessage, ServerToClientMessage]) -> str: + # ignore none value + return json.dumps(asdict(obj, dict_factory=lambda x: {k: v for (k, v) in x if v is not None})) \ No newline at end of file diff --git 
a/agents/ten_packages/extension/openai_v2v_python/requirements.txt b/agents/ten_packages/extension/openai_v2v_python/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e2984efb6a4f6a2ea002d2bc6f59c474b0aacc23
--- /dev/null
+++ b/agents/ten_packages/extension/openai_v2v_python/requirements.txt
@@ -0,0 +1,6 @@
+asyncio
+pydantic
+numpy==1.26.4
+sounddevice==0.4.7
+pydub==0.25.1
+aiohttp
\ No newline at end of file
diff --git a/agents/ten_packages/extension/polly_tts/README.md b/agents/ten_packages/extension/polly_tts/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..92823a6b2fae4a57356fc0919837a9e5c3026407
--- /dev/null
+++ b/agents/ten_packages/extension/polly_tts/README.md
@@ -0,0 +1,11 @@
+## Amazon Polly TTS Extension
+
+### Configurations
+
+You can configure this extension with the following environment variables:
+
+| Env | Required | Default | Notes |
+| -- | -- | -- | -- |
+| AWS_TTS_REGION | No | us-east-1 | The AWS Region of the Amazon Polly service you want to use. |
+| AWS_TTS_ACCESS_KEY_ID | No | - | Access Key of your IAM User; make sure it has the permissions needed to [synthesize speech](https://docs.aws.amazon.com/polly/latest/dg/security_iam_id-based-policy-examples.html#example-managed-policy-service-admin). Falls back to the default credentials provider if not provided; see the [boto3 credentials document](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html). |
+| AWS_TTS_SECRET_ACCESS_KEY | No | - | Secret Key of your IAM User; make sure it has the permissions needed to [synthesize speech](https://docs.aws.amazon.com/polly/latest/dg/security_iam_id-based-policy-examples.html#example-managed-policy-service-admin). Falls back to the default credentials provider if not provided; see the [boto3 credentials document](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html). |
\ No newline at end of file
diff --git a/agents/ten_packages/extension/polly_tts/__init__.py b/agents/ten_packages/extension/polly_tts/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e61a47285b537bdceab2bd7e450a8ea2324c1a7
--- /dev/null
+++ b/agents/ten_packages/extension/polly_tts/__init__.py
@@ -0,0 +1,6 @@
+#
+# This file is part of TEN Framework, an open source project.
+# Licensed under the Apache License, Version 2.0.
+# See the LICENSE file for more information.
+#
+from . import addon
\ No newline at end of file
diff --git a/agents/ten_packages/extension/polly_tts/addon.py b/agents/ten_packages/extension/polly_tts/addon.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ba59748500809b1d78575c87599b713f9cb116f
--- /dev/null
+++ b/agents/ten_packages/extension/polly_tts/addon.py
@@ -0,0 +1,17 @@
+#
+# This file is part of TEN Framework, an open source project.
+# Licensed under the Apache License, Version 2.0.
+# See the LICENSE file for more information. 
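As the README above notes, the extension falls back to boto3's default credential chain when no keys are supplied. A standalone sketch of that path (region, voice, and text are illustrative, not taken from the patch):

```python
import boto3

# No explicit keys: boto3 resolves credentials from the environment, shared
# config, or an instance role, mirroring the fallback branch in PollyTTS below.
polly = boto3.client("polly", region_name="us-east-1")
resp = polly.synthesize_speech(
    Engine="neural",
    OutputFormat="pcm",
    SampleRate="16000",   # Polly takes the sample rate as a string
    Text="Hello from Polly",
    VoiceId="Matthew",
)
pcm_bytes = resp["AudioStream"].read()
```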
+# +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + +@register_addon_as_extension("polly_tts") +class PollyTTSExtensionAddon(Addon): + def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None: + from .extension import PollyTTSExtension + ten_env.log_info("polly tts on_create_instance") + ten_env.on_create_instance_done(PollyTTSExtension(name), context) diff --git a/agents/ten_packages/extension/polly_tts/extension.py b/agents/ten_packages/extension/polly_tts/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..33ecf5494cf9d7c88f4ef8bd833a892e3e361e0d --- /dev/null +++ b/agents/ten_packages/extension/polly_tts/extension.py @@ -0,0 +1,71 @@ +from ten_ai_base.tts import AsyncTTSBaseExtension +from .polly_tts import PollyTTS, PollyTTSConfig +import traceback +from ten import ( + AsyncTenEnv, +) + +PROPERTY_REGION = "region" # Optional +PROPERTY_ACCESS_KEY = "access_key" # Optional +PROPERTY_SECRET_KEY = "secret_key" # Optional +PROPERTY_ENGINE = "engine" # Optional +PROPERTY_VOICE = "voice" # Optional +PROPERTY_SAMPLE_RATE = "sample_rate" # Optional +PROPERTY_LANG_CODE = "lang_code" # Optional + + +class PollyTTSExtension(AsyncTTSBaseExtension): + def __init__(self, name: str): + super().__init__(name) + self.client = None + self.config = None + + async def on_init(self, ten_env: AsyncTenEnv) -> None: + await super().on_init(ten_env) + ten_env.log_debug("on_init") + + async def on_start(self, ten_env: AsyncTenEnv) -> None: + try: + await super().on_start(ten_env) + ten_env.log_debug("on_start") + self.config = await PollyTTSConfig.create_async(ten_env=ten_env) + + if not self.config.access_key or not self.config.secret_key: + raise ValueError("access_key and secret_key are required") + + self.client = PollyTTS(self.config, ten_env) + except Exception: + ten_env.log_error(f"on_start failed: {traceback.format_exc()}") + + async def on_stop(self, ten_env: AsyncTenEnv) -> None: + await super().on_stop(ten_env) + ten_env.log_debug("on_stop") + + # TODO: clean up resources + + async def on_deinit(self, ten_env: AsyncTenEnv) -> None: + await super().on_deinit(ten_env) + ten_env.log_debug("on_deinit") + + async def on_request_tts( + self, ten_env: AsyncTenEnv, input_text: str, end_of_segment: bool + ) -> None: + try: + data = self.client.text_to_speech_stream(ten_env, input_text, end_of_segment) + async for frame in data: + await self.send_audio_out( + ten_env, frame, sample_rate=self.client.config.sample_rate + ) + except Exception: + ten_env.log_error(f"on_request_tts failed: {traceback.format_exc()}") + + async def on_cancel_tts(self, ten_env: AsyncTenEnv) -> None: + """ + Cancel ongoing TTS operation + """ + await super().on_cancel_tts(ten_env) + try: + if self.client: + self.client.on_cancel_tts(ten_env) + except Exception: + ten_env.log_error(f"on_cancel_tts failed: {traceback.format_exc()}") diff --git a/agents/ten_packages/extension/polly_tts/manifest.json b/agents/ten_packages/extension/polly_tts/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..5345c69c5c7e2fdad88bbea7c430cc0fcaf68904 --- /dev/null +++ b/agents/ten_packages/extension/polly_tts/manifest.json @@ -0,0 +1,73 @@ +{ + "type": "extension", + "name": "polly_tts", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "package": { + "include": [ + "manifest.json", + "property.json", + "BUILD.gn", + "**.tent", + "**.py", + "README.md", + "tests/**" + ] + }, + 
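The PCM stream produced by the Polly client further below is sliced into 10 ms frames before being sent out as `pcm_frame` audio. The chunk-size arithmetic, worked through for the defaults used by this extension (16 kHz, mono, 16-bit):

```python
sample_rate, channels, bytes_per_sample = 16000, 1, 2
frame_size = sample_rate * channels * bytes_per_sample // 100  # bytes per 10 ms
assert frame_size == 320
```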
"api": { + "property": { + "region": { + "type": "string" + }, + "access_key": { + "type": "string" + }, + "secret_key": { + "type": "string" + }, + "engine": { + "type": "string" + }, + "voice": { + "type": "string" + }, + "sample_rate": { + "type": "int64" + }, + "lang_code": { + "type": "string" + } + }, + "data_in": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + } + } + } + ], + "cmd_in": [ + { + "name": "flush" + } + ], + "cmd_out": [ + { + "name": "flush" + } + ], + "audio_frame_out": [ + { + "name": "pcm_frame" + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/polly_tts/polly_tts.py b/agents/ten_packages/extension/polly_tts/polly_tts.py new file mode 100644 index 0000000000000000000000000000000000000000..5b118dba6bc709571362edeec0d9c9f3e8573cb1 --- /dev/null +++ b/agents/ten_packages/extension/polly_tts/polly_tts.py @@ -0,0 +1,117 @@ +from dataclasses import dataclass +import traceback +import json +from typing import AsyncIterator +from ten.async_ten_env import AsyncTenEnv +from ten_ai_base.config import BaseConfig +import boto3 +from botocore.exceptions import ClientError +from contextlib import closing + + +@dataclass +class PollyTTSConfig(BaseConfig): + region: str = "us-east-1" + access_key: str = "" + secret_key: str = "" + engine: str = "neural" + voice: str = ( + "Matthew" # https://docs.aws.amazon.com/polly/latest/dg/available-voices.html + ) + sample_rate: int = 16000 + lang_code: str = "en-US" + bytes_per_sample: int = 2 + include_visemes: bool = False + number_of_channels: int = 1 + audio_format: str = "pcm" + + +class PollyTTS: + def __init__(self, config: PollyTTSConfig, ten_env: AsyncTenEnv) -> None: + """ + :param config: A PollyConfig + """ + ten_env.log_info("startinit polly tts") + self.config = config + if config.access_key and config.secret_key: + self.client = boto3.client( + service_name="polly", + region_name=config.region, + aws_access_key_id=config.access_key, + aws_secret_access_key=config.secret_key, + ) + else: + self.client = boto3.client(service_name="polly", region_name=config.region) + + self.voice_metadata = None + self.frame_size = int( + int(config.sample_rate) + * self.config.number_of_channels + * self.config.bytes_per_sample + / 100 + ) + self.audio_stream = None + + def _synthesize(self, text, ten_env: AsyncTenEnv): + """ + Synthesizes speech or speech marks from text, using the specified voice. + + :param text: The text to synthesize. + :return: The audio stream that contains the synthesized speech and a list + of visemes that are associated with the speech audio. 
+ """ + try: + kwargs = { + "Engine": self.config.engine, + "OutputFormat": self.config.audio_format, + "Text": text, + "VoiceId": self.config.voice, + } + if self.config.lang_code is not None: + kwargs["LanguageCode"] = self.config.lang_code + response = self.client.synthesize_speech(**kwargs) + audio_stream = response["AudioStream"] + visemes = None + if self.config.include_visemes: + kwargs["OutputFormat"] = "json" + kwargs["SpeechMarkTypes"] = ["viseme"] + response = self.client.synthesize_speech(**kwargs) + visemes = [ + json.loads(v) + for v in response["AudioStream"].read().decode().split() + if v + ] + ten_env.log_debug("Got %s visemes.", len(visemes)) + except ClientError: + ten_env.log_error("Couldn't get audio stream.") + raise + else: + return audio_stream, visemes + + async def text_to_speech_stream( + self, ten_env: AsyncTenEnv, text: str, end_of_segment: bool + ) -> AsyncIterator[bytes]: + inputText = text + if len(inputText) == 0: + ten_env.log_warning("async_polly_handler: empty input detected.") + try: + audio_stream, _ = self._synthesize(inputText, ten_env) + with closing(audio_stream) as stream: + for chunk in stream.iter_chunks(chunk_size=self.frame_size): + yield chunk + if end_of_segment: + ten_env.log_debug("End of segment reached") + except Exception: + ten_env.log_error(traceback.format_exc()) + + def on_cancel_tts(self, ten_env: AsyncTenEnv) -> None: + """ + Cancel ongoing TTS operation + """ + try: + if hasattr(self, 'audio_stream') and self.audio_stream: + self.audio_stream.close() + self.audio_stream = None + ten_env.log_debug("TTS cancelled successfully") + except Exception: + ten_env.log_error(f"Failed to cancel TTS: {traceback.format_exc()}") diff --git a/agents/ten_packages/extension/polly_tts/property.json b/agents/ten_packages/extension/polly_tts/property.json new file mode 100644 index 0000000000000000000000000000000000000000..a6d43852a7e8ca351250f4b087be5e3120be5f46 --- /dev/null +++ b/agents/ten_packages/extension/polly_tts/property.json @@ -0,0 +1,9 @@ +{ + "region": "us-east-1", + "access_key": "${env:AWS_ACCESS_KEY_ID}", + "secret_key": "${env:AWS_SECRET_ACCESS_KEY}", + "engine": "generative", + "voice": "Ruth", + "sample_rate": 16000, + "lang_code": "en-US" +} \ No newline at end of file diff --git a/agents/ten_packages/extension/polly_tts/requirements.txt b/agents/ten_packages/extension/polly_tts/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..6179f113f86e4647170eb04509a42376e4bab6ba --- /dev/null +++ b/agents/ten_packages/extension/polly_tts/requirements.txt @@ -0,0 +1 @@ +boto3>=1.26.0 \ No newline at end of file diff --git a/agents/ten_packages/extension/qwen_llm_python/__init__.py b/agents/ten_packages/extension/qwen_llm_python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..43f1c856054d10bc3a88b27c2e0c7a711ab02a95 --- /dev/null +++ b/agents/ten_packages/extension/qwen_llm_python/__init__.py @@ -0,0 +1 @@ +from . 
import qwen_llm_addon diff --git a/agents/ten_packages/extension/qwen_llm_python/manifest.json b/agents/ten_packages/extension/qwen_llm_python/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..a4c9ae667aff1ff86a58eeb783e1eb35c2ec9c31 --- /dev/null +++ b/agents/ten_packages/extension/qwen_llm_python/manifest.json @@ -0,0 +1,94 @@ +{ + "type": "extension", + "name": "qwen_llm_python", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "api": { + "property": { + "api_key": { + "type": "string" + }, + "model": { + "type": "string" + }, + "max_tokens": { + "type": "int64" + }, + "prompt": { + "type": "string" + }, + "greeting": { + "type": "string" + }, + "max_memory_length": { + "type": "int64" + } + }, + "data_in": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + }, + "is_final": { + "type": "bool" + } + } + } + ], + "data_out": [ + { + "name": "text_data", + "property": { + "text": { + "type": "string" + }, + "end_of_segment": { + "type": "bool" + } + } + } + ], + "cmd_in": [ + { + "name": "flush" + }, + { + "name": "call_chat", + "property": { + "messages": { + "type": "string" + }, + "stream": { + "type": "bool" + } + }, + "required": [ + "messages" + ], + "result": { + "property": { + "text": { + "type": "string" + } + }, + "required": [ + "text" + ] + } + } + ], + "cmd_out": [ + { + "name": "flush" + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/qwen_llm_python/property.json b/agents/ten_packages/extension/qwen_llm_python/property.json new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/agents/ten_packages/extension/qwen_llm_python/property.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/agents/ten_packages/extension/qwen_llm_python/qwen_llm_addon.py b/agents/ten_packages/extension/qwen_llm_python/qwen_llm_addon.py new file mode 100644 index 0000000000000000000000000000000000000000..cb7201cf31ad44110ec78a4bbae0bd1274567460 --- /dev/null +++ b/agents/ten_packages/extension/qwen_llm_python/qwen_llm_addon.py @@ -0,0 +1,22 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-05. +# Copyright (c) 2024 Agora IO. All rights reserved. +# +# +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + + +@register_addon_as_extension("qwen_llm_python") +class QWenLLMExtensionAddon(Addon): + def on_create_instance(self, ten: TenEnv, addon_name: str, context): + from .qwen_llm_extension import QWenLLMExtension + + ten.log_info("on_create_instance") + ten.on_create_instance_done(QWenLLMExtension(addon_name), context) + diff --git a/agents/ten_packages/extension/qwen_llm_python/qwen_llm_extension.py b/agents/ten_packages/extension/qwen_llm_python/qwen_llm_extension.py new file mode 100644 index 0000000000000000000000000000000000000000..a00812474bea656c6f453dbb5382a49fdc05d3f8 --- /dev/null +++ b/agents/ten_packages/extension/qwen_llm_python/qwen_llm_extension.py @@ -0,0 +1,281 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-05. +# Copyright (c) 2024 Agora IO. All rights reserved. 
+#
+#
+from ten import (
+    Extension,
+    TenEnv,
+    Cmd,
+    Data,
+    StatusCode,
+    CmdResult,
+)
+from typing import List, Any
+import dashscope
+import queue
+import json
+from datetime import datetime
+import threading
+import re
+from http import HTTPStatus
+
+DATA_OUT_TEXT_DATA_PROPERTY_TEXT = "text"
+DATA_OUT_TEXT_DATA_PROPERTY_TEXT_END_OF_SEGMENT = "end_of_segment"
+
+
+class QWenLLMExtension(Extension):
+    def __init__(self, name: str):
+        super().__init__(name)
+        self.history = []
+        self.api_key = ""
+        self.model = ""
+        self.prompt = ""
+        self.max_history = 10
+        self.stopped = False
+        self.thread = None
+        self.sentence_expr = re.compile(r".+?[,,.。!!??::]", re.DOTALL)
+
+        self.outdate_ts = datetime.now()
+        self.outdate_ts_lock = threading.Lock()
+
+        self.queue = queue.Queue()
+        self.mutex = threading.Lock()
+
+    def on_msg(self, role: str, content: str) -> None:
+        self.mutex.acquire()
+        try:
+            self.history.append({"role": role, "content": content})
+            if len(self.history) > self.max_history:
+                self.history = self.history[1:]
+        finally:
+            self.mutex.release()
+
+    def get_messages(self) -> List[Any]:
+        messages = []
+        if len(self.prompt) > 0:
+            messages.append({"role": "system", "content": self.prompt})
+        self.mutex.acquire()
+        try:
+            for h in self.history:
+                messages.append(h)
+        finally:
+            self.mutex.release()
+        return messages
+
+    def need_interrupt(self, ts: datetime) -> bool:
+        with self.outdate_ts_lock:
+            return self.outdate_ts > ts
+
+    def get_outdate_ts(self) -> datetime:
+        with self.outdate_ts_lock:
+            return self.outdate_ts
+
+    def complete_with_history(self, ten: TenEnv, ts: datetime, input_text: str):
+        """
+        Complete input_text querying with built-in chat history.
+        """
+
+        def callback(text: str, end_of_segment: bool):
+            d = Data.create("text_data")
+            d.set_property_string("text", text)
+            d.set_property_bool("end_of_segment", end_of_segment)
+            ten.send_data(d)
+
+        messages = self.get_messages()
+        messages.append({"role": "user", "content": input_text})
+        total = self.stream_chat(ten, ts, messages, callback)
+        self.on_msg("user", input_text)
+        if len(total) > 0:
+            self.on_msg("assistant", total)
+
+    def call_chat(self, ten: TenEnv, ts: datetime, cmd: Cmd):
+        """
+        Respond to the call_chat cmd and return results as a stream.
+        The incoming 'messages' contains the system prompt, chat history, and question.
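+
+        An illustrative 'messages' payload (the shape matches what
+        get_messages() builds; the content itself is only an example):
+        [{"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "What is the weather like today?"}]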
+ """ + + start_time = datetime.now() + curr_ttfs = None # time to first sentence + + def callback(text: str, end_of_segment: bool): + nonlocal curr_ttfs + if curr_ttfs is None: + curr_ttfs = datetime.now() - start_time + ten.log_info( + f"TTFS {int(curr_ttfs.total_seconds() * 1000)}ms, sentence {text} end_of_segment {end_of_segment}" + ) + + cmd_result = CmdResult.create(StatusCode.OK) + cmd_result.set_property_string("text", text) + if end_of_segment: + cmd_result.set_is_final(True) # end of streaming return + else: + cmd_result.set_is_final(False) # keep streaming return + ten.log_info(f"call_chat cmd return_result {cmd_result.to_json()}") + ten.return_result(cmd_result, cmd) + + messages_str = cmd.get_property_string("messages") + messages = json.loads(messages_str) + stream = False + try: + stream = cmd.get_property_bool("stream") + except Exception: + ten.log_warn("stream property not found, default to False") + + if stream: + self.stream_chat(ts, messages, callback) + else: + total = self.stream_chat(ts, messages, None) + callback(total, True) # callback once until full answer returned + + def stream_chat(self, ts: datetime.time, messages: List[Any], callback): + ten = self.ten + ten.log_info(f"before stream_chat call {messages} {ts}") + + if self.need_interrupt(ts): + ten.log_warn("out of date, %s, %s", self.get_outdate_ts(), ts) + return + + responses = dashscope.Generation.call( + self.model, + messages=messages, + result_format="message", # set the result to be "message" format. + stream=True, # set streaming output + incremental_output=True, # get streaming output incrementally + ) + + total = "" + partial = "" + for response in responses: + if self.need_interrupt(ts): + ten.log_warn("out of date, %s, %s", self.get_outdate_ts(), ts) + partial = "" # discard not sent + break + if response.status_code == HTTPStatus.OK: + temp = response.output.choices[0]["message"]["content"] + if len(temp) == 0: + continue + partial += temp + total += temp + + m = self.sentence_expr.match(partial) + if m is not None: + sentence = m.group(0) + partial = partial[m.end(0) :] + if callback is not None: + callback(sentence, False) + + else: + ten.log_warn( + f"request_id: {response.request_id}, status_code: {response.status_code}, error code: {response.code}, error message: {response.message}" + ) + break + + # always send end_of_segment + if callback is not None: + callback(partial, True) + ten.log_info(f"stream_chat full_answer {total}") + return total + + def on_start(self, ten: TenEnv) -> None: + ten.log_info("on_start") + self.api_key = ten.get_property_string("api_key") + self.model = ten.get_property_string("model") + self.prompt = ten.get_property_string("prompt") + self.max_history = ten.get_property_int("max_memory_length") + greeting = ten.get_property_string("greeting") + + if greeting: + try: + output_data = Data.create("text_data") + output_data.set_property_string( + DATA_OUT_TEXT_DATA_PROPERTY_TEXT, greeting + ) + output_data.set_property_bool( + DATA_OUT_TEXT_DATA_PROPERTY_TEXT_END_OF_SEGMENT, True + ) + ten.send_data(output_data) + ten.log_info(f"greeting [{greeting}] sent") + except Exception as e: + ten.log_error(f"greeting [{greeting}] send failed, err: {e}") + + dashscope.api_key = self.api_key + self.thread = threading.Thread(target=self.async_handle, args=[ten]) + self.thread.start() + ten.on_start_done() + + def on_stop(self, ten: TenEnv) -> None: + ten.log_info("on_stop") + self.stopped = True + self.flush() + self.queue.put(None) + if self.thread is not None: + 
+            self.thread.join()
+            self.thread = None
+        ten.on_stop_done()
+
+    def flush(self):
+        with self.outdate_ts_lock:
+            self.outdate_ts = datetime.now()
+
+        while not self.queue.empty():
+            self.queue.get()
+
+    def on_data(self, ten: TenEnv, data: Data) -> None:
+        ten.log_info("on_data")
+        is_final = data.get_property_bool("is_final")
+        if not is_final:
+            ten.log_info("ignore non final")
+            return
+
+        input_text = data.get_property_string("text")
+        if len(input_text) == 0:
+            ten.log_info("ignore empty text")
+            return
+
+        ts = datetime.now()
+        ten.log_info(f"on data {input_text}, {ts}")
+        self.queue.put((input_text, ts))
+
+    def async_handle(self, ten: TenEnv):
+        while not self.stopped:
+            try:
+                value = self.queue.get()
+                if value is None:
+                    break
+                chat_input, ts = value
+                if self.need_interrupt(ts):
+                    continue
+
+                if isinstance(chat_input, str):
+                    ten.log_info(f"fetched from queue {chat_input}")
+                    self.complete_with_history(ten, ts, chat_input)
+                else:
+                    ten.log_info(f"fetched from queue {chat_input.get_name()}")
+                    self.call_chat(ten, ts, chat_input)
+            except Exception as e:
+                ten.log_error(str(e))
+
+    def on_cmd(self, ten: TenEnv, cmd: Cmd) -> None:
+        ts = datetime.now()
+        cmd_name = cmd.get_name()
+        ten.log_info(f"on_cmd {cmd_name}, {ts}")
+
+        if cmd_name == "flush":
+            self.flush()
+            cmd_out = Cmd.create("flush")
+            ten.send_cmd(
+                cmd_out,
+                lambda ten, result, _: ten.log_info("send_cmd flush done"),
+            )
+        elif cmd_name == "call_chat":
+            self.queue.put((cmd, ts))
+            return  # cmd_result will be returned once it's processed
+        else:
+            ten.log_info(f"unknown cmd {cmd_name}")
+
+        cmd_result = CmdResult.create(StatusCode.OK)
+        ten.return_result(cmd_result, cmd)
diff --git a/agents/ten_packages/extension/qwen_llm_python/requirements.txt b/agents/ten_packages/extension/qwen_llm_python/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f1c09c9e2e571c8558073f2e8ece33609cd129cc
--- /dev/null
+++ b/agents/ten_packages/extension/qwen_llm_python/requirements.txt
@@ -0,0 +1 @@
+dashscope==1.20.0
\ No newline at end of file
diff --git a/agents/ten_packages/extension/transcribe_asr_python/README.md b/agents/ten_packages/extension/transcribe_asr_python/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..8bab60d2258cacb5e97d606f9aef2dec27412a60
--- /dev/null
+++ b/agents/ten_packages/extension/transcribe_asr_python/README.md
@@ -0,0 +1,11 @@
+## Amazon Transcribe ASR Extension
+
+### Configurations
+
+You can configure this extension by providing the following environment variables:
+
+| Env | Required | Default | Notes |
+| -- | -- | -- | -- |
+| AWS_REGION | No | us-east-1 | The region of the Amazon Transcribe service you want to use. |
+| AWS_ACCESS_KEY_ID | No | - | Access key of your IAM user; make sure you've set proper permissions to [start stream transcription](https://docs.aws.amazon.com/transcribe/latest/APIReference/API_streaming_StartStreamTranscription.html). Will use the default credentials provider if not provided. Check [document](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html). |
+| AWS_SECRET_ACCESS_KEY | No | - | Secret key of your IAM user. Will use the default credentials provider if not provided. Check [document](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html). 
| \ No newline at end of file diff --git a/agents/ten_packages/extension/transcribe_asr_python/__init__.py b/agents/ten_packages/extension/transcribe_asr_python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..61ab1b45fc24125336d9dd2262a875334b2d2afa --- /dev/null +++ b/agents/ten_packages/extension/transcribe_asr_python/__init__.py @@ -0,0 +1 @@ +from . import transcribe_asr_addon diff --git a/agents/ten_packages/extension/transcribe_asr_python/manifest.json b/agents/ten_packages/extension/transcribe_asr_python/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..6d3ebc3096a2c8588db3bbab526fdf5b33960881 --- /dev/null +++ b/agents/ten_packages/extension/transcribe_asr_python/manifest.json @@ -0,0 +1,79 @@ +{ + "type": "extension", + "name": "transcribe_asr_python", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "api": { + "property": { + "region": { + "type": "string" + }, + "access_key": { + "type": "string" + }, + "secret_key": { + "type": "string" + }, + "sample_rate": { + "type": "string" + }, + "lang_code": { + "type": "string" + } + }, + "audio_frame_in": [ + { + "name": "pcm_frame", + "property": {} + } + ], + "cmd_in": [ + { + "name": "on_user_joined", + "property": {} + }, + { + "name": "on_user_left", + "property": {} + }, + { + "name": "on_connection_failure", + "property": {} + } + ], + "data_out": [ + { + "name": "text_data", + "property": { + "time": { + "type": "int64" + }, + "duration_ms": { + "type": "int64" + }, + "language": { + "type": "string" + }, + "text": { + "type": "string" + }, + "is_final": { + "type": "bool" + }, + "stream_id": { + "type": "uint32" + }, + "end_of_segment": { + "type": "bool" + } + } + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/transcribe_asr_python/property.json b/agents/ten_packages/extension/transcribe_asr_python/property.json new file mode 100644 index 0000000000000000000000000000000000000000..4ddf188164f0e19dce2c9e577bc49022ae0644cf --- /dev/null +++ b/agents/ten_packages/extension/transcribe_asr_python/property.json @@ -0,0 +1,7 @@ +{ + "region": "us-east-1", + "access_key": "${env:AWS_ACCESS_KEY_ID}", + "secret_key": "${env:AWS_SECRET_ACCESS_KEY}", + "sample_rate": "16000", + "lang_code": "en-US" +} \ No newline at end of file diff --git a/agents/ten_packages/extension/transcribe_asr_python/requirements.txt b/agents/ten_packages/extension/transcribe_asr_python/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..fb958819ece8cbb72ca078c479071e5e554b1f52 --- /dev/null +++ b/agents/ten_packages/extension/transcribe_asr_python/requirements.txt @@ -0,0 +1 @@ +amazon-transcribe==0.6.2 \ No newline at end of file diff --git a/agents/ten_packages/extension/transcribe_asr_python/transcribe_asr_addon.py b/agents/ten_packages/extension/transcribe_asr_python/transcribe_asr_addon.py new file mode 100644 index 0000000000000000000000000000000000000000..879277cbd3daa8d0146b448b315cd2f44d6306c9 --- /dev/null +++ b/agents/ten_packages/extension/transcribe_asr_python/transcribe_asr_addon.py @@ -0,0 +1,12 @@ +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + +@register_addon_as_extension("transcribe_asr_python") +class TranscribeAsrExtensionAddon(Addon): + def on_create_instance(self, ten: TenEnv, addon_name: str, context) -> None: + from .transcribe_asr_extension import TranscribeAsrExtension + 
ten.log_info("on_create_instance") + ten.on_create_instance_done(TranscribeAsrExtension(addon_name), context) diff --git a/agents/ten_packages/extension/transcribe_asr_python/transcribe_asr_extension.py b/agents/ten_packages/extension/transcribe_asr_python/transcribe_asr_extension.py new file mode 100644 index 0000000000000000000000000000000000000000..2a5df17cfc75f4d2aad1765d01b8c2c380376286 --- /dev/null +++ b/agents/ten_packages/extension/transcribe_asr_python/transcribe_asr_extension.py @@ -0,0 +1,110 @@ +from ten import ( + Extension, + TenEnv, + Cmd, + AudioFrame, + StatusCode, + CmdResult, +) + +import asyncio +import threading + +from .transcribe_wrapper import AsyncTranscribeWrapper, TranscribeConfig + +PROPERTY_REGION = "region" # Optional +PROPERTY_ACCESS_KEY = "access_key" # Optional +PROPERTY_SECRET_KEY = "secret_key" # Optional +PROPERTY_SAMPLE_RATE = "sample_rate" # Optional +PROPERTY_LANG_CODE = "lang_code" # Optional + + +class TranscribeAsrExtension(Extension): + def __init__(self, name: str): + super().__init__(name) + + self.stopped = False + self.queue = asyncio.Queue(maxsize=3000) # about 3000 * 10ms = 30s input + self.transcribe = None + self.thread = None + + self.loop = asyncio.new_event_loop() + asyncio.set_event_loop(self.loop) + + def on_start(self, ten: TenEnv) -> None: + ten.log_info("TranscribeAsrExtension on_start") + + transcribe_config = TranscribeConfig.default_config() + + for optional_param in [ + PROPERTY_REGION, + PROPERTY_SAMPLE_RATE, + PROPERTY_LANG_CODE, + PROPERTY_ACCESS_KEY, + PROPERTY_SECRET_KEY, + ]: + try: + value = ten.get_property_string(optional_param).strip() + if value: + transcribe_config.__setattr__(optional_param, value) + except Exception as err: + ten.log_debug( + f"GetProperty optional {optional_param} failed, err: {err}. 
Using default value: {transcribe_config.__getattribute__(optional_param)}" + ) + + self.transcribe = AsyncTranscribeWrapper( + transcribe_config, self.queue, ten, self.loop + ) + + ten.log_info("Starting async_transcribe_wrapper thread") + self.thread = threading.Thread(target=self.transcribe.run, args=[]) + self.thread.start() + + ten.on_start_done() + + def put_pcm_frame(self, ten: TenEnv, pcm_frame: AudioFrame) -> None: + if self.stopped: + return + + try: + # Use a simpler synchronous approach with put_nowait + if not self.loop.is_closed(): + if self.queue.qsize() < self.queue.maxsize: + self.loop.call_soon_threadsafe( + self.queue.put_nowait, pcm_frame + ) + else: + ten.log_error("Queue is full, dropping frame") + else: + ten.log_error("Event loop is closed, cannot process frame") + except Exception as e: + import traceback + error_msg = f"Error putting frame in queue: {str(e)}\n{traceback.format_exc()}" + ten.log_error(error_msg) + + def on_audio_frame(self, ten: TenEnv, frame: AudioFrame) -> None: + self.put_pcm_frame(ten, pcm_frame=frame) + + def on_stop(self, ten: TenEnv) -> None: + ten.log_info("TranscribeAsrExtension on_stop") + + # put an empty frame to stop transcribe_wrapper + self.put_pcm_frame(ten, None) + self.stopped = True + self.thread.join() + self.loop.stop() + self.loop.close() + + ten.on_stop_done() + + def on_cmd(self, ten: TenEnv, cmd: Cmd) -> None: + ten.log_info("TranscribeAsrExtension on_cmd") + cmd_json = cmd.to_json() + ten.log_info(f"TranscribeAsrExtension on_cmd json: {cmd_json}") + + cmdName = cmd.get_name() + ten.log_info(f"got cmd {cmdName}") + + cmd_result = CmdResult.create(StatusCode.OK) + cmd_result.set_property_string("detail", "success") + ten.return_result(cmd_result, cmd) diff --git a/agents/ten_packages/extension/transcribe_asr_python/transcribe_config.py b/agents/ten_packages/extension/transcribe_asr_python/transcribe_config.py new file mode 100644 index 0000000000000000000000000000000000000000..e404d0912febbefe89381304bdd1933d2e65ed3e --- /dev/null +++ b/agents/ten_packages/extension/transcribe_asr_python/transcribe_config.py @@ -0,0 +1,29 @@ +from typing import Union + +class TranscribeConfig: + def __init__(self, + region: str, + access_key: str, + secret_key: str, + sample_rate: Union[str, int], + lang_code: str): + self.region = region + self.access_key = access_key + self.secret_key = secret_key + + self.lang_code = lang_code + self.sample_rate = int(sample_rate) + + self.media_encoding = 'pcm' + self.bytes_per_sample = 2, + self.channel_nums = 1 + + @classmethod + def default_config(cls): + return cls( + region="us-east-1", + access_key="", + secret_key="", + sample_rate=16000, + lang_code='en-US' + ) \ No newline at end of file diff --git a/agents/ten_packages/extension/transcribe_asr_python/transcribe_wrapper.py b/agents/ten_packages/extension/transcribe_asr_python/transcribe_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..25a0d1f8ff8115e935dcea046d5c67b9efa1567a --- /dev/null +++ b/agents/ten_packages/extension/transcribe_asr_python/transcribe_wrapper.py @@ -0,0 +1,179 @@ +import asyncio + +from ten import TenEnv, Data + +from amazon_transcribe.auth import StaticCredentialResolver +from amazon_transcribe.client import TranscribeStreamingClient +from amazon_transcribe.handlers import TranscriptResultStreamHandler +from amazon_transcribe.model import ( + TranscriptEvent, + TranscriptResultStream, + StartStreamTranscriptionEventStream, +) + +from .transcribe_config import TranscribeConfig + 
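+# Property names for the outgoing "text_data" messages (see "data_out" in
+# manifest.json). An emitted message looks roughly like this (illustrative):
+#   {"text": "hello world", "is_final": true, "stream_id": 0, "end_of_segment": true}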
+DATA_OUT_TEXT_DATA_PROPERTY_TEXT = "text"
+DATA_OUT_TEXT_DATA_PROPERTY_IS_FINAL = "is_final"
+DATA_OUT_TEXT_DATA_PROPERTY_STREAM_ID = "stream_id"
+DATA_OUT_TEXT_DATA_PROPERTY_END_OF_SEGMENT = "end_of_segment"
+
+def create_and_send_data(ten: TenEnv, text_result: str, is_final: bool, stream_id: int = 0):
+    stable_data = Data.create("text_data")
+    stable_data.set_property_bool(DATA_OUT_TEXT_DATA_PROPERTY_IS_FINAL, is_final)
+    stable_data.set_property_string(DATA_OUT_TEXT_DATA_PROPERTY_TEXT, text_result)
+    stable_data.set_property_int(DATA_OUT_TEXT_DATA_PROPERTY_STREAM_ID, stream_id)
+    stable_data.set_property_bool(DATA_OUT_TEXT_DATA_PROPERTY_END_OF_SEGMENT, is_final)
+    ten.send_data(stable_data)
+
+
+class AsyncTranscribeWrapper:
+    def __init__(
+        self,
+        config: TranscribeConfig,
+        queue: asyncio.Queue,
+        ten: TenEnv,
+        loop: asyncio.AbstractEventLoop,
+    ):
+        self.queue = queue
+        self.ten = ten
+        self.stopped = False
+        self.config = config
+        self.loop = loop
+        self.stream = None
+        self.handler = None
+        self.event_handler_task = None
+
+        if config.access_key and config.secret_key:
+            ten.log_info(f"init transcribe client with access key: {config.access_key}")
+            self.transcribe_client = TranscribeStreamingClient(
+                region=config.region,
+                credential_resolver=StaticCredentialResolver(
+                    access_key_id=config.access_key, secret_access_key=config.secret_key
+                ),
+            )
+        else:
+            ten.log_info(
+                "init transcribe client without access key, using default credentials provider chain."
+            )
+
+            self.transcribe_client = TranscribeStreamingClient(region=config.region)
+
+        asyncio.set_event_loop(self.loop)
+        self.reset_stream()
+
+    def reset_stream(self):
+        self.stream = None
+        self.handler = None
+        self.event_handler_task = None
+
+    async def cleanup(self):
+        if self.stream:
+            await self.stream.input_stream.end_stream()
+            self.ten.log_info("cleanup: stream ended.")
+
+        if self.event_handler_task:
+            await self.event_handler_task
+            self.ten.log_info("cleanup: event handler ended.")
+
+        self.reset_stream()
+
+    async def create_stream(self, stream_id) -> bool:
+        try:
+            self.stream = await self.get_transcribe_stream()
+            self.handler = TranscribeEventHandler(self.stream.output_stream, self.ten, stream_id)
+            self.event_handler_task = asyncio.create_task(self.handler.handle_events())
+        except Exception as e:
+            self.ten.log_error(str(e))
+            return False
+
+        return True
+
+    async def send_frame(self) -> None:
+        while not self.stopped:
+            try:
+                pcm_frame = await asyncio.wait_for(self.queue.get(), timeout=3.0)
+
+                if pcm_frame is None:
+                    self.ten.log_warn("send_frame: exit due to None value received.")
+                    return
+
+                frame_buf = pcm_frame.get_buf()
+                if not frame_buf:
+                    self.ten.log_warn("send_frame: empty pcm_frame detected.")
+                    continue
+                stream_id = pcm_frame.get_property_int("stream_id")
+                if not self.stream:
+                    self.ten.log_info("lazy init stream.")
+                    if not await self.create_stream(stream_id):
+                        continue
+
+                await self.stream.input_stream.send_audio_event(audio_chunk=frame_buf)
+                self.queue.task_done()
+            except asyncio.TimeoutError:
+                if self.stream:
+                    await self.cleanup()
+                    self.ten.log_info(
+                        "send_frame: no data for 3s, will close the current stream and create a new one when receiving a new frame."
+                    )
+                else:
+                    self.ten.log_info("send_frame: waiting for pcm frame.")
+            except IOError as e:
+                self.ten.log_error(f"Error in send_frame: {e}")
+            except Exception as e:
+                self.ten.log_error(f"Error in send_frame: {e}")
+                raise e
+
+        self.ten.log_info("send_frame: exit due to self.stopped == True")
+
+    async def transcribe_loop(self) -> None:
+        try:
+            await self.send_frame()
+        except Exception as e:
+            self.ten.log_error(str(e))
+        finally:
+            await self.cleanup()
+
+    async def get_transcribe_stream(self) -> StartStreamTranscriptionEventStream:
+        stream = await self.transcribe_client.start_stream_transcription(
+            language_code=self.config.lang_code,
+            media_sample_rate_hz=self.config.sample_rate,
+            media_encoding=self.config.media_encoding,
+        )
+        return stream
+
+    def run(self) -> None:
+        self.loop.run_until_complete(self.transcribe_loop())
+        self.loop.close()
+        self.ten.log_info("async_transcribe_wrapper: thread completed.")
+
+    def stop(self) -> None:
+        self.stopped = True
+
+
+class TranscribeEventHandler(TranscriptResultStreamHandler):
+    def __init__(self, transcript_result_stream: TranscriptResultStream, ten: TenEnv, stream_id: int = 0):
+        super().__init__(transcript_result_stream)
+        self.ten = ten
+        self.stream_id = stream_id
+
+    async def handle_transcript_event(self, transcript_event: TranscriptEvent) -> None:
+        results = transcript_event.transcript.results
+        text_result = ""
+
+        is_final = True
+
+        for result in results:
+            if result.is_partial:
+                is_final = False
+
+            for alt in result.alternatives:
+                text_result += alt.transcript
+
+        if not text_result:
+            return
+
+        self.ten.log_info(f"got transcript: [{text_result}], is_final: [{is_final}]")
+
+        create_and_send_data(ten=self.ten, text_result=text_result, is_final=is_final, stream_id=self.stream_id)
diff --git a/agents/ten_packages/extension/tsdb_firestore/README.md b/agents/ten_packages/extension/tsdb_firestore/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..4d2bf6b4b57e9a9904f95d01d20846f30a3e90c1
--- /dev/null
+++ b/agents/ten_packages/extension/tsdb_firestore/README.md
@@ -0,0 +1,13 @@
+# Firestore TSDB Extension
+
+Public Doc: https://firebase.google.com/docs/firestore
+
+## Configurations
+
+You can configure this extension with the following settings:
+
+- credentials: a dict holding the contents of the certificate from your Google service account
+- collection_name: a string naming the collection that stores chat contents
+- channel_name: a string used to fetch the corresponding document in storage
+
+In addition, to enable document deletion based on TTL (1 day by default, refreshed each time the document is fetched), you should configure a TTL policy or define Cloud Functions in Firestore.
\ No newline at end of file
diff --git a/agents/ten_packages/extension/tsdb_firestore/__init__.py b/agents/ten_packages/extension/tsdb_firestore/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8cd75ddef4ae8e15366d6ed94ee557e6481a4989
--- /dev/null
+++ b/agents/ten_packages/extension/tsdb_firestore/__init__.py
@@ -0,0 +1,8 @@
+#
+#
+# Agora Real Time Engagement
+# Created by Wei Hu in 2024-08.
+# Copyright (c) 2024 Agora IO. All rights reserved.
+#
+#
+from . 
import addon diff --git a/agents/ten_packages/extension/tsdb_firestore/addon.py b/agents/ten_packages/extension/tsdb_firestore/addon.py new file mode 100644 index 0000000000000000000000000000000000000000..e1ad56ce813b59529372dcc7cceca7e2aede15c6 --- /dev/null +++ b/agents/ten_packages/extension/tsdb_firestore/addon.py @@ -0,0 +1,21 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved. +# +# +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + + +@register_addon_as_extension("tsdb_firestore") +class TSDBFirestoreExtensionAddon(Addon): + + def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None: + from .extension import TSDBFirestoreExtension + ten_env.log_info("TSDBFirestoreExtensionAddon on_create_instance") + ten_env.on_create_instance_done(TSDBFirestoreExtension(name), context) diff --git a/agents/ten_packages/extension/tsdb_firestore/extension.py b/agents/ten_packages/extension/tsdb_firestore/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..3ff50652dd17d341b66f61d487391ce7bdf1f377 --- /dev/null +++ b/agents/ten_packages/extension/tsdb_firestore/extension.py @@ -0,0 +1,316 @@ +# +# +# Agora Real Time Engagement +# Created by Wei Hu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved. +# +# + +from ten import ( + AudioFrame, + VideoFrame, + Extension, + TenEnv, + Cmd, + StatusCode, + CmdResult, + Data, +) +import firebase_admin +from firebase_admin import credentials +from firebase_admin import firestore +import datetime +import asyncio +import queue +import threading +import json +from typing import List, Any + +DATA_IN_TEXT_DATA_PROPERTY_IS_FINAL = "is_final" +DATA_IN_TEXT_DATA_PROPERTY_STREAM_ID = "stream_id" +DATA_IN_TEXT_DATA_PROPERTY_TEXT = "text" +DATA_IN_TEXT_DATA_PROPERTY_ROLE = "role" + +PROPERTY_CREDENTIALS = "credentials" +PROPERTY_CHANNEL_NAME = "channel_name" +PROPERTY_COLLECTION_NAME = "collection_name" +PROPERTY_TTL = "ttl" + +RETRIEVE_CMD = "retrieve" +CMD_OUT_PROPERTY_RESPONSE = "response" +DOC_EXPIRE_PATH = "expireAt" +DOC_CONTENTS_PATH = "contents" +CONTENT_ROLE_PATH = "role" +CONTENT_TS_PATH = "ts" +CONTENT_STREAM_ID_PATH = "stream_id" +CONTENT_INPUT_PATH = "input" +DEFAULT_TTL = 1 # days + + +def get_current_time(): + # Get the current time + start_time = datetime.datetime.now() + # Get the number of microseconds since the Unix epoch + unix_microseconds = int(start_time.timestamp() * 1_000_000) + return unix_microseconds + + +def order_by_ts(contents: List[str]) -> List[Any]: + tmp = [] + for c in contents: + tmp.append(json.loads(c)) + sorted_contents = sorted(tmp, key=lambda x: x[CONTENT_TS_PATH]) + res = [] + for sc in sorted_contents: + res.append( + { + CONTENT_ROLE_PATH: sc[CONTENT_ROLE_PATH], + CONTENT_INPUT_PATH: sc[CONTENT_INPUT_PATH], + CONTENT_STREAM_ID_PATH: sc.get(CONTENT_STREAM_ID_PATH, 0), + } + ) + return res + + +@firestore.transactional +def update_in_transaction(transaction, doc_ref, content): + transaction.update(doc_ref, content) + + +@firestore.transactional +def read_in_transaction(transaction, doc_ref): + doc = doc_ref.get(transaction=transaction) + return doc.to_dict() + + +class TSDBFirestoreExtension(Extension): + def __init__(self, name: str): + super().__init__(name) + self.stopped = False + self.thread = None + self.queue = queue.Queue() + self.stopEvent = asyncio.Event() + self.cmd_thread = None + self.loop = None + self.credentials = None + self.channel_name = "" + 
self.collection_name = "" + self.ttl = DEFAULT_TTL + self.client = None + self.document_ref = None + + self.current_stream_id = 0 + self.cache = "" + + async def __thread_routine(self, ten_env: TenEnv): + ten_env.log_info("__thread_routine start") + self.loop = asyncio.get_running_loop() + ten_env.on_start_done() + await self.stopEvent.wait() + + async def stop_thread(self): + self.stopEvent.set() + + def on_init(self, ten_env: TenEnv) -> None: + ten_env.log_info("TSDBFirestoreExtension on_init") + ten_env.on_init_done() + + def on_start(self, ten_env: TenEnv) -> None: + ten_env.log_info("TSDBFirestoreExtension on_start") + + try: + self.credentials = ten_env.get_property_to_json(PROPERTY_CREDENTIALS) + except Exception as err: + ten_env.log_error( + f"GetProperty required {PROPERTY_CREDENTIALS} failed, err: {err}" + ) + return + + try: + self.channel_name = ten_env.get_property_string(PROPERTY_CHANNEL_NAME) + except Exception as err: + ten_env.log_error( + f"GetProperty required {PROPERTY_CHANNEL_NAME} failed, err: {err}" + ) + return + + try: + self.collection_name = ten_env.get_property_string(PROPERTY_COLLECTION_NAME) + except Exception as err: + ten_env.log_error( + f"GetProperty required {PROPERTY_COLLECTION_NAME} failed, err: {err}" + ) + return + + # start firestore db + cred = credentials.Certificate(json.loads(self.credentials)) + firebase_admin.initialize_app(cred) + self.client = firestore.client() + + self.document_ref = self.client.collection(self.collection_name).document( + self.channel_name + ) + # update ttl + expiration_time = datetime.datetime.now() + datetime.timedelta(days=self.ttl) + exists = self.document_ref.get().exists + if exists: + self.document_ref.update({DOC_EXPIRE_PATH: expiration_time}) + ten_env.log_info( + f"reset document ttl, {self.ttl} day(s), for the channel {self.channel_name}" + ) + else: + # not exists yet, set to create one + self.document_ref.set({DOC_EXPIRE_PATH: expiration_time}) + ten_env.log_info( + f"create new document and set ttl, {self.ttl} day(s), for the channel {self.channel_name}" + ) + + # start the loop to handle data in + self.thread = threading.Thread(target=self.async_handle, args=[ten_env]) + self.thread.start() + + # start the loop to handle cmd in + self.cmd_thread = threading.Thread( + target=asyncio.run, args=(self.__thread_routine(ten_env),) + ) + self.cmd_thread.start() + + def async_handle(self, ten_env: TenEnv) -> None: + while not self.stopped: + try: + value = self.queue.get() + if value is None: + ten_env.log_info("exit handle loop") + break + ts, input_path, role, stream_id = value + content_str = json.dumps( + { + CONTENT_ROLE_PATH: role, + CONTENT_INPUT_PATH: input_path, + CONTENT_TS_PATH: ts, + CONTENT_STREAM_ID_PATH: stream_id, + } + ) + update_in_transaction( + self.client.transaction(), + self.document_ref, + {DOC_CONTENTS_PATH: firestore.ArrayUnion([content_str])}, + ) + ten_env.log_info( + f"append {content_str} to firestore document {self.channel_name}" + ) + except Exception: + ten_env.log_error("Failed to store chat contents") + + def on_stop(self, ten_env: TenEnv) -> None: + ten_env.log_info("TSDBFirestoreExtension on_stop") + + # clear the queue and stop the thread to process data in + self.stopped = True + while not self.queue.empty(): + self.queue.get() + self.queue.put(None) + if self.thread is not None: + self.thread.join() + self.thread = None + + # stop the thread to process cmd in + if self.cmd_thread is not None and self.cmd_thread.is_alive(): + 
asyncio.run_coroutine_threadsafe(self.stop_thread(), self.loop)
+            self.cmd_thread.join()
+            self.cmd_thread = None
+
+        ten_env.on_stop_done()
+
+    def on_deinit(self, ten_env: TenEnv) -> None:
+        ten_env.log_info("TSDBFirestoreExtension on_deinit")
+        ten_env.on_deinit_done()
+
+    def on_cmd(self, ten_env: TenEnv, cmd: Cmd) -> None:
+        try:
+            cmd_name = cmd.get_name()
+            ten_env.log_info(f"on_cmd name {cmd_name}")
+            if cmd_name == RETRIEVE_CMD:
+                asyncio.run_coroutine_threadsafe(self.retrieve(ten_env, cmd), self.loop)
+            else:
+                ten_env.log_info(f"unknown cmd name {cmd_name}")
+                cmd_result = CmdResult.create(StatusCode.ERROR)
+                ten_env.return_result(cmd_result, cmd)
+        except Exception:
+            ten_env.return_result(CmdResult.create(StatusCode.ERROR), cmd)
+
+    async def retrieve(self, ten_env: TenEnv, cmd: Cmd):
+        try:
+            doc_dict = read_in_transaction(self.client.transaction(), self.document_ref)
+            if DOC_CONTENTS_PATH in doc_dict:
+                contents = doc_dict[DOC_CONTENTS_PATH]
+                ten_env.log_info(f"after retrieve {contents}")
+                ret = CmdResult.create(StatusCode.OK)
+                ret.set_property_string(
+                    CMD_OUT_PROPERTY_RESPONSE, json.dumps(order_by_ts(contents))
+                )
+                ten_env.return_result(ret, cmd)
+            else:
+                ten_env.log_info(f"no contents for the channel {self.channel_name} yet")
+                ten_env.return_result(CmdResult.create(StatusCode.ERROR), cmd)
+        except Exception:
+            ten_env.log_error(
+                f"Failed to read the document for the channel {self.channel_name}"
+            )
+            ten_env.return_result(CmdResult.create(StatusCode.ERROR), cmd)
+
+    def on_data(self, ten_env: TenEnv, data: Data) -> None:
+        ten_env.log_info("TSDBFirestoreExtension on_data")
+
+        # assume 'data' is an object from which we can get properties
+        is_final = False
+        try:
+            is_final = data.get_property_bool(DATA_IN_TEXT_DATA_PROPERTY_IS_FINAL)
+            if not is_final:
+                ten_env.log_info("ignore non-final input")
+                return
+        except Exception as err:
+            ten_env.log_info(
+                f"OnData GetProperty {DATA_IN_TEXT_DATA_PROPERTY_IS_FINAL} failed, err: {err}"
+            )
+
+        stream_id = 0
+        try:
+            stream_id = data.get_property_int(DATA_IN_TEXT_DATA_PROPERTY_STREAM_ID)
+        except Exception as err:
+            ten_env.log_info(
+                f"OnData GetProperty {DATA_IN_TEXT_DATA_PROPERTY_STREAM_ID} failed, err: {err}"
+            )
+
+        # get input text
+        try:
+            input_text = data.get_property_string(DATA_IN_TEXT_DATA_PROPERTY_TEXT)
+            if not input_text:
+                ten_env.log_info("ignore empty text")
+                return
+            ten_env.log_info(f"OnData input text: [{input_text}]")
+        except Exception as err:
+            ten_env.log_info(
+                f"OnData GetProperty {DATA_IN_TEXT_DATA_PROPERTY_TEXT} failed, err: {err}"
+            )
+            return
+        # get role
+        try:
+            role = data.get_property_string(DATA_IN_TEXT_DATA_PROPERTY_ROLE)
+            if not role:
+                ten_env.log_warn("ignore empty role")
+                return
+        except Exception as err:
+            ten_env.log_info(
+                f"OnData GetProperty {DATA_IN_TEXT_DATA_PROPERTY_ROLE} failed, err: {err}"
+            )
+            return
+
+        ts = get_current_time()
+        self.queue.put((ts, input_text, role, stream_id))
+
+    def on_audio_frame(self, ten_env: TenEnv, audio_frame: AudioFrame) -> None:
+        pass
+
+    def on_video_frame(self, ten_env: TenEnv, video_frame: VideoFrame) -> None:
+        pass
diff --git a/agents/ten_packages/extension/tsdb_firestore/manifest.json b/agents/ten_packages/extension/tsdb_firestore/manifest.json
new file mode 100644
index 0000000000000000000000000000000000000000..2a1cfc251c762eb9141b08f545e63a7ba738afaf
--- /dev/null
+++ b/agents/ten_packages/extension/tsdb_firestore/manifest.json
@@ -0,0 +1,52 @@
+{
+  "type": "extension",
+  "name": "tsdb_firestore",
+  "version": 
"0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "package": { + "include": [ + "manifest.json", + "property.json", + "BUILD.gn", + "**.tent", + "**.py", + "README.md" + ] + }, + "api": { + "data_in": [ + { + "name": "append", + "property": { + "text": { + "type": "string" + }, + "is_final": { + "type": "bool" + }, + "role": { + "type": "string" + } + } + } + ], + "cmd_in": [ + { + "name": "retrieve", + "result": { + "property": { + "response": { + "type": "string" + } + } + } + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/tsdb_firestore/property.json b/agents/ten_packages/extension/tsdb_firestore/property.json new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/agents/ten_packages/extension/tsdb_firestore/property.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/agents/ten_packages/extension/tsdb_firestore/requirements.txt b/agents/ten_packages/extension/tsdb_firestore/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..4720fc6ff682bc1ad6fe7c58ad5c86958d4d1ff7 --- /dev/null +++ b/agents/ten_packages/extension/tsdb_firestore/requirements.txt @@ -0,0 +1 @@ +firebase-admin \ No newline at end of file diff --git a/agents/ten_packages/extension/vision_analyze_tool_python/README.md b/agents/ten_packages/extension/vision_analyze_tool_python/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7760fb3e6c1978803346e9764c36aa095da4838b --- /dev/null +++ b/agents/ten_packages/extension/vision_analyze_tool_python/README.md @@ -0,0 +1,29 @@ +# vision_analyze_tool_python + + + +## Features + + + +- xxx feature + +## API + +Refer to `api` definition in [manifest.json] and default values in [property.json](property.json). + + + +## Development + +### Build + + + +### Unit test + + + +## Misc + + diff --git a/agents/ten_packages/extension/vision_analyze_tool_python/__init__.py b/agents/ten_packages/extension/vision_analyze_tool_python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..72593ab2259f95627bdd500fe3d062984e7f44c6 --- /dev/null +++ b/agents/ten_packages/extension/vision_analyze_tool_python/__init__.py @@ -0,0 +1,6 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. +# +from . import addon diff --git a/agents/ten_packages/extension/vision_analyze_tool_python/addon.py b/agents/ten_packages/extension/vision_analyze_tool_python/addon.py new file mode 100644 index 0000000000000000000000000000000000000000..1b499d5e314fece8808b44d5a9068d6dcfce2def --- /dev/null +++ b/agents/ten_packages/extension/vision_analyze_tool_python/addon.py @@ -0,0 +1,19 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. 
+#
+from ten import (
+    Addon,
+    register_addon_as_extension,
+    TenEnv,
+)
+from .extension import VisionAnalyzeToolExtension
+
+
+@register_addon_as_extension("vision_analyze_tool_python")
+class VisionAnalyzeToolExtensionAddon(Addon):
+
+    def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None:
+        ten_env.log_info("VisionAnalyzeToolExtensionAddon on_create_instance")
+        ten_env.on_create_instance_done(VisionAnalyzeToolExtension(name), context)
diff --git a/agents/ten_packages/extension/vision_analyze_tool_python/extension.py b/agents/ten_packages/extension/vision_analyze_tool_python/extension.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac075d6217c4f1f81ccf445699f95415d216db9e
--- /dev/null
+++ b/agents/ten_packages/extension/vision_analyze_tool_python/extension.py
@@ -0,0 +1,182 @@
+#
+# This file is part of TEN Framework, an open source project.
+# Licensed under the Apache License, Version 2.0.
+# See the LICENSE file for more information.
+#
+import json
+from ten import (
+    AudioFrame,
+    VideoFrame,
+    AsyncTenEnv,
+    Cmd,
+    Data,
+)
+from PIL import Image
+from io import BytesIO
+from base64 import b64encode
+
+from ten_ai_base.const import CMD_CHAT_COMPLETION_CALL
+from ten_ai_base.types import (
+    LLMChatCompletionUserMessageParam,
+    LLMToolMetadata,
+    LLMToolMetadataParameter,
+    LLMToolResult,
+    LLMToolResultLLMResult,
+)
+from ten_ai_base.llm_tool import AsyncLLMToolBaseExtension
+
+
+def rgb2base64jpeg(rgb_data, width, height):
+    # Convert the RGB image to a PIL Image
+    pil_image = Image.frombytes("RGBA", (width, height), bytes(rgb_data))
+    pil_image = pil_image.convert("RGB")
+
+    # Resize the image while maintaining its aspect ratio
+    pil_image = resize_image_keep_aspect(pil_image, 512)
+
+    # Save the image to a BytesIO object in JPEG format
+    buffered = BytesIO()
+    pil_image.save(buffered, format="JPEG")
+    # pil_image.save("test.jpg", format="JPEG")
+
+    # Get the byte data of the JPEG image
+    jpeg_image_data = buffered.getvalue()
+
+    # Convert the JPEG byte data to a Base64 encoded string
+    base64_encoded_image = b64encode(jpeg_image_data).decode("utf-8")
+
+    # Create the data URL
+    mime_type = "image/jpeg"
+    base64_url = f"data:{mime_type};base64,{base64_encoded_image}"
+    return base64_url
+
+
+def resize_image_keep_aspect(image, max_size=512):
+    """
+    Resize an image while maintaining its aspect ratio, ensuring the larger dimension is max_size.
+    If both dimensions are smaller than max_size, the image is not resized. 
+ + :param image: A PIL Image object + :param max_size: The maximum size for the larger dimension (width or height) + :return: A PIL Image object (resized or original) + """ + # Get current width and height + width, height = image.size + + # If both dimensions are already smaller than max_size, return the original image + if width <= max_size and height <= max_size: + return image + + # Calculate the aspect ratio + aspect_ratio = width / height + + # Determine the new dimensions + if width > height: + new_width = max_size + new_height = int(max_size / aspect_ratio) + else: + new_height = max_size + new_width = int(max_size * aspect_ratio) + + # Resize the image with the new dimensions + resized_image = image.resize((new_width, new_height)) + + return resized_image + + +class VisionAnalyzeToolExtension(AsyncLLMToolBaseExtension): + image_data = None + image_width = 0 + image_height = 0 + + async def on_init(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_debug("on_init") + + async def on_start(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_debug("on_start") + await super().on_start(ten_env) + + async def on_stop(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_debug("on_stop") + + # TODO: clean up resources + + async def on_deinit(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_debug("on_deinit") + + async def on_cmd(self, ten_env: AsyncTenEnv, cmd: Cmd) -> None: + cmd_name = cmd.get_name() + ten_env.log_debug("on_cmd name {}".format(cmd_name)) + + await super().on_cmd(ten_env, cmd) + + async def on_data(self, ten_env: AsyncTenEnv, data: Data) -> None: + data_name = data.get_name() + ten_env.log_debug("on_data name {}".format(data_name)) + + async def on_audio_frame( + self, ten_env: AsyncTenEnv, audio_frame: AudioFrame + ) -> None: + audio_frame_name = audio_frame.get_name() + ten_env.log_debug("on_audio_frame name {}".format(audio_frame_name)) + + async def on_video_frame( + self, ten_env: AsyncTenEnv, video_frame: VideoFrame + ) -> None: + video_frame_name = video_frame.get_name() + ten_env.log_debug("on_video_frame name {}".format(video_frame_name)) + + self.image_data = video_frame.get_buf() + self.image_width = video_frame.get_width() + self.image_height = video_frame.get_height() + + def get_tool_metadata(self, ten_env: AsyncTenEnv) -> list[LLMToolMetadata]: + return [ + LLMToolMetadata( + name="get_vision_chat_completion", + description="Get the image analyze result from camera. Call this whenever you need to understand the input camera image like you have vision capability, for example when user asks 'What can you see in my camera?' 
or 'Can you see me?'", + parameters=[ + LLMToolMetadataParameter( + name="query", + type="string", + description="The vision completion query.", + required=True, + ), + ], + ), + ] + + async def run_tool( + self, ten_env: AsyncTenEnv, name: str, args: dict + ) -> LLMToolResult | None: + if name == "get_vision_chat_completion": + if self.image_data is None: + raise ValueError("No image data available") + + if "query" not in args: + raise ValueError("Failed to get property") + + query = args["query"] + + base64_image = rgb2base64jpeg( + self.image_data, self.image_width, self.image_height + ) + # return LLMToolResult(message=LLMCompletionArgsMessage(role="user", content=[result])) + cmd: Cmd = Cmd.create(CMD_CHAT_COMPLETION_CALL) + message: LLMChatCompletionUserMessageParam = ( + LLMChatCompletionUserMessageParam( + role="user", + content=[ + {"type": "text", "text": query}, + {"type": "image_url", "image_url": {"url": base64_image}}, + ], + ) + ) + cmd.set_property_from_json("arguments", json.dumps({"messages": [message]})) + ten_env.log_info("send_cmd {}".format(message)) + [cmd_result, _] = await ten_env.send_cmd(cmd) + result = cmd_result.get_property_to_json("response") + return LLMToolResultLLMResult( + type="llmresult", + content=json.dumps(result), + ) diff --git a/agents/ten_packages/extension/vision_analyze_tool_python/manifest.json b/agents/ten_packages/extension/vision_analyze_tool_python/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..84dd7f6e11f166108a4dceb5d2ab67c87a56846d --- /dev/null +++ b/agents/ten_packages/extension/vision_analyze_tool_python/manifest.json @@ -0,0 +1,85 @@ +{ + "type": "extension", + "name": "vision_analyze_tool_python", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "package": { + "include": [ + "manifest.json", + "property.json", + "BUILD.gn", + "**.tent", + "**.py", + "README.md", + "tests/**" + ] + }, + "api": { + "property": {}, + "cmd_in": [ + { + "name": "tool_call", + "property": { + "name": { + "type": "string" + }, + "args": { + "type": "string" + } + }, + "required": [ + "name" + ] + } + ], + "cmd_out": [ + { + "name": "tool_register", + "property": { + "tool": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "parameters": { + "type": "array", + "items": { + "type": "object", + "properties": {} + } + } + }, + "required": [ + "name", + "description", + "parameters" + ] + } + }, + "result": { + "property": { + "response": { + "type": "string" + } + } + } + } + ], + "video_frame_in": [ + { + "name": "video_frame", + "property": {} + } + ] + } +} \ No newline at end of file diff --git a/agents/ten_packages/extension/vision_analyze_tool_python/property.json b/agents/ten_packages/extension/vision_analyze_tool_python/property.json new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/agents/ten_packages/extension/vision_analyze_tool_python/property.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/agents/ten_packages/extension/vision_tool_python/README.md b/agents/ten_packages/extension/vision_tool_python/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b00a31511a878b1d0291b9a098f29d83ce137895 --- /dev/null +++ b/agents/ten_packages/extension/vision_tool_python/README.md @@ -0,0 +1,29 @@ +# vision_tool_python + + + +## Features + + + +- xxx feature 
+ +## API + +Refer to `api` definition in [manifest.json] and default values in [property.json](property.json). + + + +## Development + +### Build + + + +### Unit test + + + +## Misc + + diff --git a/agents/ten_packages/extension/vision_tool_python/__init__.py b/agents/ten_packages/extension/vision_tool_python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..72593ab2259f95627bdd500fe3d062984e7f44c6 --- /dev/null +++ b/agents/ten_packages/extension/vision_tool_python/__init__.py @@ -0,0 +1,6 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. +# +from . import addon diff --git a/agents/ten_packages/extension/vision_tool_python/addon.py b/agents/ten_packages/extension/vision_tool_python/addon.py new file mode 100644 index 0000000000000000000000000000000000000000..f0aba7e19f18db245ff4b6a3de5d9ea2d6d0bbe8 --- /dev/null +++ b/agents/ten_packages/extension/vision_tool_python/addon.py @@ -0,0 +1,19 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. +# +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) +from .extension import VisionToolExtension + + +@register_addon_as_extension("vision_tool_python") +class VisionToolExtensionAddon(Addon): + + def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None: + ten_env.log_info("VisionToolExtensionAddon on_create_instance") + ten_env.on_create_instance_done(VisionToolExtension(name), context) diff --git a/agents/ten_packages/extension/vision_tool_python/extension.py b/agents/ten_packages/extension/vision_tool_python/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..a0345c23d39761212f78fa4bc2aca1731357b578 --- /dev/null +++ b/agents/ten_packages/extension/vision_tool_python/extension.py @@ -0,0 +1,153 @@ +# +# This file is part of TEN Framework, an open source project. +# Licensed under the Apache License, Version 2.0. +# See the LICENSE file for more information. +# +from ten_ai_base.types import LLMChatCompletionContentPartImageParam, LLMToolMetadata, LLMToolResult, LLMToolResultRequery +from ten_ai_base.llm_tool import AsyncLLMToolBaseExtension +from ten import ( + AudioFrame, + VideoFrame, + AsyncTenEnv, + Cmd, + Data, +) +from PIL import Image +from io import BytesIO +from base64 import b64encode + + +def rgb2base64jpeg(rgb_data, width, height): + # Convert the RGB image to a PIL Image + pil_image = Image.frombytes("RGBA", (width, height), bytes(rgb_data)) + pil_image = pil_image.convert("RGB") + + # Resize the image while maintaining its aspect ratio + pil_image = resize_image_keep_aspect(pil_image, 512) + + # Save the image to a BytesIO object in JPEG format + buffered = BytesIO() + pil_image.save(buffered, format="JPEG") + # pil_image.save("test.jpg", format="JPEG") + + # Get the byte data of the JPEG image + jpeg_image_data = buffered.getvalue() + + # Convert the JPEG byte data to a Base64 encoded string + base64_encoded_image = b64encode(jpeg_image_data).decode("utf-8") + + # Create the data URL + mime_type = "image/jpeg" + base64_url = f"data:{mime_type};base64,{base64_encoded_image}" + return base64_url + + +def resize_image_keep_aspect(image, max_size=512): + """ + Resize an image while maintaining its aspect ratio, ensuring the larger dimension is max_size. 
+ If both dimensions are smaller than max_size, the image is not resized. + + :param image: A PIL Image object + :param max_size: The maximum size for the larger dimension (width or height) + :return: A PIL Image object (resized or original) + """ + # Get current width and height + width, height = image.size + + # If both dimensions are already smaller than max_size, return the original image + if width <= max_size and height <= max_size: + return image + + # Calculate the aspect ratio + aspect_ratio = width / height + + # Determine the new dimensions + if width > height: + new_width = max_size + new_height = int(max_size / aspect_ratio) + else: + new_height = max_size + new_width = int(max_size * aspect_ratio) + + # Resize the image with the new dimensions + resized_image = image.resize((new_width, new_height)) + + return resized_image + + +class VisionToolExtension(AsyncLLMToolBaseExtension): + image_data = None + image_width = 0 + image_height = 0 + + async def on_init(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_debug("on_init") + await super().on_init(ten_env) + + async def on_start(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_debug("on_start") + await super().on_start(ten_env) + + async def on_stop(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_debug("on_stop") + + # TODO: clean up resources + + await super().on_stop(ten_env) + + async def on_cmd(self, ten_env: AsyncTenEnv, cmd: Cmd) -> None: + cmd_name = cmd.get_name() + ten_env.log_debug("on_cmd name {}".format(cmd_name)) + + await super().on_cmd(ten_env, cmd) + + async def on_data(self, ten_env: AsyncTenEnv, data: Data) -> None: + data_name = data.get_name() + ten_env.log_debug("on_data name {}".format(data_name)) + + async def on_audio_frame( + self, ten_env: AsyncTenEnv, audio_frame: AudioFrame + ) -> None: + audio_frame_name = audio_frame.get_name() + ten_env.log_debug("on_audio_frame name {}".format(audio_frame_name)) + + async def on_video_frame( + self, ten_env: AsyncTenEnv, video_frame: VideoFrame + ) -> None: + video_frame_name = video_frame.get_name() + ten_env.log_debug("on_video_frame name {}".format(video_frame_name)) + + self.image_data = video_frame.get_buf() + self.image_width = video_frame.get_width() + self.image_height = video_frame.get_height() + + def get_tool_metadata(self, ten_env: AsyncTenEnv) -> list[LLMToolMetadata]: + return [ + LLMToolMetadata( + name="get_vision_tool", + description="Get the image from camera. Call this whenever you need to understand the input camera image like you have vision capability, for example when user asks 'What can you see?' 
or 'Can you see me?'",
+                parameters=[],
+            ),
+        ]
+
+    async def run_tool(
+        self, ten_env: AsyncTenEnv, name: str, args: dict
+    ) -> LLMToolResult | None:
+        if name == "get_vision_tool":
+            if self.image_data is None:
+                raise ValueError("No image data available")
+
+            base64_image = rgb2base64jpeg(
+                self.image_data, self.image_width, self.image_height
+            )
+            return LLMToolResultRequery(
+                type="requery",
+                content=[
+                    LLMChatCompletionContentPartImageParam(
+                        type="image_url",
+                        image_url={
+                            "url": base64_image,
+                        },
+                    )
+                ],
+            )
diff --git a/agents/ten_packages/extension/vision_tool_python/manifest.json b/agents/ten_packages/extension/vision_tool_python/manifest.json
new file mode 100644
index 0000000000000000000000000000000000000000..c6e5206af13f7ba9b06060eafc8196219d222268
--- /dev/null
+++ b/agents/ten_packages/extension/vision_tool_python/manifest.json
@@ -0,0 +1,85 @@
+{
+  "type": "extension",
+  "name": "vision_tool_python",
+  "version": "0.1.0",
+  "dependencies": [
+    {
+      "type": "system",
+      "name": "ten_runtime_python",
+      "version": "0.8"
+    }
+  ],
+  "package": {
+    "include": [
+      "manifest.json",
+      "property.json",
+      "BUILD.gn",
+      "**.tent",
+      "**.py",
+      "README.md",
+      "tests/**"
+    ]
+  },
+  "api": {
+    "property": {},
+    "cmd_in": [
+      {
+        "name": "tool_call",
+        "property": {
+          "name": {
+            "type": "string"
+          },
+          "args": {
+            "type": "string"
+          }
+        },
+        "required": [
+          "name"
+        ]
+      }
+    ],
+    "cmd_out": [
+      {
+        "name": "tool_register",
+        "property": {
+          "tool": {
+            "type": "object",
+            "properties": {
+              "name": {
+                "type": "string"
+              },
+              "description": {
+                "type": "string"
+              },
+              "parameters": {
+                "type": "array",
+                "items": {
+                  "type": "object",
+                  "properties": {}
+                }
+              }
+            },
+            "required": [
+              "name",
+              "description",
+              "parameters"
+            ]
+          }
+        },
+        "result": {
+          "property": {
+            "response": {
+              "type": "string"
+            }
+          }
+        }
+      }
+    ],
+    "video_frame_in": [
+      {
+        "name": "video_frame",
+        "property": {}
+      }
+    ]
+  }
+}
\ No newline at end of file
diff --git a/agents/ten_packages/extension/vision_tool_python/property.json b/agents/ten_packages/extension/vision_tool_python/property.json
new file mode 100644
index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b
--- /dev/null
+++ b/agents/ten_packages/extension/vision_tool_python/property.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/agents/ten_packages/extension/weatherapi_tool_python/README.md b/agents/ten_packages/extension/weatherapi_tool_python/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..de7c18aa2d4fffd18f7bf3b40062441f82b5bfe0
--- /dev/null
+++ b/agents/ten_packages/extension/weatherapi_tool_python/README.md
@@ -0,0 +1,21 @@
+# weatherapi_tool_python
+
+This is a tool demo for weather queries.
+
+## Features
+
+- Fetch today's weather.
+- Search for historical weather.
+- Forecast the weather for the next 3 days.
+
+## API
+
+Refer to the `api` definition in [manifest.json](manifest.json) and default values in [property.json](property.json).
+
+### Out:
+
+- `tool_register`: automatically registers the tool with the LLM
+
+### In:
+
+- `tool_call`: synchronous command to fetch weather
diff --git a/agents/ten_packages/extension/weatherapi_tool_python/__init__.py b/agents/ten_packages/extension/weatherapi_tool_python/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..27de41c93aa9dd96cc6a99619bbe1245fed4d829
--- /dev/null
+++ b/agents/ten_packages/extension/weatherapi_tool_python/__init__.py
@@ -0,0 +1,8 @@
+#
+#
+# Agora Real Time Engagement
+# Created by Tomas Liu in 2024-08.
+# Copyright (c) 2024 Agora IO. All rights reserved. +# +# +from . import addon diff --git a/agents/ten_packages/extension/weatherapi_tool_python/addon.py b/agents/ten_packages/extension/weatherapi_tool_python/addon.py new file mode 100644 index 0000000000000000000000000000000000000000..e34608d75cdd282f0d54deb656db27f0fc4f8d33 --- /dev/null +++ b/agents/ten_packages/extension/weatherapi_tool_python/addon.py @@ -0,0 +1,21 @@ +# +# +# Agora Real Time Engagement +# Created by Tomas Liu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved. +# +# +from ten import ( + Addon, + register_addon_as_extension, + TenEnv, +) + + +@register_addon_as_extension("weatherapi_tool_python") +class WeatherToolExtensionAddon(Addon): + + def on_create_instance(self, ten_env: TenEnv, name: str, context) -> None: + from .extension import WeatherToolExtension + ten_env.log_info("WeatherToolExtensionAddon on_create_instance") + ten_env.on_create_instance_done(WeatherToolExtension(name), context) diff --git a/agents/ten_packages/extension/weatherapi_tool_python/extension.py b/agents/ten_packages/extension/weatherapi_tool_python/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..10fe503b4d63232cba1bbd00238eb5893cab797a --- /dev/null +++ b/agents/ten_packages/extension/weatherapi_tool_python/extension.py @@ -0,0 +1,255 @@ +# +# +# Agora Real Time Engagement +# Created by Tomas Liu in 2024-08. +# Copyright (c) 2024 Agora IO. All rights reserved. +# +# + +import json +import aiohttp + +from typing import Any +from dataclasses import dataclass + +from ten import Cmd + +from ten.async_ten_env import AsyncTenEnv +from ten_ai_base.config import BaseConfig +from ten_ai_base.types import LLMToolMetadata, LLMToolMetadataParameter, LLMToolResult, LLMToolResultLLMResult +from ten_ai_base.llm_tool import AsyncLLMToolBaseExtension + +CMD_TOOL_REGISTER = "tool_register" +CMD_TOOL_CALL = "tool_call" +CMD_PROPERTY_NAME = "name" +CMD_PROPERTY_ARGS = "args" + +TOOL_REGISTER_PROPERTY_NAME = "name" +TOOL_REGISTER_PROPERTY_DESCRIPTON = "description" +TOOL_REGISTER_PROPERTY_PARAMETERS = "parameters" +TOOL_CALLBACK = "callback" + +CURRENT_TOOL_NAME = "get_current_weather" +CURRENT_TOOL_DESCRIPTION = "Determine current weather in user's location." +CURRENT_TOOL_PARAMETERS = { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state (use only English) e.g. San Francisco, CA", + } + }, + "required": ["location"], +} + +# for free key, only 7 days before, see more in https://www.weatherapi.com/pricing.aspx +HISTORY_TOOL_NAME = "get_past_weather" +HISTORY_TOOL_DESCRIPTION = "Determine weather within past 7 days in user's location." +HISTORY_TOOL_PARAMETERS = { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state (use only English) e.g. San Francisco, CA", + }, + "datetime": { + "type": "string", + "description": "The datetime user is referring in date format e.g. 2024-10-09", + }, + }, + "required": ["location", "datetime"], +} + +# for free key, only 3 days after, see more in https://www.weatherapi.com/pricing.aspx +FORECAST_TOOL_NAME = "get_future_weather" +FORECAST_TOOL_DESCRIPTION = "Determine weather in next 3 days in user's location." +FORECAST_TOOL_PARAMETERS = { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state (use only English) e.g. 
San Francisco, CA", + } + }, + "required": ["location"], +} + +PROPERTY_API_KEY = "api_key" # Required + + +@dataclass +class WeatherToolConfig(BaseConfig): + api_key: str = "" + + +class WeatherToolExtension(AsyncLLMToolBaseExtension): + def __init__(self, name: str) -> None: + super().__init__(name) + self.session = None + self.ten_env = None + self.config: WeatherToolConfig = None + + async def on_init(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_debug("on_init") + self.session = aiohttp.ClientSession() + + async def on_start(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_debug("on_start") + + self.config = await WeatherToolConfig.create_async(ten_env=ten_env) + ten_env.log_info(f"config: {self.config}") + if self.config.api_key: + await super().on_start(ten_env) + + self.ten_env = ten_env + + async def on_stop(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_debug("on_stop") + + # TODO: clean up resources + if self.session: + await self.session.close() + self.session = None # Ensure it can't be reused accidentally + + async def on_deinit(self, ten_env: AsyncTenEnv) -> None: + ten_env.log_debug("on_deinit") + + async def on_cmd(self, ten_env: AsyncTenEnv, cmd: Cmd) -> None: + cmd_name = cmd.get_name() + ten_env.log_debug("on_cmd name {}".format(cmd_name)) + + await super().on_cmd(ten_env, cmd) + + def get_tool_metadata(self, ten_env: AsyncTenEnv) -> list[LLMToolMetadata]: + return [ + LLMToolMetadata( + name=CURRENT_TOOL_NAME, + description=CURRENT_TOOL_DESCRIPTION, + parameters=[ + LLMToolMetadataParameter( + name="location", + type="string", + description="The city and state (use only English) e.g. San Francisco, CA", + required=True, + ), + ], + ), + LLMToolMetadata( + name=HISTORY_TOOL_NAME, + description=HISTORY_TOOL_DESCRIPTION, + parameters=[ + LLMToolMetadataParameter( + name="location", + type="string", + description="The city and state (use only English) e.g. San Francisco, CA", + required=True, + ), + LLMToolMetadataParameter( + name="datetime", + type="string", + description="The datetime user is referring in date format e.g. 2024-10-09", + required=True, + ), + ], + ), + LLMToolMetadata( + name=FORECAST_TOOL_NAME, + description=FORECAST_TOOL_DESCRIPTION, + parameters=[ + LLMToolMetadataParameter( + name="location", + type="string", + description="The city and state (use only English) e.g. 
San Francisco, CA", + required=True, + ), + ], + ), + ] + + async def run_tool( + self, ten_env: AsyncTenEnv, name: str, args: dict + ) -> LLMToolResult | None: + ten_env.log_info(f"run_tool name: {name}, args: {args}") + if name == CURRENT_TOOL_NAME: + result = await self._get_current_weather(args) + return LLMToolResultLLMResult( + type="llmresult", + content=json.dumps(result), + ) + elif name == HISTORY_TOOL_NAME: + result = await self._get_past_weather(args) + # result = LLMCompletionContentItemText(text="I see something") + return LLMToolResultLLMResult( + type="llmresult", + content=json.dumps(result), + ) + elif name == FORECAST_TOOL_NAME: + result = await self._get_future_weather(args) + # result = LLMCompletionContentItemText(text="I see something") + return LLMToolResultLLMResult( + type="llmresult", + content=json.dumps(result), + ) + + async def _get_current_weather(self, args: dict) -> Any: + if "location" not in args: + raise ValueError("Failed to get property") + + try: + location = args["location"] + url = f"http://api.weatherapi.com/v1/current.json?key={self.config.api_key}&q={location}&aqi=no" + + async with self.session.get(url) as response: + result = await response.json() + return { + "location": result.get("location", {}).get("name", ""), + "temperature": result.get("current", {}).get("temp_c", ""), + "humidity": result.get("current", {}).get("humidity", ""), + "wind_speed": result.get("current", {}).get("wind_kph", ""), + } + except Exception as e: + self.ten_env.log_error(f"Failed to get current weather: {e}") + return None + + async def _get_past_weather(self, args: dict) -> Any: + if "location" not in args or "datetime" not in args: + raise ValueError("Failed to get property") + + location = args["location"] + datetime = args["datetime"] + url = f"http://api.weatherapi.com/v1/history.json?key={self.config.api_key}&q={location}&dt={datetime}" + + async with self.session.get(url) as response: + result = await response.json() + + # Remove all hourly data + if ( + "forecast" in result + and "forecastday" in result["forecast"] + and result["forecast"]["forecastday"] + ): + result["forecast"]["forecastday"][0].pop("hour", None) + + return result + + async def _get_future_weather(self, args: dict) -> Any: + if "location" not in args: + raise ValueError("Failed to get property") + + location = args["location"] + url = f"http://api.weatherapi.com/v1/forecast.json?key={self.config.api_key}&q={location}&days=3&aqi=no&alerts=no" + + async with self.session.get(url) as response: + result = await response.json() + + # Log the result + self.ten_env.log_info(f"get result {result}") + + # Remove all hourly data + for d in result.get("forecast", {}).get("forecastday", []): + d.pop("hour", None) + + # Remove current weather data + result.pop("current", None) + + return result diff --git a/agents/ten_packages/extension/weatherapi_tool_python/manifest.json b/agents/ten_packages/extension/weatherapi_tool_python/manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..7412445f5e02464befec5bc5185a1ced5e6f3ec7 --- /dev/null +++ b/agents/ten_packages/extension/weatherapi_tool_python/manifest.json @@ -0,0 +1,82 @@ +{ + "type": "extension", + "name": "weatherapi_tool_python", + "version": "0.1.0", + "dependencies": [ + { + "type": "system", + "name": "ten_runtime_python", + "version": "0.8" + } + ], + "package": { + "include": [ + "manifest.json", + "property.json", + "BUILD.gn", + "**.tent", + "**.py", + "README.md" + ] + }, + "api": { + "property": { + "api_key": 
{
+        "type": "string"
+      }
+    },
+    "cmd_out": [
+      {
+        "name": "tool_register",
+        "property": {
+          "tool": {
+            "type": "object",
+            "properties": {
+              "name": {
+                "type": "string"
+              },
+              "description": {
+                "type": "string"
+              },
+              "parameters": {
+                "type": "array",
+                "items": {
+                  "type": "object",
+                  "properties": {}
+                }
+              }
+            },
+            "required": [
+              "name",
+              "description",
+              "parameters"
+            ]
+          }
+        },
+        "result": {
+          "property": {
+            "response": {
+              "type": "string"
+            }
+          }
+        }
+      }
+    ],
+    "cmd_in": [
+      {
+        "name": "tool_call",
+        "property": {
+          "name": {
+            "type": "string"
+          },
+          "args": {
+            "type": "string"
+          }
+        },
+        "required": [
+          "name"
+        ]
+      }
+    ]
+  }
+}
\ No newline at end of file
diff --git a/agents/ten_packages/extension/weatherapi_tool_python/property.json b/agents/ten_packages/extension/weatherapi_tool_python/property.json
new file mode 100644
index 0000000000000000000000000000000000000000..4f5f409a3348ef7faf6c1b1fb2272893c1c5e0e4
--- /dev/null
+++ b/agents/ten_packages/extension/weatherapi_tool_python/property.json
@@ -0,0 +1,3 @@
+{
+  "api_key": "${env:WEATHERAPI_API_KEY|}"
+}
\ No newline at end of file
diff --git a/agents/ten_packages/extension/weatherapi_tool_python/requirements.txt b/agents/ten_packages/extension/weatherapi_tool_python/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7a899a04439584360de9e4d53f2246bab25f45da
--- /dev/null
+++ b/agents/ten_packages/extension/weatherapi_tool_python/requirements.txt
@@ -0,0 +1,2 @@
+aiohttp
+asyncio
\ No newline at end of file
diff --git a/app.py b/app.py
index a4834282c1fda74dd374d1205e7e42f3c5df4c65..d7af90fd75fe13fd718878bebadfdc859683fac6 100644
--- a/app.py
+++ b/app.py
@@ -6,11 +6,6 @@ import time
 from pathlib import Path
 import signal
 import shutil
-import http.client
-import socketserver
-from http.server import HTTPServer, BaseHTTPRequestHandler
-import threading
-import socket
 
 def main():
     processes = []
@@ -28,375 +23,83 @@ def main():
             print(f"ERROR: Playground directory not found at {playground_dir}", file=sys.stderr)
             return 1
 
-        # Create the log directory with the right permissions
-        log_dir = "/tmp/ten_agent"
-        os.makedirs(log_dir, exist_ok=True)
-        # Give everyone write access to the log directory
-        os.chmod(log_dir, 0o777)
+        # Start a simple HTTP server for Hugging Face
+        from http.server import HTTPServer, SimpleHTTPRequestHandler
 
-        # Determine the public URL for the API in the Hugging Face Space
-        public_url = os.environ.get('SPACE_URL', 'https://nitrox-ten.hf.space')
-        print(f"Using public URL: {public_url}")
-
-        # Start the API server with the right environment variables
-        print("Starting TEN-Agent API server on port 8080...")
-        api_server_env = os.environ.copy()
-        api_server_env["LOG_PATH"] = log_dir
-        api_server_env["LOG_STDOUT"] = "true"
-        api_server_env["WORKERS_MAX"] = "4"  # Set the value for WORKERS_MAX
-        api_server_env["WORKER_QUIT_TIMEOUT_SECONDES"] = "30"  # Add a timeout for the workers
-        api_server_env["PORT"] = "8080"  # Set the port explicitly
-        # Allow CORS
-        api_server_env["GIN_MODE"] = "release"
-        api_server_process = subprocess.Popen([str(api_binary)], env=api_server_env)
-        processes.append(api_server_process)
-
-        # Give the API server time to start
-        time.sleep(3)
-
-        # Try to get the IP address on the local network
-        container_ip = "127.0.0.1"  # Default
-        try:
-            # Get the container hostname
-            hostname = socket.gethostname()
-            # Resolve the IP address from the hostname
-            container_ip = socket.gethostbyname(hostname)
-            print(f"Detected container IP: {container_ip}")
-        except Exception as e:
-            print(f"Failed to get container IP, using localhost: {e}")
-
-        # Helper variables for Next.js
-        is_standalone = False
-        standalone_path = playground_dir / ".next" / "standalone" / "server.js"
-        if standalone_path.exists():
-            is_standalone = True
-            print("Next.js is in standalone mode")
-
-        # Copy the static files for standalone mode, if needed
-        if is_standalone:
-            print("Preparing static files for standalone mode...")
-            # Create the required directories
-            standalone_static_dir = Path("/app/playground/.next/standalone/public")
-            os.makedirs(standalone_static_dir, exist_ok=True)
-
-            # Copy the static files from .next/static to public/_next/static
-            source_static = playground_dir / ".next" / "static"
-            target_static = standalone_static_dir / "_next" / "static"
-            if source_static.exists():
-                print(f"Copying static files from {source_static} to {target_static}")
-                os.makedirs(target_static.parent, exist_ok=True)
-
-                # Remove the previous directory, if any
-                if target_static.exists():
-                    shutil.rmtree(target_static)
-
-                # Copy all the static files
-                shutil.copytree(source_static, target_static)
-            else:
-                print(f"Static directory {source_static} not found, skipping copy")
-
-        # Start the Playground UI on port 3000
-        print("Starting Playground UI on port 3000...")
-        playground_env = os.environ.copy()
-        # Reach the API through the external URL that is accessible from the browser
-        playground_env["AGENT_SERVER_URL"] = public_url
-        playground_env["NODE_ENV"] = "production"  # Make sure we run in production mode
-        playground_env["PORT"] = "3000"  # Set the port via an environment variable
-        playground_env["HOST"] = "0.0.0.0"  # Bind to all interfaces
-
-        # Add the variables Next.js needs to work correctly with the API
-        playground_env["NEXT_PUBLIC_API_URL"] = public_url
-
-        # In standalone mode, use additional parameters
-        if is_standalone:
-            print("Using Next.js standalone mode...")
-            # Start Next.js in standalone mode
-            playground_process = subprocess.Popen(
-                "cd /app/playground && node .next/standalone/server.js",
-                shell=True,
-                env=playground_env
-            )
-        else:
-            # Start Next.js in normal mode
-            playground_process = subprocess.Popen(
-                "cd /app/playground && pnpm start",
-                shell=True,
-                env=playground_env
-            )
-        processes.append(playground_process)
-
-        # Give the Playground UI time to start
-        time.sleep(15)  # Increase the wait time
-
-        # Create an efficient proxy server
-        class ProxyHandler(BaseHTTPRequestHandler):
-            # Disable per-request logging
-            def log_message(self, format, *args):
-                if args and args[0].startswith('GET /?logs=container'):
-                    return  # Ignore requests polling the container logs
-                sys.stderr.write("%s - - [%s] %s\n" %
-                                 (self.client_address[0],
-                                  self.log_date_time_string(),
-                                  format % args))
-
+        class CustomHandler(SimpleHTTPRequestHandler):
             def do_GET(self):
-                self._handle_request('GET')
-
-            def do_POST(self):
-                self._handle_request('POST')
-
-            def do_PUT(self):
-                self._handle_request('PUT')
-
-            def do_DELETE(self):
-                self._handle_request('DELETE')
-
-            def do_OPTIONS(self):
-                # Always answer OPTIONS requests with CORS headers
-                self.send_response(200)
-                self.send_header('Access-Control-Allow-Origin', '*')
-                self.send_header('Access-Control-Allow-Methods', 'GET, POST, PUT, DELETE, OPTIONS')
-                self.send_header('Access-Control-Allow-Headers', 'Content-Type, Authorization, x-requested-with')
-                self.end_headers()
-
-            def _handle_request(self, method):
-                try:
-                    # Decide which server should handle the request
-                    target_host = container_ip  # Use the container IP instead of localhost
-
-                    # When Next.js static files are requested and standalone mode is enabled,
-                    # we can serve them directly from the file system
-                    if is_standalone and self.path.startswith('/_next/static/'):
-                        static_file_path = playground_dir / ".next" / "static" / self.path[13:]
-                        if static_file_path.exists() and static_file_path.is_file():
-                            try:
-                                # Determine the content type
-                                content_type = 'application/octet-stream'  # Default
-                                if self.path.endswith('.js'):
-                                    content_type = 'application/javascript'
-                                elif self.path.endswith('.css'):
-                                    content_type = 'text/css'
-                                elif self.path.endswith('.json'):
-                                    content_type = 'application/json'
-                                elif self.path.endswith('.png'):
-                                    content_type = 'image/png'
-                                elif self.path.endswith('.jpg') or self.path.endswith('.jpeg'):
-                                    content_type = 'image/jpeg'
-                                elif self.path.endswith('.svg'):
-                                    content_type = 'image/svg+xml'
-                                elif self.path.endswith('.woff'):
-                                    content_type = 'font/woff'
-                                elif self.path.endswith('.woff2'):
-                                    content_type = 'font/woff2'
-
-                                # Send the file
-                                self.send_response(200)
-                                self.send_header('Content-Type', content_type)
-                                self.send_header('Content-Length', str(static_file_path.stat().st_size))
-                                self.end_headers()
-
-                                with open(static_file_path, 'rb') as f:
-                                    self.wfile.write(f.read())
-                                return
-                            except Exception as file_error:
-                                print(f"Error serving static file {static_file_path}: {file_error}")
-                                # Fall back to the proxy
-
-                    # API endpoints go to port 8080 (the API server)
-                    if self.path.startswith('/health') or \
-                       self.path.startswith('/list') or \
-                       self.path.startswith('/graphs') or \
-                       self.path.startswith('/start') or \
-                       self.path.startswith('/stop') or \
-                       self.path.startswith('/ping') or \
-                       self.path.startswith('/token') or \
-                       self.path.startswith('/dev-tmp') or \
-                       self.path.startswith('/vector'):
-                        target_port = 8080
-                    else:
-                        # All other requests (including / and UI assets) go to port 3000 (Playground)
-                        target_port = 3000
-
-                    print(f"Proxying {method} {self.path} to {target_host}:{target_port}")
-
-                    # Try to connect to the target server
-                    conn = http.client.HTTPConnection(target_host, target_port, timeout=30)
-
-                    # Read the request body for POST/PUT
-                    body = None
-                    if method in ['POST', 'PUT']:
-                        content_length = int(self.headers.get('Content-Length', 0))
-                        body = self.rfile.read(content_length) if content_length > 0 else None
-
-                    # Copy all request headers
-                    headers = {k: v for k, v in self.headers.items()}
-
-                    # Fix the Host header
-                    headers['Host'] = f"{target_host}:{target_port}"
-
-                    # Send the request to the right server
-                    conn.request(method, self.path, body=body, headers=headers)
-
-                    # Get the response
-                    response = conn.getresponse()
-
-                    # Send the response status
-                    self.send_response(response.status)
-
-                    # Copy all response headers
-                    for header, value in response.getheaders():
-                        if header.lower() != 'transfer-encoding':  # Skip the transfer-encoding header
-                            self.send_header(header, value)
-
-                    # Add CORS headers
-                    self.send_header('Access-Control-Allow-Origin', '*')
-                    self.send_header('Access-Control-Allow-Methods', 'GET, POST, PUT, DELETE, OPTIONS')
-                    self.send_header('Access-Control-Allow-Headers', 'Content-Type, Authorization, x-requested-with')
-
-                    # Finish the headers
-                    self.end_headers()
-
-                    # Send the response body
-                    chunk_size = 8192
-                    while True:
-                        chunk = response.read(chunk_size)
-                        if not chunk:
-                            break
-                        self.wfile.write(chunk)
-
-                    # Close the connection
-                    conn.close()
-
-                except Exception as e:
-                    print(f"Proxy error for {method} {self.path}: {str(e)}", file=sys.stderr)
-
-                    # Don't show an error for monitoring requests
-                    if self.path == '/?logs=container' or self.path == '/?logs=build':
-                        self.send_response(200)
-                        self.send_header('Content-type', 'text/plain')
-                        self.end_headers()
-                        self.wfile.write(b"OK")
-                        return
-
-                    # Check whether this might be a Next.js static file
-                    if self.path.startswith('/_next/'):
-                        print(f"Checking for static file: {self.path}")
-                        # Try to find the file in several locations
-                        potential_paths = [
-                            playground_dir / ".next" / "static" / self.path[13:],
-                            playground_dir / "public" / self.path[1:],
-                            playground_dir / ".next" / "standalone" / "public" / self.path[1:],
-                        ]
-
-                        for static_path in potential_paths:
-                            if static_path.exists() and static_path.is_file():
-                                print(f"Found static file at {static_path}")
-                                try:
-                                    # Determine the content type
-                                    content_type = 'application/octet-stream'  # Default
-                                    if self.path.endswith('.js'):
-                                        content_type = 'application/javascript'
-                                    elif self.path.endswith('.css'):
-                                        content_type = 'text/css'
-                                    elif self.path.endswith('.json'):
-                                        content_type = 'application/json'
-
-                                    # Send the file
-                                    self.send_response(200)
-                                    self.send_header('Content-Type', content_type)
-                                    self.send_header('Content-Length', str(static_path.stat().st_size))
-                                    self.end_headers()
-
-                                    with open(static_path, 'rb') as f:
-                                        self.wfile.write(f.read())
-                                    return
-                                except Exception as file_error:
-                                    print(f"Error serving static file {static_path}: {file_error}")
-
-                    # Send an error page with an explanation
-                    self.send_response(500)
+                if self.path == '/':
+                    self.send_response(200)
                     self.send_header('Content-type', 'text/html; charset=utf-8')
                     self.end_headers()
-                    error_message = f"""
+                    html_content = """
             <!DOCTYPE html>
             <html>
             <head>
-                <title>TEN Agent - Error</title>
+                <title>TEN Agent - Hugging Face Space</title>
             </head>
             <body>
-                <h1>An error occurred while handling the request</h1>
-                <p>Error details: {str(e)}</p>
-                <p>Target port: {target_port}</p>
-                <p>Target host: {target_host}</p>
+                <h1>TEN Agent started successfully!</h1>
+                <p>The TEN Agent API server is running on port 8080.</p>
+                <p>Because of Hugging Face Space limitations, the full Playground UI cannot be started directly.</p>
+                <h2>API endpoints:</h2>
+                <ul>
+                    <li>GET /health - server health check</li>
+                    <li>GET /list - list of running agents</li>
+                    <li>GET /graphs - available graphs</li>
+                </ul>
-                <p>The system is trying to start all components. Try refreshing the page in a minute.</p>
-                <h2>Technical details:</h2>
-                <pre>
-                    Public URL: {public_url}
-                    Container IP: {container_ip}
-                    Method: {method}
-                    Path: {self.path}
-                    Static Mode: {is_standalone}
-                </pre>
+                <h2>How to use it locally</h2>
+                <p>To use the full interface, connect a local Playground to this API:</p>
+                <ol>
+                    <li>Clone the repository: git clone https://github.com/TEN-framework/TEN-Agent.git</li>
+                    <li>Go to the playground directory: cd TEN-Agent/playground</li>
+                    <li>Install the dependencies: pnpm install</li>
+                    <li>Start the Playground against this API: AGENT_SERVER_URL=https://nitrox-ten.hf.space pnpm dev</li>
+                    <li>Open it in a browser: http://localhost:3000</li>
+                </ol>
+                <p>See the TEN Agent documentation for more information.</p>
             </body>
             </html>
""" - self.wfile.write(error_message.encode('utf-8')) - - # Запускаем HTTP прокси-сервер - port = 7860 # Hugging Face Space ожидает сервер на порту 7860 - print(f"Starting proxy server on port {port}...") - - # Разрешаем повторное использование адреса и порта - class ReuseAddressServer(socketserver.ThreadingTCPServer): - allow_reuse_address = True - daemon_threads = True - - server = ReuseAddressServer(('0.0.0.0', port), ProxyHandler) + self.wfile.write(html_content.encode('utf-8')) + elif self.path.startswith('/health') or self.path.startswith('/list') or self.path.startswith('/graphs'): + # Проксирование API запросов к API серверу + self.send_response(301) + self.send_header('Location', f'http://localhost:8080{self.path}') + self.end_headers() + else: + self.send_response(404) + self.send_header('Content-type', 'text/plain; charset=utf-8') + self.end_headers() + self.wfile.write(b'Not Found') - # Запускаем сервер - server_thread = threading.Thread(target=server.serve_forever) - server_thread.daemon = True - server_thread.start() + # Запускаем API сервер + print("Starting TEN-Agent API server on port 8080...") + api_server_process = subprocess.Popen([str(api_binary)]) + processes.append(api_server_process) - # Продолжаем выполнение, чтобы можно было обработать сигналы остановки - while True: - # Проверяем, что все процессы еще живы - if not api_server_process.poll() is None: - print("API server has stopped, restarting...") - api_server_process = subprocess.Popen([str(api_binary)], env=api_server_env) - processes = [p for p in processes if p != api_server_process] - processes.append(api_server_process) - - if not playground_process.poll() is None: - print("Playground UI has stopped, restarting...") - # Проверяем наличие standalone режима - if standalone_path.exists(): - playground_process = subprocess.Popen( - "cd /app/playground && node .next/standalone/server.js", - shell=True, - env=playground_env - ) - else: - playground_process = subprocess.Popen( - "cd /app/playground && pnpm start", - shell=True, - env=playground_env - ) - processes = [p for p in processes if p != playground_process] - processes.append(playground_process) - - time.sleep(10) + # Запускаем HTTP сервер для Hugging Face + port = 7860 # Hugging Face Space обычно ожидает сервер на порту 7860 + print(f"Starting HTTP server on port {port}...") + httpd = HTTPServer(('0.0.0.0', port), CustomHandler) + httpd.serve_forever() except KeyboardInterrupt: print("Shutting down...") @@ -404,16 +107,10 @@ def main(): # Завершаем все процессы при выходе for proc in processes: try: - if proc and proc.poll() is None: - proc.terminate() - proc.wait(timeout=5) + proc.terminate() + proc.wait(timeout=5) except: - if proc and proc.poll() is None: - proc.kill() - - # Останавливаем сервер - if 'server' in locals(): - server.shutdown() + proc.kill() return 0 diff --git a/backup/server.py b/backup/server.py new file mode 100644 index 0000000000000000000000000000000000000000..68103dca84e7ad166bfc06000e124c8d3bbfee6e --- /dev/null +++ b/backup/server.py @@ -0,0 +1,15 @@ +#!/usr/bin/env python3 +from http.server import HTTPServer, SimpleHTTPRequestHandler +import os + +# Получаем порт из переменной окружения PORT или используем 7860 по умолчанию +port = int(os.environ.get("PORT", 7860)) + +# Используем SimpleHTTPRequestHandler для обслуживания файлов из текущей директории +handler = SimpleHTTPRequestHandler +handler.directory = "./static" # Установка корневой директории для сервера + +# Запускаем HTTP-сервер +with HTTPServer(("", port), handler) as 
httpd: + print(f"Сервер запущен на порту {port}") + httpd.serve_forever() diff --git a/demo/.dockerignore b/demo/.dockerignore new file mode 100644 index 0000000000000000000000000000000000000000..80ae13cefd3726cdb06adde434068760e05ddc2f --- /dev/null +++ b/demo/.dockerignore @@ -0,0 +1,3 @@ +.git +.next +node_modules diff --git a/demo/.env b/demo/.env new file mode 100644 index 0000000000000000000000000000000000000000..5f92b32496f3e61da237f0ec74bad4c71c483fb6 --- /dev/null +++ b/demo/.env @@ -0,0 +1 @@ +AGENT_SERVER_URL=http://localhost:8080 \ No newline at end of file diff --git a/demo/.gitignore b/demo/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..38d6308595b3736c2f1102b7346a176a498fc97e --- /dev/null +++ b/demo/.gitignore @@ -0,0 +1,135 @@ +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +lerna-debug.log* +.pnpm-debug.log* + +# Diagnostic reports (https://nodejs.org/api/report.html) +report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json + +# Runtime data +pids +*.pid +*.seed +*.pid.lock + +# Directory for instrumented libs generated by jscoverage/JSCover +lib-cov + +# Coverage directory used by tools like istanbul +coverage +*.lcov + +# nyc test coverage +.nyc_output + +# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) +.grunt + +# Bower dependency directory (https://bower.io/) +bower_components + +# node-waf configuration +.lock-wscript + +# Compiled binary addons (https://nodejs.org/api/addons.html) +build/Release + +# Dependency directories +node_modules/ +jspm_packages/ + +# Snowpack dependency directory (https://snowpack.dev/) +web_modules/ + +# TypeScript cache +*.tsbuildinfo + +# Optional npm cache directory +.npm + +# Optional eslint cache +.eslintcache + +# Optional stylelint cache +.stylelintcache + +# Microbundle cache +.rpt2_cache/ +.rts2_cache_cjs/ +.rts2_cache_es/ +.rts2_cache_umd/ + +# Optional REPL history +.node_repl_history + +# Output of 'npm pack' +*.tgz + +# Yarn Integrity file +.yarn-integrity + +# dotenv environment variable files +# .env +!.env +.env.development.local +.env.test.local +.env.production.local +.env.local + +# parcel-bundler cache (https://parceljs.org/) +.cache +.parcel-cache + +# Next.js build output +.next +out + +# Nuxt.js build / generate output +.nuxt +dist + +# Gatsby files +.cache/ +# Comment in the public line in if your project uses Gatsby and not Next.js +# https://nextjs.org/blog/next-9-1#public-directory-support +# public + +# vuepress build output +.vuepress/dist + +# vuepress v2.x temp and cache directory +.temp +.cache + +# Docusaurus cache and generated files +.docusaurus + +# Serverless directories +.serverless/ + +# FuseBox cache +.fusebox/ + +# DynamoDB Local files +.dynamodb/ + +# TernJS port file +.tern-port + +# Stores VSCode versions used for testing VSCode extensions +.vscode-test + +# yarn v2 +.yarn/cache +.yarn/unplugged +.yarn/build-state.yml +.yarn/install-state.gz +.pnp.* + +# lock +package-lock.json +# yarn.lock diff --git a/demo/.prettierrc.json b/demo/.prettierrc.json new file mode 100644 index 0000000000000000000000000000000000000000..2fa193871ea242cf8318fb3943d4bd169bfb28a2 --- /dev/null +++ b/demo/.prettierrc.json @@ -0,0 +1,9 @@ +{ + "plugins": ["prettier-plugin-tailwindcss"], + "tailwindConfig": "./tailwind.config.js", + "tailwindFunctions": ["clsx", "cn"], + "trailingComma": "all", + "tabWidth": 2, + "semi": false, + "singleQuote": false +} diff --git a/demo/Dockerfile b/demo/Dockerfile new file mode 100644 index 
0000000000000000000000000000000000000000..17bc95432772025a1b7d08bf516a957d6754896c --- /dev/null +++ b/demo/Dockerfile @@ -0,0 +1,68 @@ +FROM node:22-alpine AS base + +# 1. Install dependencies only when needed +FROM base AS deps +# Check https://github.com/nodejs/docker-node/tree/b4117f9333da4138b03a546ec926ef50a31506c3#nodealpine to understand why libc6-compat might be needed. +RUN apk add --no-cache libc6-compat + +WORKDIR /app + +# Install dependencies based on the preferred package manager +COPY package.json .yarnrc* yarn.lock* package-lock.json* pnpm-lock.yaml* ./ +RUN \ + if [ -f yarn.lock ]; then corepack enable yarn && yarn --immutable; \ + elif [ -f package-lock.json ]; then npm ci; \ + elif [ -f pnpm-lock.yaml ]; then npm install -g pnpm@10.2.0 && pnpm i; \ + else echo "Lockfile not found." && exit 1; \ + fi + +# 2. Rebuild the source code only when needed +FROM base AS builder +WORKDIR /app +COPY --from=deps /app/node_modules ./node_modules +COPY . . + +# Next.js collects completely anonymous telemetry data about general usage. +# Learn more here: https://nextjs.org/telemetry +# Uncomment the following line in case you want to disable telemetry during the build. +ENV NEXT_TELEMETRY_DISABLED=1 + +RUN \ + if [ -f yarn.lock ]; then corepack enable yarn && yarn run build; \ + elif [ -f package-lock.json ]; then npm run build; \ + elif [ -f pnpm-lock.yaml ]; then npm install -g pnpm@10.2.0 && pnpm run build; \ + else echo "Lockfile not found." && exit 1; \ + fi + +# 3. Production image, copy all the files and run next +FROM base AS runner +WORKDIR /app + +# Uncomment the following line in case you want to disable telemetry during runtime. +ENV NEXT_TELEMETRY_DISABLED=1 + +ENV NODE_ENV=production + +RUN addgroup --system --gid 1001 nodejs +RUN adduser --system --uid 1001 nextjs + +# COPY --from=builder /app/public ./public + +# Set the correct permission for prerender cache +RUN mkdir .next +RUN chown nextjs:nodejs .next + +# Automatically leverage output traces to reduce image size +# https://nextjs.org/docs/advanced-features/output-file-tracing +COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./ +COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static + +USER nextjs + +EXPOSE 3000 + +ENV PORT=3000 + +# server.js is created by next build from the standalone output +# https://nextjs.org/docs/pages/api-reference/next-config-js/output +CMD HOSTNAME="0.0.0.0" node server.js diff --git a/demo/LICENSE b/demo/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..e4589a2b76ca4dc830e1f2c41bd24a9d09eb4788 --- /dev/null +++ b/demo/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Agora Community + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/demo/README.md b/demo/README.md new file mode 100644 index 0000000000000000000000000000000000000000..366a43c71fd5780e66505161b033a8e9fbc75ad8 --- /dev/null +++ b/demo/README.md @@ -0,0 +1,36 @@ +## DEMO + +[![License](https://img.shields.io/badge/license-Apache%202.0-blue.svg)](../LICENSE) +[![Node.js Version](https://img.shields.io/badge/node-%3E%3D20-brightgreen)](package.json) +[![TypeScript](https://img.shields.io/badge/TypeScript-5.0-blue)](tsconfig.json) +[![React](https://img.shields.io/badge/React-18-blue)](package.json) +[![Next.js 15](https://img.shields.io/badge/Next.js-15-black)](package.json) +[![shadcn/ui](https://img.shields.io/badge/UI-shadcn%2Fui-black)](https://ui.shadcn.com) +[![pnpm](https://img.shields.io/badge/pnpm-9.12.3-blue)](package.json) + +Web demo for Ten Agent. + +URL: https://agent.theten.ai + +## Local Development + +### Prerequisites + +- Node.js >= 20 +- [pnpm 9.12.3](https://pnpm.io/installation) + +### Install dependencies + +```bash +# cd ./demo +# install dependencies +# corepack enable +pnpm install +``` + +### Run + +```bash +# run +pnpm dev +``` diff --git a/demo/bun.lockb b/demo/bun.lockb new file mode 100644 index 0000000000000000000000000000000000000000..d45a017a2638dd37bca0b6ef26a97f9b447b12ea --- /dev/null +++ b/demo/bun.lockb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f56d3631d72aa9bebade95a6ce0f3d206538f781803a666e383ceb870af5fe1 +size 295120 diff --git a/demo/components.json b/demo/components.json new file mode 100644 index 0000000000000000000000000000000000000000..d4172679d43fa97d74bc6282ab9bba334dfc155f --- /dev/null +++ b/demo/components.json @@ -0,0 +1,20 @@ +{ + "$schema": "https://ui.shadcn.com/schema.json", + "style": "new-york", + "rsc": true, + "tsx": true, + "tailwind": { + "config": "tailwind.config.js", + "css": "src/app/global.css", + "baseColor": "neutral", + "cssVariables": true, + "prefix": "" + }, + "aliases": { + "components": "@/components", + "utils": "@/lib/utils", + "ui": "@/components/ui", + "lib": "@/lib", + "hooks": "@/hooks" + } +} \ No newline at end of file diff --git a/demo/next-env.d.ts b/demo/next-env.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..40c3d68096c270ef976f3db4e9eb42b05c7067bb --- /dev/null +++ b/demo/next-env.d.ts @@ -0,0 +1,5 @@ +/// +/// + +// NOTE: This file should not be edited +// see https://nextjs.org/docs/app/building-your-application/configuring/typescript for more information. 
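For reference, the local workflow that `demo/.env` and the demo README above describe boils down to a few commands. This is a sketch, assuming Node.js >= 20 with corepack available and the TEN-Agent API server from `app.py` already listening on http://localhost:8080:

```bash
# Run the demo UI against a locally running agent server.
# AGENT_SERVER_URL mirrors the default value in demo/.env above.
cd demo
corepack enable                                   # activate the pnpm version pinned in package.json
pnpm install                                      # install dependencies from pnpm-lock.yaml
AGENT_SERVER_URL=http://localhost:8080 pnpm dev   # Next.js dev server, served on port 3000
```

The same variable is what the Hugging Face `app.py` used (as `playground_env["AGENT_SERVER_URL"]` in the removed code above) to point the UI at the agent API, so substituting a remote URL works the same way.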
diff --git a/demo/next.config.mjs b/demo/next.config.mjs new file mode 100644 index 0000000000000000000000000000000000000000..132f5fc187d8e45c5e105ad7a2b3e2818dcd8fb0 --- /dev/null +++ b/demo/next.config.mjs @@ -0,0 +1,37 @@ +/** @type {import('next').NextConfig} */ + +const nextConfig = { + // basePath: '/ai-agent', + // output: 'export', + output: 'standalone', + reactStrictMode: false, + webpack(config) { + // Grab the existing rule that handles SVG imports + const fileLoaderRule = config.module.rules.find((rule) => + rule.test?.test?.('.svg'), + ) + + config.module.rules.push( + // Reapply the existing rule, but only for svg imports ending in ?url + { + ...fileLoaderRule, + test: /\.svg$/i, + resourceQuery: /url/, // *.svg?url + }, + // Convert all other *.svg imports to React components + { + test: /\.svg$/i, + issuer: fileLoaderRule.issuer, + resourceQuery: { not: [...fileLoaderRule.resourceQuery.not, /url/] }, // exclude if *.svg?url + use: ['@svgr/webpack'], + }, + ) + + // Modify the file loader rule to ignore *.svg, since we have it handled now. + fileLoaderRule.exclude = /\.svg$/i + + return config + } +}; + +export default nextConfig; diff --git a/demo/package.json b/demo/package.json new file mode 100644 index 0000000000000000000000000000000000000000..1f4f509ab15421778d1044aa5dd9abeebc00d467 --- /dev/null +++ b/demo/package.json @@ -0,0 +1,67 @@ +{ + "name": "ten_agent_demo", + "version": "0.4.0", + "private": true, + "engines": { + "node": ">=20" + }, + "scripts": { + "dev": "next dev --turbopack", + "build": "next build", + "start": "next start", + "lint": "next lint", + "proto": "pbjs -t json-module -w commonjs -o src/protobuf/SttMessage.js src/protobuf/SttMessage.proto" + }, + "dependencies": { + "@hookform/resolvers": "^3.9.1", + "@radix-ui/react-avatar": "^1.1.1", + "@radix-ui/react-dialog": "^1.1.2", + "@radix-ui/react-icons": "^1.3.0", + "@radix-ui/react-label": "^2.1.0", + "@radix-ui/react-popover": "^1.1.2", + "@radix-ui/react-select": "^2.1.2", + "@radix-ui/react-slot": "^1.1.0", + "@radix-ui/react-tabs": "^1.1.1", + "@radix-ui/react-tooltip": "^1.1.3", + "@reduxjs/toolkit": "^2.2.3", + "agora-rtc-sdk-ng": "^4.21.0", + "agora-rtm": "^2.2.0", + "axios": "^1.7.7", + "class-variance-authority": "^0.7.0", + "clsx": "^2.1.1", + "lucide-react": "^0.453.0", + "next": "^15.0.2", + "next-themes": "^0.3.0", + "protobufjs": "^7.2.5", + "react": "^18", + "react-colorful": "^5.6.1", + "react-dom": "^18", + "react-hook-form": "^7.53.1", + "react-redux": "^9.1.0", + "redux": "^5.0.1", + "sonner": "^1.5.0", + "swr": "^2.2.5", + "tailwind-merge": "^2.5.4", + "tailwindcss-animate": "^1.0.7", + "zod": "^3.23.8" + }, + "devDependencies": { + "@minko-fe/postcss-pxtoviewport": "^1.3.2", + "@svgr/webpack": "^8.1.0", + "@types/node": "^20", + "@types/react": "^18", + "@types/react-dom": "^18", + "@types/react-redux": "^7.1.22", + "autoprefixer": "^10.4.20", + "eslint": "^8", + "eslint-config-next": "^15.0.2", + "postcss": "^8.4.47", + "prettier": "^3.3.3", + "prettier-plugin-tailwindcss": "^0.6.8", + "protobufjs-cli": "^1.1.2", + "sass": "^1.77.5", + "tailwindcss": "^3.4.14", + "typescript": "^5" + }, + "packageManager": "pnpm@10.2.0" +} diff --git a/demo/pnpm-lock.yaml b/demo/pnpm-lock.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0e39c47717c665f950b65fa245d988d4a41f7a74 --- /dev/null +++ b/demo/pnpm-lock.yaml @@ -0,0 +1,7538 @@ +lockfileVersion: '9.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +importers: + + .: + dependencies: + 
'@hookform/resolvers': + specifier: ^3.9.1 + version: 3.9.1(react-hook-form@7.53.2(react@18.3.1)) + '@radix-ui/react-avatar': + specifier: ^1.1.1 + version: 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-dialog': + specifier: ^1.1.2 + version: 1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-icons': + specifier: ^1.3.0 + version: 1.3.1(react@18.3.1) + '@radix-ui/react-label': + specifier: ^2.1.0 + version: 2.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-popover': + specifier: ^1.1.2 + version: 1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-select': + specifier: ^2.1.2 + version: 2.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-slot': + specifier: ^1.1.0 + version: 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-tabs': + specifier: ^1.1.1 + version: 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-tooltip': + specifier: ^1.1.3 + version: 1.1.3(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@reduxjs/toolkit': + specifier: ^2.2.3 + version: 2.3.0(react-redux@9.1.2(@types/react@18.3.12)(react@18.3.1)(redux@5.0.1))(react@18.3.1) + agora-rtc-sdk-ng: + specifier: ^4.21.0 + version: 4.22.2 + agora-rtm: + specifier: ^2.2.0 + version: 2.2.0 + axios: + specifier: ^1.7.7 + version: 1.7.7 + class-variance-authority: + specifier: ^0.7.0 + version: 0.7.0 + clsx: + specifier: ^2.1.1 + version: 2.1.1 + lucide-react: + specifier: ^0.453.0 + version: 0.453.0(react@18.3.1) + next: + specifier: ^15.0.2 + version: 15.0.3(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.80.6) + next-themes: + specifier: ^0.3.0 + version: 0.3.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + protobufjs: + specifier: ^7.2.5 + version: 7.4.0 + react: + specifier: ^18 + version: 18.3.1 + react-colorful: + specifier: ^5.6.1 + version: 5.6.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + react-dom: + specifier: ^18 + version: 18.3.1(react@18.3.1) + react-hook-form: + specifier: ^7.53.1 + version: 7.53.2(react@18.3.1) + react-redux: + specifier: ^9.1.0 + version: 9.1.2(@types/react@18.3.12)(react@18.3.1)(redux@5.0.1) + redux: + specifier: ^5.0.1 + version: 5.0.1 + sonner: + specifier: ^1.5.0 + version: 1.7.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + swr: + specifier: ^2.2.5 + version: 2.2.5(react@18.3.1) + tailwind-merge: + specifier: ^2.5.4 + version: 2.5.4 + tailwindcss-animate: + specifier: ^1.0.7 + version: 1.0.7(tailwindcss@3.4.14) + zod: + specifier: ^3.23.8 + version: 3.23.8 + devDependencies: + '@minko-fe/postcss-pxtoviewport': + specifier: ^1.3.2 + version: 1.5.0(postcss@8.4.48) + '@svgr/webpack': + specifier: ^8.1.0 + version: 8.1.0(typescript@5.6.3) + '@types/node': + specifier: ^20 + version: 20.17.6 + '@types/react': + specifier: ^18 + version: 18.3.12 + '@types/react-dom': + specifier: ^18 + version: 18.3.1 + '@types/react-redux': + specifier: ^7.1.22 + version: 7.1.34 + autoprefixer: + specifier: ^10.4.20 + version: 10.4.20(postcss@8.4.48) + eslint: + specifier: ^8 + version: 8.57.1 + eslint-config-next: + specifier: ^15.0.2 + version: 15.0.3(eslint@8.57.1)(typescript@5.6.3) + postcss: + specifier: ^8.4.47 + version: 
8.4.48 + prettier: + specifier: ^3.3.3 + version: 3.3.3 + prettier-plugin-tailwindcss: + specifier: ^0.6.8 + version: 0.6.8(prettier@3.3.3) + protobufjs-cli: + specifier: ^1.1.2 + version: 1.1.3(protobufjs@7.4.0) + sass: + specifier: ^1.77.5 + version: 1.80.6 + tailwindcss: + specifier: ^3.4.14 + version: 3.4.14 + typescript: + specifier: ^5 + version: 5.6.3 + +packages: + + '@agora-js/media@4.22.0': + resolution: {integrity: sha512-6IYuzm6lUQ0xmkg70z+zc4GHSu+VdnuQvq8x12GnYKcKAUf13s3h6EMy68DLG4AjLU0c/bN2uNDt74u9Zwc7vQ==} + + '@agora-js/media@4.22.2': + resolution: {integrity: sha512-Zb9fO1XpPrHasaPYUAPSPRzRuMgA0es7siMlDGXzxV6SgRLpMO9LrFnjsoWVtX664cEXM5TcitMuJHA133GDiA==} + + '@agora-js/report@4.22.0': + resolution: {integrity: sha512-6LfrvRw9O97R1FP00vdPfS4hCjA8WMEllN7JDxTBnfPDaS+XHgu+ewcTkpSnhFVQG2pM45lwuE0G9F0RKLF5Jw==} + + '@agora-js/report@4.22.2': + resolution: {integrity: sha512-7C6RjRCqif9bqzjFf1bf704GCimEXBMMEmGrR1M/qyWBNqW8CrZA7vLxuCg99hd0uICQeYp2kMX6uYYwNQDyUA==} + + '@agora-js/shared@4.22.0': + resolution: {integrity: sha512-taKwc0AqbwCHHJL/2VafRQ7thgOYC1c6tiRweL1X3QpfBjJdXYVjc9jn2zY9NAZO4l4+5f1S9t988d1536XPtQ==} + + '@agora-js/shared@4.22.2': + resolution: {integrity: sha512-8LNJlLCpSKFOfh9Y1lgP7PF6oVbXozg38UMITjUY5pCDxBptThVozf1clwekY919uDkgSLx7F7hS3bRZE0sB/w==} + + '@alloc/quick-lru@5.2.0': + resolution: {integrity: sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==} + engines: {node: '>=10'} + + '@ampproject/remapping@2.3.0': + resolution: {integrity: sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==} + engines: {node: '>=6.0.0'} + + '@babel/code-frame@7.26.2': + resolution: {integrity: sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==} + engines: {node: '>=6.9.0'} + + '@babel/compat-data@7.26.2': + resolution: {integrity: sha512-Z0WgzSEa+aUcdiJuCIqgujCshpMWgUpgOxXotrYPSA53hA3qopNaqcJpyr0hVb1FeWdnqFA35/fUtXgBK8srQg==} + engines: {node: '>=6.9.0'} + + '@babel/core@7.26.0': + resolution: {integrity: sha512-i1SLeK+DzNnQ3LL/CswPCa/E5u4lh1k6IAEphON8F+cXt0t9euTshDru0q7/IqMa1PMPz5RnHuHscF8/ZJsStg==} + engines: {node: '>=6.9.0'} + + '@babel/generator@7.26.2': + resolution: {integrity: sha512-zevQbhbau95nkoxSq3f/DC/SC+EEOUZd3DYqfSkMhY2/wfSeaHV1Ew4vk8e+x8lja31IbyuUa2uQ3JONqKbysw==} + engines: {node: '>=6.9.0'} + + '@babel/helper-annotate-as-pure@7.25.9': + resolution: {integrity: sha512-gv7320KBUFJz1RnylIg5WWYPRXKZ884AGkYpgpWW02TH66Dl+HaC1t1CKd0z3R4b6hdYEcmrNZHUmfCP+1u3/g==} + engines: {node: '>=6.9.0'} + + '@babel/helper-builder-binary-assignment-operator-visitor@7.25.9': + resolution: {integrity: sha512-C47lC7LIDCnz0h4vai/tpNOI95tCd5ZT3iBt/DBH5lXKHZsyNQv18yf1wIIg2ntiQNgmAvA+DgZ82iW8Qdym8g==} + engines: {node: '>=6.9.0'} + + '@babel/helper-compilation-targets@7.25.9': + resolution: {integrity: sha512-j9Db8Suy6yV/VHa4qzrj9yZfZxhLWQdVnRlXxmKLYlhWUVB1sB2G5sxuWYXk/whHD9iW76PmNzxZ4UCnTQTVEQ==} + engines: {node: '>=6.9.0'} + + '@babel/helper-create-class-features-plugin@7.25.9': + resolution: {integrity: sha512-UTZQMvt0d/rSz6KI+qdu7GQze5TIajwTS++GUozlw8VBJDEOAqSXwm1WvmYEZwqdqSGQshRocPDqrt4HBZB3fQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + + '@babel/helper-create-regexp-features-plugin@7.25.9': + resolution: {integrity: sha512-ORPNZ3h6ZRkOyAa/SaHU+XsLZr0UQzRwuDQ0cczIA17nAzZ+85G5cVkOJIj7QavLZGSe8QXUmNFxSZzjcZF9bw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + + 
'@babel/helper-define-polyfill-provider@0.6.2': + resolution: {integrity: sha512-LV76g+C502biUK6AyZ3LK10vDpDyCzZnhZFXkH1L75zHPj68+qc8Zfpx2th+gzwA2MzyK+1g/3EPl62yFnVttQ==} + peerDependencies: + '@babel/core': ^7.4.0 || ^8.0.0-0 <8.0.0 + + '@babel/helper-member-expression-to-functions@7.25.9': + resolution: {integrity: sha512-wbfdZ9w5vk0C0oyHqAJbc62+vet5prjj01jjJ8sKn3j9h3MQQlflEdXYvuqRWjHnM12coDEqiC1IRCi0U/EKwQ==} + engines: {node: '>=6.9.0'} + + '@babel/helper-module-imports@7.25.9': + resolution: {integrity: sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw==} + engines: {node: '>=6.9.0'} + + '@babel/helper-module-transforms@7.26.0': + resolution: {integrity: sha512-xO+xu6B5K2czEnQye6BHA7DolFFmS3LB7stHZFaOLb1pAwO1HWLS8fXA+eh0A2yIvltPVmx3eNNDBJA2SLHXFw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + + '@babel/helper-optimise-call-expression@7.25.9': + resolution: {integrity: sha512-FIpuNaz5ow8VyrYcnXQTDRGvV6tTjkNtCK/RYNDXGSLlUD6cBuQTSw43CShGxjvfBTfcUA/r6UhUCbtYqkhcuQ==} + engines: {node: '>=6.9.0'} + + '@babel/helper-plugin-utils@7.25.9': + resolution: {integrity: sha512-kSMlyUVdWe25rEsRGviIgOWnoT/nfABVWlqt9N19/dIPWViAOW2s9wznP5tURbs/IDuNk4gPy3YdYRgH3uxhBw==} + engines: {node: '>=6.9.0'} + + '@babel/helper-remap-async-to-generator@7.25.9': + resolution: {integrity: sha512-IZtukuUeBbhgOcaW2s06OXTzVNJR0ybm4W5xC1opWFFJMZbwRj5LCk+ByYH7WdZPZTt8KnFwA8pvjN2yqcPlgw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + + '@babel/helper-replace-supers@7.25.9': + resolution: {integrity: sha512-IiDqTOTBQy0sWyeXyGSC5TBJpGFXBkRynjBeXsvbhQFKj2viwJC76Epz35YLU1fpe/Am6Vppb7W7zM4fPQzLsQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + + '@babel/helper-simple-access@7.25.9': + resolution: {integrity: sha512-c6WHXuiaRsJTyHYLJV75t9IqsmTbItYfdj99PnzYGQZkYKvan5/2jKJ7gu31J3/BJ/A18grImSPModuyG/Eo0Q==} + engines: {node: '>=6.9.0'} + + '@babel/helper-skip-transparent-expression-wrappers@7.25.9': + resolution: {integrity: sha512-K4Du3BFa3gvyhzgPcntrkDgZzQaq6uozzcpGbOO1OEJaI+EJdqWIMTLgFgQf6lrfiDFo5FU+BxKepI9RmZqahA==} + engines: {node: '>=6.9.0'} + + '@babel/helper-string-parser@7.25.9': + resolution: {integrity: sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==} + engines: {node: '>=6.9.0'} + + '@babel/helper-validator-identifier@7.25.9': + resolution: {integrity: sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==} + engines: {node: '>=6.9.0'} + + '@babel/helper-validator-option@7.25.9': + resolution: {integrity: sha512-e/zv1co8pp55dNdEcCynfj9X7nyUKUXoUEwfXqaZt0omVOmDe9oOTdKStH4GmAw6zxMFs50ZayuMfHDKlO7Tfw==} + engines: {node: '>=6.9.0'} + + '@babel/helper-wrap-function@7.25.9': + resolution: {integrity: sha512-ETzz9UTjQSTmw39GboatdymDq4XIQbR8ySgVrylRhPOFpsd+JrKHIuF0de7GCWmem+T4uC5z7EZguod7Wj4A4g==} + engines: {node: '>=6.9.0'} + + '@babel/helpers@7.26.0': + resolution: {integrity: sha512-tbhNuIxNcVb21pInl3ZSjksLCvgdZy9KwJ8brv993QtIVKJBBkYXz4q4ZbAv31GdnC+R90np23L5FbEBlthAEw==} + engines: {node: '>=6.9.0'} + + '@babel/parser@7.26.2': + resolution: {integrity: sha512-DWMCZH9WA4Maitz2q21SRKHo9QXZxkDsbNZoVD62gusNtNBBqDg9i7uOhASfTfIGNzW+O+r7+jAlM8dwphcJKQ==} + engines: {node: '>=6.0.0'} + hasBin: true + + '@babel/plugin-bugfix-firefox-class-in-computed-class-key@7.25.9': + resolution: {integrity: sha512-ZkRyVkThtxQ/J6nv3JFYv1RYY+JT5BvU0y3k5bWrmuG4woXypRa4PXmm9RhOwodRkYFWqC0C0cqcJ4OqR7kW+g==} + engines: {node: 
'>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + + '@babel/plugin-bugfix-safari-class-field-initializer-scope@7.25.9': + resolution: {integrity: sha512-MrGRLZxLD/Zjj0gdU15dfs+HH/OXvnw/U4jJD8vpcP2CJQapPEv1IWwjc/qMg7ItBlPwSv1hRBbb7LeuANdcnw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + + '@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@7.25.9': + resolution: {integrity: sha512-2qUwwfAFpJLZqxd02YW9btUCZHl+RFvdDkNfZwaIJrvB8Tesjsk8pEQkTvGwZXLqXUx/2oyY3ySRhm6HOXuCug==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + + '@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@7.25.9': + resolution: {integrity: sha512-6xWgLZTJXwilVjlnV7ospI3xi+sl8lN8rXXbBD6vYn3UYDlGsag8wrZkKcSI8G6KgqKP7vNFaDgeDnfAABq61g==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.13.0 + + '@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly@7.25.9': + resolution: {integrity: sha512-aLnMXYPnzwwqhYSCyXfKkIkYgJ8zv9RK+roo9DkTXz38ynIhd9XCbN08s3MGvqL2MYGVUGdRQLL/JqBIeJhJBg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + + '@babel/plugin-proposal-private-property-in-object@7.21.0-placeholder-for-preset-env.2': + resolution: {integrity: sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-import-assertions@7.26.0': + resolution: {integrity: sha512-QCWT5Hh830hK5EQa7XzuqIkQU9tT/whqbDz7kuaZMHFl1inRRg7JnuAEOQ0Ur0QUl0NufCk1msK2BeY79Aj/eg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-import-attributes@7.26.0': + resolution: {integrity: sha512-e2dttdsJ1ZTpi3B9UYGLw41hifAubg19AtCu/2I/F1QNVclOBr1dYpTdmdyZ84Xiz43BS/tCUkMAZNLv12Pi+A==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-jsx@7.25.9': + resolution: {integrity: sha512-ld6oezHQMZsZfp6pWtbjaNDF2tiiCYYDqQszHt5VV437lewP9aSi2Of99CK0D0XB21k7FLgnLcmQKyKzynfeAA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-typescript@7.25.9': + resolution: {integrity: sha512-hjMgRy5hb8uJJjUcdWunWVcoi9bGpJp8p5Ol1229PoN6aytsLwNMgmdftO23wnCLMfVmTwZDWMPNq/D1SY60JQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-unicode-sets-regex@7.18.6': + resolution: {integrity: sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + + '@babel/plugin-transform-arrow-functions@7.25.9': + resolution: {integrity: sha512-6jmooXYIwn9ca5/RylZADJ+EnSxVUS5sjeJ9UPk6RWRzXCmOJCy6dqItPJFpw2cuCangPK4OYr5uhGKcmrm5Qg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-async-generator-functions@7.25.9': + resolution: {integrity: sha512-RXV6QAzTBbhDMO9fWwOmwwTuYaiPbggWQ9INdZqAYeSHyG7FzQ+nOZaUUjNwKv9pV3aE4WFqFm1Hnbci5tBCAw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-async-to-generator@7.25.9': + resolution: {integrity: sha512-NT7Ejn7Z/LjUH0Gv5KsBCxh7BH3fbLTV0ptHvpeMvrt3cPThHfJfst9Wrb7S8EvJ7vRTFI7z+VAvFVEQn/m5zQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-block-scoped-functions@7.25.9': + resolution: {integrity: 
sha512-toHc9fzab0ZfenFpsyYinOX0J/5dgJVA2fm64xPewu7CoYHWEivIWKxkK2rMi4r3yQqLnVmheMXRdG+k239CgA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-block-scoping@7.25.9': + resolution: {integrity: sha512-1F05O7AYjymAtqbsFETboN1NvBdcnzMerO+zlMyJBEz6WkMdejvGWw9p05iTSjC85RLlBseHHQpYaM4gzJkBGg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-class-properties@7.25.9': + resolution: {integrity: sha512-bbMAII8GRSkcd0h0b4X+36GksxuheLFjP65ul9w6C3KgAamI3JqErNgSrosX6ZPj+Mpim5VvEbawXxJCyEUV3Q==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-class-static-block@7.26.0': + resolution: {integrity: sha512-6J2APTs7BDDm+UMqP1useWqhcRAXo0WIoVj26N7kPFB6S73Lgvyka4KTZYIxtgYXiN5HTyRObA72N2iu628iTQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.12.0 + + '@babel/plugin-transform-classes@7.25.9': + resolution: {integrity: sha512-mD8APIXmseE7oZvZgGABDyM34GUmK45Um2TXiBUt7PnuAxrgoSVf123qUzPxEr/+/BHrRn5NMZCdE2m/1F8DGg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-computed-properties@7.25.9': + resolution: {integrity: sha512-HnBegGqXZR12xbcTHlJ9HGxw1OniltT26J5YpfruGqtUHlz/xKf/G2ak9e+t0rVqrjXa9WOhvYPz1ERfMj23AA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-destructuring@7.25.9': + resolution: {integrity: sha512-WkCGb/3ZxXepmMiX101nnGiU+1CAdut8oHyEOHxkKuS1qKpU2SMXE2uSvfz8PBuLd49V6LEsbtyPhWC7fnkgvQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-dotall-regex@7.25.9': + resolution: {integrity: sha512-t7ZQ7g5trIgSRYhI9pIJtRl64KHotutUJsh4Eze5l7olJv+mRSg4/MmbZ0tv1eeqRbdvo/+trvJD/Oc5DmW2cA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-duplicate-keys@7.25.9': + resolution: {integrity: sha512-LZxhJ6dvBb/f3x8xwWIuyiAHy56nrRG3PeYTpBkkzkYRRQ6tJLu68lEF5VIqMUZiAV7a8+Tb78nEoMCMcqjXBw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-duplicate-named-capturing-groups-regex@7.25.9': + resolution: {integrity: sha512-0UfuJS0EsXbRvKnwcLjFtJy/Sxc5J5jhLHnFhy7u4zih97Hz6tJkLU+O+FMMrNZrosUPxDi6sYxJ/EA8jDiAog==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + + '@babel/plugin-transform-dynamic-import@7.25.9': + resolution: {integrity: sha512-GCggjexbmSLaFhqsojeugBpeaRIgWNTcgKVq/0qIteFEqY2A+b9QidYadrWlnbWQUrW5fn+mCvf3tr7OeBFTyg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-exponentiation-operator@7.25.9': + resolution: {integrity: sha512-KRhdhlVk2nObA5AYa7QMgTMTVJdfHprfpAk4DjZVtllqRg9qarilstTKEhpVjyt+Npi8ThRyiV8176Am3CodPA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-export-namespace-from@7.25.9': + resolution: {integrity: sha512-2NsEz+CxzJIVOPx2o9UsW1rXLqtChtLoVnwYHHiB04wS5sgn7mrV45fWMBX0Kk+ub9uXytVYfNP2HjbVbCB3Ww==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-for-of@7.25.9': + resolution: {integrity: sha512-LqHxduHoaGELJl2uhImHwRQudhCM50pT46rIBNvtT/Oql3nqiS3wOwP+5ten7NpYSXrrVLgtZU3DZmPtWZo16A==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-function-name@7.25.9': + resolution: {integrity: 
sha512-8lP+Yxjv14Vc5MuWBpJsoUCd3hD6V9DgBon2FVYL4jJgbnVQ9fTgYmonchzZJOVNgzEgbxp4OwAf6xz6M/14XA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-json-strings@7.25.9': + resolution: {integrity: sha512-xoTMk0WXceiiIvsaquQQUaLLXSW1KJ159KP87VilruQm0LNNGxWzahxSS6T6i4Zg3ezp4vA4zuwiNUR53qmQAw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-literals@7.25.9': + resolution: {integrity: sha512-9N7+2lFziW8W9pBl2TzaNht3+pgMIRP74zizeCSrtnSKVdUl8mAjjOP2OOVQAfZ881P2cNjDj1uAMEdeD50nuQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-logical-assignment-operators@7.25.9': + resolution: {integrity: sha512-wI4wRAzGko551Y8eVf6iOY9EouIDTtPb0ByZx+ktDGHwv6bHFimrgJM/2T021txPZ2s4c7bqvHbd+vXG6K948Q==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-member-expression-literals@7.25.9': + resolution: {integrity: sha512-PYazBVfofCQkkMzh2P6IdIUaCEWni3iYEerAsRWuVd8+jlM1S9S9cz1dF9hIzyoZ8IA3+OwVYIp9v9e+GbgZhA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-modules-amd@7.25.9': + resolution: {integrity: sha512-g5T11tnI36jVClQlMlt4qKDLlWnG5pP9CSM4GhdRciTNMRgkfpo5cR6b4rGIOYPgRRuFAvwjPQ/Yk+ql4dyhbw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-modules-commonjs@7.25.9': + resolution: {integrity: sha512-dwh2Ol1jWwL2MgkCzUSOvfmKElqQcuswAZypBSUsScMXvgdT8Ekq5YA6TtqpTVWH+4903NmboMuH1o9i8Rxlyg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-modules-systemjs@7.25.9': + resolution: {integrity: sha512-hyss7iIlH/zLHaehT+xwiymtPOpsiwIIRlCAOwBB04ta5Tt+lNItADdlXw3jAWZ96VJ2jlhl/c+PNIQPKNfvcA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-modules-umd@7.25.9': + resolution: {integrity: sha512-bS9MVObUgE7ww36HEfwe6g9WakQ0KF07mQF74uuXdkoziUPfKyu/nIm663kz//e5O1nPInPFx36z7WJmJ4yNEw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-named-capturing-groups-regex@7.25.9': + resolution: {integrity: sha512-oqB6WHdKTGl3q/ItQhpLSnWWOpjUJLsOCLVyeFgeTktkBSCiurvPOsyt93gibI9CmuKvTUEtWmG5VhZD+5T/KA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + + '@babel/plugin-transform-new-target@7.25.9': + resolution: {integrity: sha512-U/3p8X1yCSoKyUj2eOBIx3FOn6pElFOKvAAGf8HTtItuPyB+ZeOqfn+mvTtg9ZlOAjsPdK3ayQEjqHjU/yLeVQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-nullish-coalescing-operator@7.25.9': + resolution: {integrity: sha512-ENfftpLZw5EItALAD4WsY/KUWvhUlZndm5GC7G3evUsVeSJB6p0pBeLQUnRnBCBx7zV0RKQjR9kCuwrsIrjWog==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-numeric-separator@7.25.9': + resolution: {integrity: sha512-TlprrJ1GBZ3r6s96Yq8gEQv82s8/5HnCVHtEJScUj90thHQbwe+E5MLhi2bbNHBEJuzrvltXSru+BUxHDoog7Q==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-object-rest-spread@7.25.9': + resolution: {integrity: sha512-fSaXafEE9CVHPweLYw4J0emp1t8zYTXyzN3UuG+lylqkvYd7RMrsOQ8TYx5RF231be0vqtFC6jnx3UmpJmKBYg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-object-super@7.25.9': + resolution: 
{integrity: sha512-Kj/Gh+Rw2RNLbCK1VAWj2U48yxxqL2x0k10nPtSdRa0O2xnHXalD0s+o1A6a0W43gJ00ANo38jxkQreckOzv5A==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-optional-catch-binding@7.25.9': + resolution: {integrity: sha512-qM/6m6hQZzDcZF3onzIhZeDHDO43bkNNlOX0i8n3lR6zLbu0GN2d8qfM/IERJZYauhAHSLHy39NF0Ctdvcid7g==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-optional-chaining@7.25.9': + resolution: {integrity: sha512-6AvV0FsLULbpnXeBjrY4dmWF8F7gf8QnvTEoO/wX/5xm/xE1Xo8oPuD3MPS+KS9f9XBEAWN7X1aWr4z9HdOr7A==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-parameters@7.25.9': + resolution: {integrity: sha512-wzz6MKwpnshBAiRmn4jR8LYz/g8Ksg0o80XmwZDlordjwEk9SxBzTWC7F5ef1jhbrbOW2DJ5J6ayRukrJmnr0g==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-private-methods@7.25.9': + resolution: {integrity: sha512-D/JUozNpQLAPUVusvqMxyvjzllRaF8/nSrP1s2YGQT/W4LHK4xxsMcHjhOGTS01mp9Hda8nswb+FblLdJornQw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-private-property-in-object@7.25.9': + resolution: {integrity: sha512-Evf3kcMqzXA3xfYJmZ9Pg1OvKdtqsDMSWBDzZOPLvHiTt36E75jLDQo5w1gtRU95Q4E5PDttrTf25Fw8d/uWLw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-property-literals@7.25.9': + resolution: {integrity: sha512-IvIUeV5KrS/VPavfSM/Iu+RE6llrHrYIKY1yfCzyO/lMXHQ+p7uGhonmGVisv6tSBSVgWzMBohTcvkC9vQcQFA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-react-constant-elements@7.25.9': + resolution: {integrity: sha512-Ncw2JFsJVuvfRsa2lSHiC55kETQVLSnsYGQ1JDDwkUeWGTL/8Tom8aLTnlqgoeuopWrbbGndrc9AlLYrIosrow==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-react-display-name@7.25.9': + resolution: {integrity: sha512-KJfMlYIUxQB1CJfO3e0+h0ZHWOTLCPP115Awhaz8U0Zpq36Gl/cXlpoyMRnUWlhNUBAzldnCiAZNvCDj7CrKxQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-react-jsx-development@7.25.9': + resolution: {integrity: sha512-9mj6rm7XVYs4mdLIpbZnHOYdpW42uoiBCTVowg7sP1thUOiANgMb4UtpRivR0pp5iL+ocvUv7X4mZgFRpJEzGw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-react-jsx@7.25.9': + resolution: {integrity: sha512-s5XwpQYCqGerXl+Pu6VDL3x0j2d82eiV77UJ8a2mDHAW7j9SWRqQ2y1fNo1Z74CdcYipl5Z41zvjj4Nfzq36rw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-react-pure-annotations@7.25.9': + resolution: {integrity: sha512-KQ/Takk3T8Qzj5TppkS1be588lkbTp5uj7w6a0LeQaTMSckU/wK0oJ/pih+T690tkgI5jfmg2TqDJvd41Sj1Cg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-regenerator@7.25.9': + resolution: {integrity: sha512-vwDcDNsgMPDGP0nMqzahDWE5/MLcX8sv96+wfX7as7LoF/kr97Bo/7fI00lXY4wUXYfVmwIIyG80fGZ1uvt2qg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-regexp-modifiers@7.26.0': + resolution: {integrity: sha512-vN6saax7lrA2yA/Pak3sCxuD6F5InBjn9IcrIKQPjpsLvuHYLVroTxjdlVRHjjBWxKOqIwpTXDkOssYT4BFdRw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + + '@babel/plugin-transform-reserved-words@7.25.9': + 
resolution: {integrity: sha512-7DL7DKYjn5Su++4RXu8puKZm2XBPHyjWLUidaPEkCUBbE7IPcsrkRHggAOOKydH1dASWdcUBxrkOGNxUv5P3Jg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-shorthand-properties@7.25.9': + resolution: {integrity: sha512-MUv6t0FhO5qHnS/W8XCbHmiRWOphNufpE1IVxhK5kuN3Td9FT1x4rx4K42s3RYdMXCXpfWkGSbCSd0Z64xA7Ng==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-spread@7.25.9': + resolution: {integrity: sha512-oNknIB0TbURU5pqJFVbOOFspVlrpVwo2H1+HUIsVDvp5VauGGDP1ZEvO8Nn5xyMEs3dakajOxlmkNW7kNgSm6A==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-sticky-regex@7.25.9': + resolution: {integrity: sha512-WqBUSgeVwucYDP9U/xNRQam7xV8W5Zf+6Eo7T2SRVUFlhRiMNFdFz58u0KZmCVVqs2i7SHgpRnAhzRNmKfi2uA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-template-literals@7.25.9': + resolution: {integrity: sha512-o97AE4syN71M/lxrCtQByzphAdlYluKPDBzDVzMmfCobUjjhAryZV0AIpRPrxN0eAkxXO6ZLEScmt+PNhj2OTw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-typeof-symbol@7.25.9': + resolution: {integrity: sha512-v61XqUMiueJROUv66BVIOi0Fv/CUuZuZMl5NkRoCVxLAnMexZ0A3kMe7vvZ0nulxMuMp0Mk6S5hNh48yki08ZA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-typescript@7.25.9': + resolution: {integrity: sha512-7PbZQZP50tzv2KGGnhh82GSyMB01yKY9scIjf1a+GfZCtInOWqUH5+1EBU4t9fyR5Oykkkc9vFTs4OHrhHXljQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-unicode-escapes@7.25.9': + resolution: {integrity: sha512-s5EDrE6bW97LtxOcGj1Khcx5AaXwiMmi4toFWRDP9/y0Woo6pXC+iyPu/KuhKtfSrNFd7jJB+/fkOtZy6aIC6Q==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-unicode-property-regex@7.25.9': + resolution: {integrity: sha512-Jt2d8Ga+QwRluxRQ307Vlxa6dMrYEMZCgGxoPR8V52rxPyldHu3hdlHspxaqYmE7oID5+kB+UKUB/eWS+DkkWg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-unicode-regex@7.25.9': + resolution: {integrity: sha512-yoxstj7Rg9dlNn9UQxzk4fcNivwv4nUYz7fYXBaKxvw/lnmPuOm/ikoELygbYq68Bls3D/D+NBPHiLwZdZZ4HA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-unicode-sets-regex@7.25.9': + resolution: {integrity: sha512-8BYqO3GeVNHtx69fdPshN3fnzUNLrWdHhk/icSwigksJGczKSizZ+Z6SBCxTs723Fr5VSNorTIK7a+R2tISvwQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + + '@babel/preset-env@7.26.0': + resolution: {integrity: sha512-H84Fxq0CQJNdPFT2DrfnylZ3cf5K43rGfWK4LJGPpjKHiZlk0/RzwEus3PDDZZg+/Er7lCA03MVacueUuXdzfw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/preset-modules@0.1.6-no-external-plugins': + resolution: {integrity: sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA==} + peerDependencies: + '@babel/core': ^7.0.0-0 || ^8.0.0-0 <8.0.0 + + '@babel/preset-react@7.25.9': + resolution: {integrity: sha512-D3to0uSPiWE7rBrdIICCd0tJSIGpLaaGptna2+w7Pft5xMqLpA1sz99DK5TZ1TjGbdQ/VI1eCSZ06dv3lT4JOw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/preset-typescript@7.26.0': + resolution: {integrity: 
sha512-NMk1IGZ5I/oHhoXEElcm+xUnL/szL6xflkFZmoEU9xj1qSJXpiS7rsspYo92B4DRCDvZn2erT5LdsCeXAKNCkg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/runtime@7.26.0': + resolution: {integrity: sha512-FDSOghenHTiToteC/QRlv2q3DhPZ/oOXTBoirfWNx1Cx3TMVcGWQtMMmQcSvb/JjpNeGzx8Pq/b4fKEJuWm1sw==} + engines: {node: '>=6.9.0'} + + '@babel/template@7.25.9': + resolution: {integrity: sha512-9DGttpmPvIxBb/2uwpVo3dqJ+O6RooAFOS+lB+xDqoE2PVCE8nfoHMdZLpfCQRLwvohzXISPZcgxt80xLfsuwg==} + engines: {node: '>=6.9.0'} + + '@babel/traverse@7.25.9': + resolution: {integrity: sha512-ZCuvfwOwlz/bawvAuvcj8rrithP2/N55Tzz342AkTvq4qaWbGfmCk/tKhNaV2cthijKrPAA8SRJV5WWe7IBMJw==} + engines: {node: '>=6.9.0'} + + '@babel/types@7.26.0': + resolution: {integrity: sha512-Z/yiTPj+lDVnF7lWeKCIJzaIkI0vYO87dMpZ4bg4TDrFe4XXLFWL1TbXU27gBP3QccxV9mZICCrnjnYlJjXHOA==} + engines: {node: '>=6.9.0'} + + '@emnapi/runtime@1.3.1': + resolution: {integrity: sha512-kEBmG8KyqtxJZv+ygbEim+KCGtIq1fC22Ms3S4ziXmYKm8uyoLX0MHONVKwp+9opg390VaKRNt4a7A9NwmpNhw==} + + '@eslint-community/eslint-utils@4.4.1': + resolution: {integrity: sha512-s3O3waFUrMV8P/XaF/+ZTp1X9XBZW1a4B97ZnjQF2KYWaFD2A8KyFBsrsfSjEmjn3RGWAIuvlneuZm3CUK3jbA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^6.0.0 || ^7.0.0 || >=8.0.0 + + '@eslint-community/regexpp@4.12.1': + resolution: {integrity: sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==} + engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} + + '@eslint/eslintrc@2.1.4': + resolution: {integrity: sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + '@eslint/js@8.57.1': + resolution: {integrity: sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + '@floating-ui/core@1.6.8': + resolution: {integrity: sha512-7XJ9cPU+yI2QeLS+FCSlqNFZJq8arvswefkZrYI1yQBbftw6FyrZOxYSh+9S7z7TpeWlRt9zJ5IhM1WIL334jA==} + + '@floating-ui/dom@1.6.12': + resolution: {integrity: sha512-NP83c0HjokcGVEMeoStg317VD9W7eDlGK7457dMBANbKA6GJZdc7rjujdgqzTaz93jkGgc5P/jeWbaCHnMNc+w==} + + '@floating-ui/react-dom@2.1.2': + resolution: {integrity: sha512-06okr5cgPzMNBy+Ycse2A6udMi4bqwW/zgBF/rwjcNqWkyr82Mcg8b0vjX8OJpZFy/FKjJmw6wV7t44kK6kW7A==} + peerDependencies: + react: '>=16.8.0' + react-dom: '>=16.8.0' + + '@floating-ui/utils@0.2.8': + resolution: {integrity: sha512-kym7SodPp8/wloecOpcmSnWJsK7M0E5Wg8UcFA+uO4B9s5d0ywXOEro/8HM9x0rW+TljRzul/14UYz3TleT3ig==} + + '@hookform/resolvers@3.9.1': + resolution: {integrity: sha512-ud2HqmGBM0P0IABqoskKWI6PEf6ZDDBZkFqe2Vnl+mTHCEHzr3ISjjZyCwTjC/qpL25JC9aIDkloQejvMeq0ug==} + peerDependencies: + react-hook-form: ^7.0.0 + + '@humanwhocodes/config-array@0.13.0': + resolution: {integrity: sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==} + engines: {node: '>=10.10.0'} + deprecated: Use @eslint/config-array instead + + '@humanwhocodes/module-importer@1.0.1': + resolution: {integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==} + engines: {node: '>=12.22'} + + '@humanwhocodes/object-schema@2.0.3': + resolution: {integrity: sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==} + deprecated: Use @eslint/object-schema instead + + '@img/sharp-darwin-arm64@0.33.5': + resolution: {integrity: 
sha512-UT4p+iz/2H4twwAoLCqfA9UH5pI6DggwKEGuaPy7nCVQ8ZsiY5PIcrRvD1DzuY3qYL07NtIQcWnBSY/heikIFQ==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [darwin] + + '@img/sharp-darwin-x64@0.33.5': + resolution: {integrity: sha512-fyHac4jIc1ANYGRDxtiqelIbdWkIuQaI84Mv45KvGRRxSAa7o7d1ZKAOBaYbnepLC1WqxfpimdeWfvqqSGwR2Q==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [darwin] + + '@img/sharp-libvips-darwin-arm64@1.0.4': + resolution: {integrity: sha512-XblONe153h0O2zuFfTAbQYAX2JhYmDHeWikp1LM9Hul9gVPjFY427k6dFEcOL72O01QxQsWi761svJ/ev9xEDg==} + cpu: [arm64] + os: [darwin] + + '@img/sharp-libvips-darwin-x64@1.0.4': + resolution: {integrity: sha512-xnGR8YuZYfJGmWPvmlunFaWJsb9T/AO2ykoP3Fz/0X5XV2aoYBPkX6xqCQvUTKKiLddarLaxpzNe+b1hjeWHAQ==} + cpu: [x64] + os: [darwin] + + '@img/sharp-libvips-linux-arm64@1.0.4': + resolution: {integrity: sha512-9B+taZ8DlyyqzZQnoeIvDVR/2F4EbMepXMc/NdVbkzsJbzkUjhXv/70GQJ7tdLA4YJgNP25zukcxpX2/SueNrA==} + cpu: [arm64] + os: [linux] + + '@img/sharp-libvips-linux-arm@1.0.5': + resolution: {integrity: sha512-gvcC4ACAOPRNATg/ov8/MnbxFDJqf/pDePbBnuBDcjsI8PssmjoKMAz4LtLaVi+OnSb5FK/yIOamqDwGmXW32g==} + cpu: [arm] + os: [linux] + + '@img/sharp-libvips-linux-s390x@1.0.4': + resolution: {integrity: sha512-u7Wz6ntiSSgGSGcjZ55im6uvTrOxSIS8/dgoVMoiGE9I6JAfU50yH5BoDlYA1tcuGS7g/QNtetJnxA6QEsCVTA==} + cpu: [s390x] + os: [linux] + + '@img/sharp-libvips-linux-x64@1.0.4': + resolution: {integrity: sha512-MmWmQ3iPFZr0Iev+BAgVMb3ZyC4KeFc3jFxnNbEPas60e1cIfevbtuyf9nDGIzOaW9PdnDciJm+wFFaTlj5xYw==} + cpu: [x64] + os: [linux] + + '@img/sharp-libvips-linuxmusl-arm64@1.0.4': + resolution: {integrity: sha512-9Ti+BbTYDcsbp4wfYib8Ctm1ilkugkA/uscUn6UXK1ldpC1JjiXbLfFZtRlBhjPZ5o1NCLiDbg8fhUPKStHoTA==} + cpu: [arm64] + os: [linux] + + '@img/sharp-libvips-linuxmusl-x64@1.0.4': + resolution: {integrity: sha512-viYN1KX9m+/hGkJtvYYp+CCLgnJXwiQB39damAO7WMdKWlIhmYTfHjwSbQeUK/20vY154mwezd9HflVFM1wVSw==} + cpu: [x64] + os: [linux] + + '@img/sharp-linux-arm64@0.33.5': + resolution: {integrity: sha512-JMVv+AMRyGOHtO1RFBiJy/MBsgz0x4AWrT6QoEVVTyh1E39TrCUpTRI7mx9VksGX4awWASxqCYLCV4wBZHAYxA==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [linux] + + '@img/sharp-linux-arm@0.33.5': + resolution: {integrity: sha512-JTS1eldqZbJxjvKaAkxhZmBqPRGmxgu+qFKSInv8moZ2AmT5Yib3EQ1c6gp493HvrvV8QgdOXdyaIBrhvFhBMQ==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm] + os: [linux] + + '@img/sharp-linux-s390x@0.33.5': + resolution: {integrity: sha512-y/5PCd+mP4CA/sPDKl2961b+C9d+vPAveS33s6Z3zfASk2j5upL6fXVPZi7ztePZ5CuH+1kW8JtvxgbuXHRa4Q==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [s390x] + os: [linux] + + '@img/sharp-linux-x64@0.33.5': + resolution: {integrity: sha512-opC+Ok5pRNAzuvq1AG0ar+1owsu842/Ab+4qvU879ippJBHvyY5n2mxF1izXqkPYlGuP/M556uh53jRLJmzTWA==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [linux] + + '@img/sharp-linuxmusl-arm64@0.33.5': + resolution: {integrity: sha512-XrHMZwGQGvJg2V/oRSUfSAfjfPxO+4DkiRh6p2AFjLQztWUuY/o8Mq0eMQVIY7HJ1CDQUJlxGGZRw1a5bqmd1g==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [linux] + + '@img/sharp-linuxmusl-x64@0.33.5': + resolution: {integrity: sha512-WT+d/cgqKkkKySYmqoZ8y3pxx7lx9vVejxW/W4DOFMYVSkErR+w7mf2u8m/y4+xHe7yY9DAXQMWQhpnMuFfScw==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [linux] + + '@img/sharp-wasm32@0.33.5': + resolution: {integrity: 
sha512-ykUW4LVGaMcU9lu9thv85CbRMAwfeadCJHRsg2GmeRa/cJxsVY9Rbd57JcMxBkKHag5U/x7TSBpScF4U8ElVzg==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [wasm32] + + '@img/sharp-win32-ia32@0.33.5': + resolution: {integrity: sha512-T36PblLaTwuVJ/zw/LaH0PdZkRz5rd3SmMHX8GSmR7vtNSP5Z6bQkExdSK7xGWyxLw4sUknBuugTelgw2faBbQ==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [ia32] + os: [win32] + + '@img/sharp-win32-x64@0.33.5': + resolution: {integrity: sha512-MpY/o8/8kj+EcnxwvrP4aTJSWw/aZ7JIGR4aBeZkZw5B7/Jn+tY9/VNwtcoGmdT7GfggGIU4kygOMSbYnOrAbg==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [win32] + + '@isaacs/cliui@8.0.2': + resolution: {integrity: sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==} + engines: {node: '>=12'} + + '@jridgewell/gen-mapping@0.3.5': + resolution: {integrity: sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==} + engines: {node: '>=6.0.0'} + + '@jridgewell/resolve-uri@3.1.2': + resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==} + engines: {node: '>=6.0.0'} + + '@jridgewell/set-array@1.2.1': + resolution: {integrity: sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==} + engines: {node: '>=6.0.0'} + + '@jridgewell/sourcemap-codec@1.5.0': + resolution: {integrity: sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==} + + '@jridgewell/trace-mapping@0.3.25': + resolution: {integrity: sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==} + + '@jsdoc/salty@0.2.8': + resolution: {integrity: sha512-5e+SFVavj1ORKlKaKr2BmTOekmXbelU7dC0cDkQLqag7xfuTPuGMUFx7KWJuv4bYZrTsoL2Z18VVCOKYxzoHcg==} + engines: {node: '>=v12.0.0'} + + '@minko-fe/lodash-pro@0.3.3': + resolution: {integrity: sha512-vK9JXI0THoB4myxhJAahqjP84PCmzdaXJgnb1+NfxLEAHClqPrOnUY2rh5RqU1+BAfsFaj3F4ErVw3h1kzzWeA==} + + '@minko-fe/postcss-pxtoviewport@1.5.0': + resolution: {integrity: sha512-sAI9nf4QujIZAAX2l/Lmxk6PACTf2HvW4hkKNzHjmHcObdkUZsxdCgxwb8aUPCu6oucpoeINiz3o7mm2eK1DxA==} + peerDependencies: + postcss: '>=8.0.0' + + '@next/env@15.0.3': + resolution: {integrity: sha512-t9Xy32pjNOvVn2AS+Utt6VmyrshbpfUMhIjFO60gI58deSo/KgLOp31XZ4O+kY/Is8WAGYwA5gR7kOb1eORDBA==} + + '@next/eslint-plugin-next@15.0.3': + resolution: {integrity: sha512-3Ln/nHq2V+v8uIaxCR6YfYo7ceRgZNXfTd3yW1ukTaFbO+/I8jNakrjYWODvG9BuR2v5kgVtH/C8r0i11quOgw==} + + '@next/swc-darwin-arm64@15.0.3': + resolution: {integrity: sha512-s3Q/NOorCsLYdCKvQlWU+a+GeAd3C8Rb3L1YnetsgwXzhc3UTWrtQpB/3eCjFOdGUj5QmXfRak12uocd1ZiiQw==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [darwin] + + '@next/swc-darwin-x64@15.0.3': + resolution: {integrity: sha512-Zxl/TwyXVZPCFSf0u2BNj5sE0F2uR6iSKxWpq4Wlk/Sv9Ob6YCKByQTkV2y6BCic+fkabp9190hyrDdPA/dNrw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [darwin] + + '@next/swc-linux-arm64-gnu@15.0.3': + resolution: {integrity: sha512-T5+gg2EwpsY3OoaLxUIofmMb7ohAUlcNZW0fPQ6YAutaWJaxt1Z1h+8zdl4FRIOr5ABAAhXtBcpkZNwUcKI2fw==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + + '@next/swc-linux-arm64-musl@15.0.3': + resolution: {integrity: sha512-WkAk6R60mwDjH4lG/JBpb2xHl2/0Vj0ZRu1TIzWuOYfQ9tt9NFsIinI1Epma77JVgy81F32X/AeD+B2cBu/YQA==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + + '@next/swc-linux-x64-gnu@15.0.3': + resolution: {integrity: 
sha512-gWL/Cta1aPVqIGgDb6nxkqy06DkwJ9gAnKORdHWX1QBbSZZB+biFYPFti8aKIQL7otCE1pjyPaXpFzGeG2OS2w==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + + '@next/swc-linux-x64-musl@15.0.3': + resolution: {integrity: sha512-QQEMwFd8r7C0GxQS62Zcdy6GKx999I/rTO2ubdXEe+MlZk9ZiinsrjwoiBL5/57tfyjikgh6GOU2WRQVUej3UA==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + + '@next/swc-win32-arm64-msvc@15.0.3': + resolution: {integrity: sha512-9TEp47AAd/ms9fPNgtgnT7F3M1Hf7koIYYWCMQ9neOwjbVWJsHZxrFbI3iEDJ8rf1TDGpmHbKxXf2IFpAvheIQ==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [win32] + + '@next/swc-win32-x64-msvc@15.0.3': + resolution: {integrity: sha512-VNAz+HN4OGgvZs6MOoVfnn41kBzT+M+tB+OK4cww6DNyWS6wKaDpaAm/qLeOUbnMh0oVx1+mg0uoYARF69dJyA==} + engines: {node: '>= 10'} + cpu: [x64] + os: [win32] + + '@nodelib/fs.scandir@2.1.5': + resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==} + engines: {node: '>= 8'} + + '@nodelib/fs.stat@2.0.5': + resolution: {integrity: sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==} + engines: {node: '>= 8'} + + '@nodelib/fs.walk@1.2.8': + resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==} + engines: {node: '>= 8'} + + '@nolyfill/is-core-module@1.0.39': + resolution: {integrity: sha512-nn5ozdjYQpUCZlWGuxcJY/KpxkWQs4DcbMCmKojjyrYDEAGy4Ce19NN4v5MduafTwJlbKc99UA8YhSVqq9yPZA==} + engines: {node: '>=12.4.0'} + + '@parcel/watcher-android-arm64@2.5.0': + resolution: {integrity: sha512-qlX4eS28bUcQCdribHkg/herLe+0A9RyYC+mm2PXpncit8z5b3nSqGVzMNR3CmtAOgRutiZ02eIJJgP/b1iEFQ==} + engines: {node: '>= 10.0.0'} + cpu: [arm64] + os: [android] + + '@parcel/watcher-darwin-arm64@2.5.0': + resolution: {integrity: sha512-hyZ3TANnzGfLpRA2s/4U1kbw2ZI4qGxaRJbBH2DCSREFfubMswheh8TeiC1sGZ3z2jUf3s37P0BBlrD3sjVTUw==} + engines: {node: '>= 10.0.0'} + cpu: [arm64] + os: [darwin] + + '@parcel/watcher-darwin-x64@2.5.0': + resolution: {integrity: sha512-9rhlwd78saKf18fT869/poydQK8YqlU26TMiNg7AIu7eBp9adqbJZqmdFOsbZ5cnLp5XvRo9wcFmNHgHdWaGYA==} + engines: {node: '>= 10.0.0'} + cpu: [x64] + os: [darwin] + + '@parcel/watcher-freebsd-x64@2.5.0': + resolution: {integrity: sha512-syvfhZzyM8kErg3VF0xpV8dixJ+RzbUaaGaeb7uDuz0D3FK97/mZ5AJQ3XNnDsXX7KkFNtyQyFrXZzQIcN49Tw==} + engines: {node: '>= 10.0.0'} + cpu: [x64] + os: [freebsd] + + '@parcel/watcher-linux-arm-glibc@2.5.0': + resolution: {integrity: sha512-0VQY1K35DQET3dVYWpOaPFecqOT9dbuCfzjxoQyif1Wc574t3kOSkKevULddcR9znz1TcklCE7Ht6NIxjvTqLA==} + engines: {node: '>= 10.0.0'} + cpu: [arm] + os: [linux] + + '@parcel/watcher-linux-arm-musl@2.5.0': + resolution: {integrity: sha512-6uHywSIzz8+vi2lAzFeltnYbdHsDm3iIB57d4g5oaB9vKwjb6N6dRIgZMujw4nm5r6v9/BQH0noq6DzHrqr2pA==} + engines: {node: '>= 10.0.0'} + cpu: [arm] + os: [linux] + + '@parcel/watcher-linux-arm64-glibc@2.5.0': + resolution: {integrity: sha512-BfNjXwZKxBy4WibDb/LDCriWSKLz+jJRL3cM/DllnHH5QUyoiUNEp3GmL80ZqxeumoADfCCP19+qiYiC8gUBjA==} + engines: {node: '>= 10.0.0'} + cpu: [arm64] + os: [linux] + + '@parcel/watcher-linux-arm64-musl@2.5.0': + resolution: {integrity: sha512-S1qARKOphxfiBEkwLUbHjCY9BWPdWnW9j7f7Hb2jPplu8UZ3nes7zpPOW9bkLbHRvWM0WDTsjdOTUgW0xLBN1Q==} + engines: {node: '>= 10.0.0'} + cpu: [arm64] + os: [linux] + + '@parcel/watcher-linux-x64-glibc@2.5.0': + resolution: {integrity: sha512-d9AOkusyXARkFD66S6zlGXyzx5RvY+chTP9Jp0ypSTC9d4lzyRs9ovGf/80VCxjKddcUvnsGwCHWuF2EoPgWjw==} + engines: {node: '>= 10.0.0'} + cpu: 
[x64] + os: [linux] + + '@parcel/watcher-linux-x64-musl@2.5.0': + resolution: {integrity: sha512-iqOC+GoTDoFyk/VYSFHwjHhYrk8bljW6zOhPuhi5t9ulqiYq1togGJB5e3PwYVFFfeVgc6pbz3JdQyDoBszVaA==} + engines: {node: '>= 10.0.0'} + cpu: [x64] + os: [linux] + + '@parcel/watcher-win32-arm64@2.5.0': + resolution: {integrity: sha512-twtft1d+JRNkM5YbmexfcH/N4znDtjgysFaV9zvZmmJezQsKpkfLYJ+JFV3uygugK6AtIM2oADPkB2AdhBrNig==} + engines: {node: '>= 10.0.0'} + cpu: [arm64] + os: [win32] + + '@parcel/watcher-win32-ia32@2.5.0': + resolution: {integrity: sha512-+rgpsNRKwo8A53elqbbHXdOMtY/tAtTzManTWShB5Kk54N8Q9mzNWV7tV+IbGueCbcj826MfWGU3mprWtuf1TA==} + engines: {node: '>= 10.0.0'} + cpu: [ia32] + os: [win32] + + '@parcel/watcher-win32-x64@2.5.0': + resolution: {integrity: sha512-lPrxve92zEHdgeff3aiu4gDOIt4u7sJYha6wbdEZDCDUhtjTsOMiaJzG5lMY4GkWH8p0fMmO2Ppq5G5XXG+DQw==} + engines: {node: '>= 10.0.0'} + cpu: [x64] + os: [win32] + + '@parcel/watcher@2.5.0': + resolution: {integrity: sha512-i0GV1yJnm2n3Yq1qw6QrUrd/LI9bE8WEBOTtOkpCXHHdyN3TAGgqAK/DAT05z4fq2x04cARXt2pDmjWjL92iTQ==} + engines: {node: '>= 10.0.0'} + + '@pkgjs/parseargs@0.11.0': + resolution: {integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==} + engines: {node: '>=14'} + + '@protobufjs/aspromise@1.1.2': + resolution: {integrity: sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==} + + '@protobufjs/base64@1.1.2': + resolution: {integrity: sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==} + + '@protobufjs/codegen@2.0.4': + resolution: {integrity: sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==} + + '@protobufjs/eventemitter@1.1.0': + resolution: {integrity: sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==} + + '@protobufjs/fetch@1.1.0': + resolution: {integrity: sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==} + + '@protobufjs/float@1.0.2': + resolution: {integrity: sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==} + + '@protobufjs/inquire@1.1.0': + resolution: {integrity: sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==} + + '@protobufjs/path@1.1.2': + resolution: {integrity: sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==} + + '@protobufjs/pool@1.1.0': + resolution: {integrity: sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==} + + '@protobufjs/utf8@1.1.0': + resolution: {integrity: sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==} + + '@radix-ui/number@1.1.0': + resolution: {integrity: sha512-V3gRzhVNU1ldS5XhAPTom1fOIo4ccrjjJgmE+LI2h/WaFpHmx0MQApT+KZHnx8abG6Avtfcz4WoEciMnpFT3HQ==} + + '@radix-ui/primitive@1.1.0': + resolution: {integrity: sha512-4Z8dn6Upk0qk4P74xBhZ6Hd/w0mPEzOOLxy4xiPXOXqjF7jZS0VAKk7/x/H6FyY2zCkYJqePf1G5KmkmNJ4RBA==} + + '@radix-ui/react-arrow@1.1.0': + resolution: {integrity: sha512-FmlW1rCg7hBpEBwFbjHwCW6AmWLQM6g/v0Sn8XbP9NvmSZ2San1FpQeyPtufzOMSIx7Y4dzjlHoifhp+7NkZhw==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + 
'@types/react-dom': + optional: true + + '@radix-ui/react-avatar@1.1.1': + resolution: {integrity: sha512-eoOtThOmxeoizxpX6RiEsQZ2wj5r4+zoeqAwO0cBaFQGjJwIH3dIX0OCxNrCyrrdxG+vBweMETh3VziQG7c1kw==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-collection@1.1.0': + resolution: {integrity: sha512-GZsZslMJEyo1VKm5L1ZJY8tGDxZNPAoUeQUIbKeJfoi7Q4kmig5AsgLMYYuyYbfjd8fBmFORAIwYAkXMnXZgZw==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-compose-refs@1.1.0': + resolution: {integrity: sha512-b4inOtiaOnYf9KWyO3jAeeCG6FeyfY6ldiEPanbUjWd+xIk5wZeHa8yVwmrJ2vderhu/BQvzCrJI0lHd+wIiqw==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-context@1.1.0': + resolution: {integrity: sha512-OKrckBy+sMEgYM/sMmqmErVn0kZqrHPJze+Ql3DzYsDDp0hl0L62nx/2122/Bvps1qz645jlcu2tD9lrRSdf8A==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-context@1.1.1': + resolution: {integrity: sha512-UASk9zi+crv9WteK/NU4PLvOoL3OuE6BWVKNF6hPRBtYBDXQ2u5iu3O59zUlJiTVvkyuycnqrztsHVJwcK9K+Q==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-dialog@1.1.2': + resolution: {integrity: sha512-Yj4dZtqa2o+kG61fzB0H2qUvmwBA2oyQroGLyNtBj1beo1khoQ3q1a2AO8rrQYjd8256CO9+N8L9tvsS+bnIyA==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-direction@1.1.0': + resolution: {integrity: sha512-BUuBvgThEiAXh2DWu93XsT+a3aWrGqolGlqqw5VU1kG7p/ZH2cuDlM1sRLNnY3QcBS69UIz2mcKhMxDsdewhjg==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-dismissable-layer@1.1.1': + resolution: {integrity: sha512-QSxg29lfr/xcev6kSz7MAlmDnzbP1eI/Dwn3Tp1ip0KT5CUELsxkekFEMVBEoykI3oV39hKT4TKZzBNMbcTZYQ==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-focus-guards@1.1.1': + resolution: {integrity: sha512-pSIwfrT1a6sIoDASCSpFwOasEwKTZWDw/iBdtnqKO7v6FeOzYJ7U53cPzYFVR3geGGXgVHaH+CdngrrAzqUGxg==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-focus-scope@1.1.0': + resolution: {integrity: 
sha512-200UD8zylvEyL8Bx+z76RJnASR2gRMuxlgFCPAe/Q/679a/r0eK3MBVYMb7vZODZcffZBdob1EGnky78xmVvcA==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-icons@1.3.1': + resolution: {integrity: sha512-QvYompk0X+8Yjlo/Fv4McrzxohDdM5GgLHyQcPpcsPvlOSXCGFjdbuyGL5dzRbg0GpknAjQJJZzdiRK7iWVuFQ==} + peerDependencies: + react: ^16.x || ^17.x || ^18.x || ^19.x + + '@radix-ui/react-id@1.1.0': + resolution: {integrity: sha512-EJUrI8yYh7WOjNOqpoJaf1jlFIH2LvtgAl+YcFqNCa+4hj64ZXmPkAKOFs/ukjz3byN6bdb/AVUqHkI8/uWWMA==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-label@2.1.0': + resolution: {integrity: sha512-peLblDlFw/ngk3UWq0VnYaOLy6agTZZ+MUO/WhVfm14vJGML+xH4FAl2XQGLqdefjNb7ApRg6Yn7U42ZhmYXdw==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-popover@1.1.2': + resolution: {integrity: sha512-u2HRUyWW+lOiA2g0Le0tMmT55FGOEWHwPFt1EPfbLly7uXQExFo5duNKqG2DzmFXIdqOeNd+TpE8baHWJCyP9w==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-popper@1.2.0': + resolution: {integrity: sha512-ZnRMshKF43aBxVWPWvbj21+7TQCvhuULWJ4gNIKYpRlQt5xGRhLx66tMp8pya2UkGHTSlhpXwmjqltDYHhw7Vg==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-portal@1.1.2': + resolution: {integrity: sha512-WeDYLGPxJb/5EGBoedyJbT0MpoULmwnIPMJMSldkuiMsBAv7N1cRdsTWZWht9vpPOiN3qyiGAtbK2is47/uMFg==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-presence@1.1.1': + resolution: {integrity: sha512-IeFXVi4YS1K0wVZzXNrbaaUvIJ3qdY+/Ih4eHFhWA9SwGR9UDX7Ck8abvL57C4cv3wwMvUE0OG69Qc3NCcTe/A==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-primitive@2.0.0': + resolution: {integrity: sha512-ZSpFm0/uHa8zTvKBDjLFWLo8dkr4MBsiDLz0g3gMUwqgLHz9rTaRRGYDgvZPtBJgYCBKXkS9fzmoySgr8CO6Cw==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + 
optional: true + + '@radix-ui/react-roving-focus@1.1.0': + resolution: {integrity: sha512-EA6AMGeq9AEeQDeSH0aZgG198qkfHSbvWTf1HvoDmOB5bBG/qTxjYMWUKMnYiV6J/iP/J8MEFSuB2zRU2n7ODA==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-select@2.1.2': + resolution: {integrity: sha512-rZJtWmorC7dFRi0owDmoijm6nSJH1tVw64QGiNIZ9PNLyBDtG+iAq+XGsya052At4BfarzY/Dhv9wrrUr6IMZA==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-slot@1.1.0': + resolution: {integrity: sha512-FUCf5XMfmW4dtYl69pdS4DbxKy8nj4M7SafBgPllysxmdachynNflAdp/gCsnYWNDnge6tI9onzMp5ARYc1KNw==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-tabs@1.1.1': + resolution: {integrity: sha512-3GBUDmP2DvzmtYLMsHmpA1GtR46ZDZ+OreXM/N+kkQJOPIgytFWWTfDQmBQKBvaFS0Vno0FktdbVzN28KGrMdw==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-tooltip@1.1.3': + resolution: {integrity: sha512-Z4w1FIS0BqVFI2c1jZvb/uDVJijJjJ2ZMuPV81oVgTZ7g3BZxobplnMVvXtFWgtozdvYJ+MFWtwkM5S2HnAong==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-use-callback-ref@1.1.0': + resolution: {integrity: sha512-CasTfvsy+frcFkbXtSJ2Zu9JHpN8TYKxkgJGWbjiZhFivxaeW7rMeZt7QELGVLaYVfFMsKHjb7Ak0nMEe+2Vfw==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-use-controllable-state@1.1.0': + resolution: {integrity: sha512-MtfMVJiSr2NjzS0Aa90NPTnvTSg6C/JLCV7ma0W6+OMV78vd8OyRpID+Ng9LxzsPbLeuBnWBA1Nq30AtBIDChw==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-use-escape-keydown@1.1.0': + resolution: {integrity: sha512-L7vwWlR1kTTQ3oh7g1O0CBF3YCyyTj8NmhLR+phShpyA50HCfBFKVJTpshm9PzLiKmehsrQzTYTpX9HvmC9rhw==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-use-layout-effect@1.1.0': + resolution: {integrity: sha512-+FPE0rOdziWSrH9athwI1R0HDVbWlEhd+FR+aSDk4uWGmSJ9Z54sdZVDQPZAinJhJXwfT+qnj969mCsT2gfm5w==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-use-previous@1.1.0': + resolution: {integrity: 
sha512-Z/e78qg2YFnnXcW88A4JmTtm4ADckLno6F7OXotmkQfeuCVaKuYzqAATPhVzl3delXE7CxIV8shofPn3jPc5Og==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-use-rect@1.1.0': + resolution: {integrity: sha512-0Fmkebhr6PiseyZlYAOtLS+nb7jLmpqTrJyv61Pe68MKYW6OWdRE2kI70TaYY27u7H0lajqM3hSMMLFq18Z7nQ==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-use-size@1.1.0': + resolution: {integrity: sha512-XW3/vWuIXHa+2Uwcc2ABSfcCledmXhhQPlGbfcRXbiUQI5Icjcg19BGCZVKKInYbvUCut/ufbbLLPFC5cbb1hw==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-visually-hidden@1.1.0': + resolution: {integrity: sha512-N8MDZqtgCgG5S3aV60INAB475osJousYpZ4cTJ2cFbMpdHS5Y6loLTH8LPtkj2QN0x93J30HT/M3qJXM0+lyeQ==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/rect@1.1.0': + resolution: {integrity: sha512-A9+lCBZoaMJlVKcRBz2YByCG+Cp2t6nAnMnNba+XiWxnj6r4JUFqfsgwocMBZU9LPtdxC6wB56ySYpc7LQIoJg==} + + '@reduxjs/toolkit@2.3.0': + resolution: {integrity: sha512-WC7Yd6cNGfHx8zf+iu+Q1UPTfEcXhQ+ATi7CV1hlrSAaQBdlPzg7Ww/wJHNQem7qG9rxmWoFCDCPubSvFObGzA==} + peerDependencies: + react: ^16.9.0 || ^17.0.0 || ^18 + react-redux: ^7.2.1 || ^8.1.3 || ^9.0.0 + peerDependenciesMeta: + react: + optional: true + react-redux: + optional: true + + '@rtsao/scc@1.1.0': + resolution: {integrity: sha512-zt6OdqaDoOnJ1ZYsCYGt9YmWzDXl4vQdKTyJev62gFhRGKdx7mcT54V9KIjg+d2wi9EXsPvAPKe7i7WjfVWB8g==} + + '@rushstack/eslint-patch@1.10.4': + resolution: {integrity: sha512-WJgX9nzTqknM393q1QJDJmoW28kUfEnybeTfVNcNAPnIx210RXm2DiXiHzfNPJNIUUb1tJnz/l4QGtJ30PgWmA==} + + '@svgr/babel-plugin-add-jsx-attribute@8.0.0': + resolution: {integrity: sha512-b9MIk7yhdS1pMCZM8VeNfUlSKVRhsHZNMl5O9SfaX0l0t5wjdgu4IDzGB8bpnGBBOjGST3rRFVsaaEtI4W6f7g==} + engines: {node: '>=14'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@svgr/babel-plugin-remove-jsx-attribute@8.0.0': + resolution: {integrity: sha512-BcCkm/STipKvbCl6b7QFrMh/vx00vIP63k2eM66MfHJzPr6O2U0jYEViXkHJWqXqQYjdeA9cuCl5KWmlwjDvbA==} + engines: {node: '>=14'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@svgr/babel-plugin-remove-jsx-empty-expression@8.0.0': + resolution: {integrity: sha512-5BcGCBfBxB5+XSDSWnhTThfI9jcO5f0Ai2V24gZpG+wXF14BzwxxdDb4g6trdOux0rhibGs385BeFMSmxtS3uA==} + engines: {node: '>=14'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@svgr/babel-plugin-replace-jsx-attribute-value@8.0.0': + resolution: {integrity: sha512-KVQ+PtIjb1BuYT3ht8M5KbzWBhdAjjUPdlMtpuw/VjT8coTrItWX6Qafl9+ji831JaJcu6PJNKCV0bp01lBNzQ==} + engines: {node: '>=14'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@svgr/babel-plugin-svg-dynamic-title@8.0.0': + resolution: {integrity: sha512-omNiKqwjNmOQJ2v6ge4SErBbkooV2aAWwaPFs2vUY7p7GhVkzRkJ00kILXQvRhA6miHnNpXv7MRnnSjdRjK8og==} + engines: {node: '>=14'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@svgr/babel-plugin-svg-em-dimensions@8.0.0': + resolution: {integrity: sha512-mURHYnu6Iw3UBTbhGwE/vsngtCIbHE43xCRK7kCw4t01xyGqb2Pd+WXekRRoFOBIY29ZoOhUCTEweDMdrjfi9g==} + 
engines: {node: '>=14'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@svgr/babel-plugin-transform-react-native-svg@8.1.0': + resolution: {integrity: sha512-Tx8T58CHo+7nwJ+EhUwx3LfdNSG9R2OKfaIXXs5soiy5HtgoAEkDay9LIimLOcG8dJQH1wPZp/cnAv6S9CrR1Q==} + engines: {node: '>=14'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@svgr/babel-plugin-transform-svg-component@8.0.0': + resolution: {integrity: sha512-DFx8xa3cZXTdb/k3kfPeaixecQLgKh5NVBMwD0AQxOzcZawK4oo1Jh9LbrcACUivsCA7TLG8eeWgrDXjTMhRmw==} + engines: {node: '>=12'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@svgr/babel-preset@8.1.0': + resolution: {integrity: sha512-7EYDbHE7MxHpv4sxvnVPngw5fuR6pw79SkcrILHJ/iMpuKySNCl5W1qcwPEpU+LgyRXOaAFgH0KhwD18wwg6ug==} + engines: {node: '>=14'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@svgr/core@8.1.0': + resolution: {integrity: sha512-8QqtOQT5ACVlmsvKOJNEaWmRPmcojMOzCz4Hs2BGG/toAp/K38LcsMRyLp349glq5AzJbCEeimEoxaX6v/fLrA==} + engines: {node: '>=14'} + + '@svgr/hast-util-to-babel-ast@8.0.0': + resolution: {integrity: sha512-EbDKwO9GpfWP4jN9sGdYwPBU0kdomaPIL2Eu4YwmgP+sJeXT+L7bMwJUBnhzfH8Q2qMBqZ4fJwpCyYsAN3mt2Q==} + engines: {node: '>=14'} + + '@svgr/plugin-jsx@8.1.0': + resolution: {integrity: sha512-0xiIyBsLlr8quN+WyuxooNW9RJ0Dpr8uOnH/xrCVO8GLUcwHISwj1AG0k+LFzteTkAA0GbX0kj9q6Dk70PTiPA==} + engines: {node: '>=14'} + peerDependencies: + '@svgr/core': '*' + + '@svgr/plugin-svgo@8.1.0': + resolution: {integrity: sha512-Ywtl837OGO9pTLIN/onoWLmDQ4zFUycI1g76vuKGEz6evR/ZTJlJuz3G/fIkb6OVBJ2g0o6CGJzaEjfmEo3AHA==} + engines: {node: '>=14'} + peerDependencies: + '@svgr/core': '*' + + '@svgr/webpack@8.1.0': + resolution: {integrity: sha512-LnhVjMWyMQV9ZmeEy26maJk+8HTIbd59cH4F2MJ439k9DqejRisfFNGAPvRYlKETuh9LrImlS8aKsBgKjMA8WA==} + engines: {node: '>=14'} + + '@swc/counter@0.1.3': + resolution: {integrity: sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==} + + '@swc/helpers@0.5.13': + resolution: {integrity: sha512-UoKGxQ3r5kYI9dALKJapMmuK+1zWM/H17Z1+iwnNmzcJRnfFuevZs375TA5rW31pu4BS4NoSy1fRsexDXfWn5w==} + + '@trysound/sax@0.2.0': + resolution: {integrity: sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==} + engines: {node: '>=10.13.0'} + + '@types/hoist-non-react-statics@3.3.5': + resolution: {integrity: sha512-SbcrWzkKBw2cdwRTwQAswfpB9g9LJWfjtUeW/jvNwbhC8cpmmNYVePa+ncbUe0rGTQ7G3Ff6mYUN2VMfLVr+Sg==} + + '@types/json5@0.0.29': + resolution: {integrity: sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==} + + '@types/linkify-it@5.0.0': + resolution: {integrity: sha512-sVDA58zAw4eWAffKOaQH5/5j3XeayukzDk+ewSsnv3p4yJEZHCCzMDiZM8e0OUrRvmpGZ85jf4yDHkHsgBNr9Q==} + + '@types/lodash-es@4.17.12': + resolution: {integrity: sha512-0NgftHUcV4v34VhXm8QBSftKVXtbkBG3ViCjs6+eJ5a6y6Mi/jiFGPc1sC7QK+9BFhWrURE3EOggmWaSxL9OzQ==} + + '@types/lodash@4.17.13': + resolution: {integrity: sha512-lfx+dftrEZcdBPczf9d0Qv0x+j/rfNCMuC6OcfXmO8gkfeNAY88PgKUbvG56whcN23gc27yenwF6oJZXGFpYxg==} + + '@types/markdown-it@14.1.2': + resolution: {integrity: sha512-promo4eFwuiW+TfGxhi+0x3czqTYJkG8qB17ZUJiVF10Xm7NLVRSLUsfRTU/6h1e24VvRnXCx+hG7li58lkzog==} + + '@types/mdurl@2.0.0': + resolution: {integrity: sha512-RGdgjQUZba5p6QEFAVx2OGb8rQDL/cPRG7GiedRzMcJ1tYnUANBncjbSB1NRGwbvjcPeikRABz2nshyPk1bhWg==} + + '@types/node@20.17.6': + resolution: {integrity: sha512-VEI7OdvK2wP7XHnsuXbAJnEpEkF6NjSN45QJlL4VGqZSXsnicpesdTWsg9RISeSdYd3yeRj/y3k5KGjUXYnFwQ==} + + '@types/prop-types@15.7.13': + resolution: {integrity: 
sha512-hCZTSvwbzWGvhqxp/RqVqwU999pBf2vp7hzIjiYOsl8wqOmUxkQ6ddw1cV3l8811+kdUFus/q4d1Y3E3SyEifA==} + + '@types/react-dom@18.3.1': + resolution: {integrity: sha512-qW1Mfv8taImTthu4KoXgDfLuk4bydU6Q/TkADnDWWHwi4NX4BR+LWfTp2sVmTqRrsHvyDDTelgelxJ+SsejKKQ==} + + '@types/react-redux@7.1.34': + resolution: {integrity: sha512-GdFaVjEbYv4Fthm2ZLvj1VSCedV7TqE5y1kNwnjSdBOTXuRSgowux6J8TAct15T3CKBr63UMk+2CO7ilRhyrAQ==} + + '@types/react@18.3.12': + resolution: {integrity: sha512-D2wOSq/d6Agt28q7rSI3jhU7G6aiuzljDGZ2hTZHIkrTLUI+AF3WMeKkEZ9nN2fkBAlcktT6vcZjDFiIhMYEQw==} + + '@types/use-sync-external-store@0.0.3': + resolution: {integrity: sha512-EwmlvuaxPNej9+T4v5AuBPJa2x2UOJVdjCtDHgcDqitUeOtjnJKJ+apYjVcAoBEMjKW1VVFGZLUb5+qqa09XFA==} + + '@typescript-eslint/eslint-plugin@8.13.0': + resolution: {integrity: sha512-nQtBLiZYMUPkclSeC3id+x4uVd1SGtHuElTxL++SfP47jR0zfkZBJHc+gL4qPsgTuypz0k8Y2GheaDYn6Gy3rg==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + '@typescript-eslint/parser': ^8.0.0 || ^8.0.0-alpha.0 + eslint: ^8.57.0 || ^9.0.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@typescript-eslint/parser@8.13.0': + resolution: {integrity: sha512-w0xp+xGg8u/nONcGw1UXAr6cjCPU1w0XVyBs6Zqaj5eLmxkKQAByTdV/uGgNN5tVvN/kKpoQlP2cL7R+ajZZIQ==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@typescript-eslint/scope-manager@8.13.0': + resolution: {integrity: sha512-XsGWww0odcUT0gJoBZ1DeulY1+jkaHUciUq4jKNv4cpInbvvrtDoyBH9rE/n2V29wQJPk8iCH1wipra9BhmiMA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@typescript-eslint/type-utils@8.13.0': + resolution: {integrity: sha512-Rqnn6xXTR316fP4D2pohZenJnp+NwQ1mo7/JM+J1LWZENSLkJI8ID8QNtlvFeb0HnFSK94D6q0cnMX6SbE5/vA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@typescript-eslint/types@8.13.0': + resolution: {integrity: sha512-4cyFErJetFLckcThRUFdReWJjVsPCqyBlJTi6IDEpc1GWCIIZRFxVppjWLIMcQhNGhdWJJRYFHpHoDWvMlDzng==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@typescript-eslint/typescript-estree@8.13.0': + resolution: {integrity: sha512-v7SCIGmVsRK2Cy/LTLGN22uea6SaUIlpBcO/gnMGT/7zPtxp90bphcGf4fyrCQl3ZtiBKqVTG32hb668oIYy1g==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@typescript-eslint/utils@8.13.0': + resolution: {integrity: sha512-A1EeYOND6Uv250nybnLZapeXpYMl8tkzYUxqmoKAWnI4sei3ihf2XdZVd+vVOmHGcp3t+P7yRrNsyyiXTvShFQ==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 + + '@typescript-eslint/visitor-keys@8.13.0': + resolution: {integrity: sha512-7N/+lztJqH4Mrf0lb10R/CbI1EaAMMGyF5y0oJvFoAhafwgiRA7TXyd8TFn8FC8k5y2dTsYogg238qavRGNnlw==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@ungap/structured-clone@1.2.0': + resolution: {integrity: sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==} + + acorn-jsx@5.3.2: + resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} + peerDependencies: + acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 + + acorn@8.14.0: + resolution: {integrity: sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA==} + engines: {node: '>=0.4.0'} + hasBin: true + + 
agora-rtc-sdk-ng@4.22.0: + resolution: {integrity: sha512-mP6BDNP6oV01IQV7yXZ4wIuwDpoHaK6ARiDVKDaD+fK3LXXWqUtCweZLwzTde+OYkauPEsivqNbkAp/q6Ggqtg==} + + agora-rtc-sdk-ng@4.22.2: + resolution: {integrity: sha512-lxE6nBxBfPcF22ecWLWn54e0+TK09F3/KXDzZO5dkCq7iwaBzrBkmMtfCWp4wjjS7qGKHkqbWelfLjeMcwGaVw==} + + agora-rte-extension@1.2.4: + resolution: {integrity: sha512-0ovZz1lbe30QraG1cU+ji7EnQ8aUu+Hf3F+a8xPml3wPOyUQEK6CTdxV9kMecr9t+fIDrGeW7wgJTsM1DQE7Nw==} + + agora-rtm@2.2.0: + resolution: {integrity: sha512-I7nl9eKGC817fGZq3wzi92+xju0EYn/nR3LwCv2lA6d5Lzr5G4y1bhZBuWY29ZaypHzNdExtCBajl0skJi2uUw==} + + ajv@6.12.6: + resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==} + + ansi-regex@5.0.1: + resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} + engines: {node: '>=8'} + + ansi-regex@6.1.0: + resolution: {integrity: sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==} + engines: {node: '>=12'} + + ansi-styles@4.3.0: + resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} + engines: {node: '>=8'} + + ansi-styles@6.2.1: + resolution: {integrity: sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==} + engines: {node: '>=12'} + + any-promise@1.3.0: + resolution: {integrity: sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==} + + anymatch@3.1.3: + resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==} + engines: {node: '>= 8'} + + arg@5.0.2: + resolution: {integrity: sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==} + + argparse@2.0.1: + resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + + aria-hidden@1.2.4: + resolution: {integrity: sha512-y+CcFFwelSXpLZk/7fMB2mUbGtX9lKycf1MWJ7CaTIERyitVlyQx6C+sxcROU2BAJ24OiZyK+8wj2i8AlBoS3A==} + engines: {node: '>=10'} + + aria-query@5.3.2: + resolution: {integrity: sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw==} + engines: {node: '>= 0.4'} + + array-buffer-byte-length@1.0.1: + resolution: {integrity: sha512-ahC5W1xgou+KTXix4sAO8Ki12Q+jf4i0+tmk3sC+zgcynshkHxzpXdImBehiUYKKKDwvfFiJl1tZt6ewscS1Mg==} + engines: {node: '>= 0.4'} + + array-includes@3.1.8: + resolution: {integrity: sha512-itaWrbYbqpGXkGhZPGUulwnhVf5Hpy1xiCFsGqyIGglbBxmG5vSjxQen3/WGOjPpNEv1RtBLKxbmVXm8HpJStQ==} + engines: {node: '>= 0.4'} + + array.prototype.findlast@1.2.5: + resolution: {integrity: sha512-CVvd6FHg1Z3POpBLxO6E6zr+rSKEQ9L6rZHAaY7lLfhKsWYUBBOuMs0e9o24oopj6H+geRCX0YJ+TJLBK2eHyQ==} + engines: {node: '>= 0.4'} + + array.prototype.findlastindex@1.2.5: + resolution: {integrity: sha512-zfETvRFA8o7EiNn++N5f/kaCw221hrpGsDmcpndVupkPzEc1Wuf3VgC0qby1BbHs7f5DVYjgtEU2LLh5bqeGfQ==} + engines: {node: '>= 0.4'} + + array.prototype.flat@1.3.2: + resolution: {integrity: sha512-djYB+Zx2vLewY8RWlNCUdHjDXs2XOgm602S9E7P/UpHgfeHL00cRiIF+IN/G/aUJ7kGPb6yO/ErDI5V2s8iycA==} + engines: {node: '>= 0.4'} + + array.prototype.flatmap@1.3.2: + resolution: {integrity: sha512-Ewyx0c9PmpcsByhSW4r+9zDU7sGjFc86qf/kKtuSCRdhfbk0SNLLkaT5qvcHnRGgc5NP/ly/y+qkXkqONX54CQ==} + engines: {node: '>= 0.4'} + + array.prototype.tosorted@1.1.4: + resolution: {integrity: 
sha512-p6Fx8B7b7ZhL/gmUsAy0D15WhvDccw3mnGNbZpi3pmeJdxtWsj2jEaI4Y6oo3XiHfzuSgPwKc04MYt6KgvC/wA==} + engines: {node: '>= 0.4'} + + arraybuffer.prototype.slice@1.0.3: + resolution: {integrity: sha512-bMxMKAjg13EBSVscxTaYA4mRc5t1UAXa2kXiGTNfZ079HIWXEkKmkgFrh/nJqamaLSrXO5H4WFFkPEaLJWbs3A==} + engines: {node: '>= 0.4'} + + ast-types-flow@0.0.8: + resolution: {integrity: sha512-OH/2E5Fg20h2aPrbe+QL8JZQFko0YZaF+j4mnQ7BGhfavO7OpSLa8a0y9sBwomHdSbkhTS8TQNayBfnW5DwbvQ==} + + asynckit@0.4.0: + resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==} + + autoprefixer@10.4.20: + resolution: {integrity: sha512-XY25y5xSv/wEoqzDyXXME4AFfkZI0P23z6Fs3YgymDnKJkCGOnkL0iTxCa85UTqaSgfcqyf3UA6+c7wUvx/16g==} + engines: {node: ^10 || ^12 || >=14} + hasBin: true + peerDependencies: + postcss: ^8.1.0 + + available-typed-arrays@1.0.7: + resolution: {integrity: sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==} + engines: {node: '>= 0.4'} + + axe-core@4.10.2: + resolution: {integrity: sha512-RE3mdQ7P3FRSe7eqCWoeQ/Z9QXrtniSjp1wUjt5nRC3WIpz5rSCve6o3fsZ2aCpJtrZjSZgjwXAoTO5k4tEI0w==} + engines: {node: '>=4'} + + axios@1.7.7: + resolution: {integrity: sha512-S4kL7XrjgBmvdGut0sN3yJxqYzrDOnivkBiN0OFs6hLiUam3UPvswUo0kqGyhqUZGEOytHyumEdXsAkgCOUf3Q==} + + axobject-query@4.1.0: + resolution: {integrity: sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ==} + engines: {node: '>= 0.4'} + + babel-plugin-polyfill-corejs2@0.4.11: + resolution: {integrity: sha512-sMEJ27L0gRHShOh5G54uAAPaiCOygY/5ratXuiyb2G46FmlSpc9eFCzYVyDiPxfNbwzA7mYahmjQc5q+CZQ09Q==} + peerDependencies: + '@babel/core': ^7.4.0 || ^8.0.0-0 <8.0.0 + + babel-plugin-polyfill-corejs3@0.10.6: + resolution: {integrity: sha512-b37+KR2i/khY5sKmWNVQAnitvquQbNdWy6lJdsr0kmquCKEEUgMKK4SboVM3HtfnZilfjr4MMQ7vY58FVWDtIA==} + peerDependencies: + '@babel/core': ^7.4.0 || ^8.0.0-0 <8.0.0 + + babel-plugin-polyfill-regenerator@0.6.2: + resolution: {integrity: sha512-2R25rQZWP63nGwaAswvDazbPXfrM3HwVoBXK6HcqeKrSrL/JqcC/rDcf95l4r7LXLyxDXc8uQDa064GubtCABg==} + peerDependencies: + '@babel/core': ^7.4.0 || ^8.0.0-0 <8.0.0 + + balanced-match@1.0.2: + resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} + + binary-extensions@2.3.0: + resolution: {integrity: sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==} + engines: {node: '>=8'} + + bluebird@3.7.2: + resolution: {integrity: sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==} + + boolbase@1.0.0: + resolution: {integrity: sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==} + + brace-expansion@1.1.11: + resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==} + + brace-expansion@2.0.1: + resolution: {integrity: sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==} + + braces@3.0.3: + resolution: {integrity: sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==} + engines: {node: '>=8'} + + browserslist@4.24.2: + resolution: {integrity: sha512-ZIc+Q62revdMcqC6aChtW4jz3My3klmCO1fEmINZY/8J3EpBg5/A/D0AKmBveUh6pgoeycoMkVMko84tuYS+Gg==} + engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7} + hasBin: true + + busboy@1.6.0: + resolution: 
{integrity: sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==} + engines: {node: '>=10.16.0'} + + call-bind@1.0.7: + resolution: {integrity: sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==} + engines: {node: '>= 0.4'} + + callsites@3.1.0: + resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} + engines: {node: '>=6'} + + camelcase-css@2.0.1: + resolution: {integrity: sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==} + engines: {node: '>= 6'} + + camelcase@6.3.0: + resolution: {integrity: sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==} + engines: {node: '>=10'} + + caniuse-lite@1.0.30001680: + resolution: {integrity: sha512-rPQy70G6AGUMnbwS1z6Xg+RkHYPAi18ihs47GH0jcxIG7wArmPgY3XbS2sRdBbxJljp3thdT8BIqv9ccCypiPA==} + + catharsis@0.9.0: + resolution: {integrity: sha512-prMTQVpcns/tzFgFVkVp6ak6RykZyWb3gu8ckUpd6YkTlacOd3DXGJjIpD4Q6zJirizvaiAjSSHlOsA+6sNh2A==} + engines: {node: '>= 10'} + + chalk@4.1.2: + resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} + engines: {node: '>=10'} + + chokidar@3.6.0: + resolution: {integrity: sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==} + engines: {node: '>= 8.10.0'} + + chokidar@4.0.1: + resolution: {integrity: sha512-n8enUVCED/KVRQlab1hr3MVpcVMvxtZjmEa956u+4YijlmQED223XMSYj2tLuKvr4jcCTzNNMpQDUer72MMmzA==} + engines: {node: '>= 14.16.0'} + + class-variance-authority@0.7.0: + resolution: {integrity: sha512-jFI8IQw4hczaL4ALINxqLEXQbWcNjoSkloa4IaufXCJr6QawJyw7tuRysRsrE8w2p/4gGaxKIt/hX3qz/IbD1A==} + + client-only@0.0.1: + resolution: {integrity: sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==} + + clsx@2.0.0: + resolution: {integrity: sha512-rQ1+kcj+ttHG0MKVGBUXwayCCF1oh39BF5COIpRzuCEv8Mwjv0XucrI2ExNTOn9IlLifGClWQcU9BrZORvtw6Q==} + engines: {node: '>=6'} + + clsx@2.1.1: + resolution: {integrity: sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==} + engines: {node: '>=6'} + + color-convert@2.0.1: + resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} + engines: {node: '>=7.0.0'} + + color-name@1.1.4: + resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + + color-string@1.9.1: + resolution: {integrity: sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==} + + color@4.2.3: + resolution: {integrity: sha512-1rXeuUUiGGrykh+CeBdu5Ie7OJwinCgQY0bc7GCRxy5xVHy+moaqkpL/jqQq0MtQOeYcrqEz4abc5f0KtU7W4A==} + engines: {node: '>=12.5.0'} + + combined-stream@1.0.8: + resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} + engines: {node: '>= 0.8'} + + commander@4.1.1: + resolution: {integrity: sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==} + engines: {node: '>= 6'} + + commander@7.2.0: + resolution: {integrity: sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==} + engines: {node: '>= 10'} + + concat-map@0.0.1: + resolution: {integrity: 
sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} + + convert-source-map@2.0.0: + resolution: {integrity: sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==} + + core-js-compat@3.39.0: + resolution: {integrity: sha512-VgEUx3VwlExr5no0tXlBt+silBvhTryPwCXRI2Id1PN8WTKu7MreethvddqOubrYxkFdv/RnYrqlv1sFNAUelw==} + + cosmiconfig@8.3.6: + resolution: {integrity: sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==} + engines: {node: '>=14'} + peerDependencies: + typescript: '>=4.9.5' + peerDependenciesMeta: + typescript: + optional: true + + cross-spawn@7.0.5: + resolution: {integrity: sha512-ZVJrKKYunU38/76t0RMOulHOnUcbU9GbpWKAOZ0mhjr7CX6FVrH+4FrAapSOekrgFQ3f/8gwMEuIft0aKq6Hug==} + engines: {node: '>= 8'} + + css-select@5.1.0: + resolution: {integrity: sha512-nwoRF1rvRRnnCqqY7updORDsuqKzqYJ28+oSMaJMMgOauh3fvwHqMS7EZpIPqK8GL+g9mKxF1vP/ZjSeNjEVHg==} + + css-tree@2.2.1: + resolution: {integrity: sha512-OA0mILzGc1kCOCSJerOeqDxDQ4HOh+G8NbOJFOTgOCzpw7fCBubk0fEyxp8AgOL/jvLgYA/uV0cMbe43ElF1JA==} + engines: {node: ^10 || ^12.20.0 || ^14.13.0 || >=15.0.0, npm: '>=7.0.0'} + + css-tree@2.3.1: + resolution: {integrity: sha512-6Fv1DV/TYw//QF5IzQdqsNDjx/wc8TrMBZsqjL9eW01tWb7R7k/mq+/VXfJCl7SoD5emsJop9cOByJZfs8hYIw==} + engines: {node: ^10 || ^12.20.0 || ^14.13.0 || >=15.0.0} + + css-what@6.1.0: + resolution: {integrity: sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==} + engines: {node: '>= 6'} + + cssesc@3.0.0: + resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==} + engines: {node: '>=4'} + hasBin: true + + csso@5.0.5: + resolution: {integrity: sha512-0LrrStPOdJj+SPCCrGhzryycLjwcgUSHBtxNA8aIDxf0GLsRh1cKYhB00Gd1lDOS4yGH69+SNn13+TWbVHETFQ==} + engines: {node: ^10 || ^12.20.0 || ^14.13.0 || >=15.0.0, npm: '>=7.0.0'} + + csstype@3.1.3: + resolution: {integrity: sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==} + + damerau-levenshtein@1.0.8: + resolution: {integrity: sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA==} + + data-view-buffer@1.0.1: + resolution: {integrity: sha512-0lht7OugA5x3iJLOWFhWK/5ehONdprk0ISXqVFn/NFrDu+cuc8iADFrGQz5BnRK7LLU3JmkbXSxaqX+/mXYtUA==} + engines: {node: '>= 0.4'} + + data-view-byte-length@1.0.1: + resolution: {integrity: sha512-4J7wRJD3ABAzr8wP+OcIcqq2dlUKp4DVflx++hs5h5ZKydWMI6/D/fAot+yh6g2tHh8fLFTvNOaVN357NvSrOQ==} + engines: {node: '>= 0.4'} + + data-view-byte-offset@1.0.0: + resolution: {integrity: sha512-t/Ygsytq+R995EJ5PZlD4Cu56sWa8InXySaViRzw9apusqsOO2bQP+SbYzAhR0pFKoB+43lYy8rWban9JSuXnA==} + engines: {node: '>= 0.4'} + + debug@3.2.7: + resolution: {integrity: sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + + debug@4.3.7: + resolution: {integrity: sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + + decode-uri-component@0.4.1: + resolution: {integrity: sha512-+8VxcR21HhTy8nOt6jf20w0c9CADrw1O8d+VZ/YzzCt4bJ3uBjw+D1q2osAB8RnpwwaeYBxy0HyKQxD5JBMuuQ==} + engines: {node: '>=14.16'} + + deep-is@0.1.4: + resolution: {integrity: 
sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} + + deepmerge@4.3.1: + resolution: {integrity: sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==} + engines: {node: '>=0.10.0'} + + define-data-property@1.1.4: + resolution: {integrity: sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==} + engines: {node: '>= 0.4'} + + define-properties@1.2.1: + resolution: {integrity: sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==} + engines: {node: '>= 0.4'} + + defu@6.1.4: + resolution: {integrity: sha512-mEQCMmwJu317oSz8CwdIOdwf3xMif1ttiM8LTufzc3g6kR+9Pe236twL8j3IYT1F7GfRgGcW6MWxzZjLIkuHIg==} + + delayed-stream@1.0.0: + resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} + engines: {node: '>=0.4.0'} + + destr@2.0.3: + resolution: {integrity: sha512-2N3BOUU4gYMpTP24s5rF5iP7BDr7uNTCs4ozw3kf/eKfvWSIu93GEBi5m427YoyJoeOzQ5smuu4nNAPGb8idSQ==} + + detect-libc@1.0.3: + resolution: {integrity: sha512-pGjwhsmsp4kL2RTz08wcOlGN83otlqHeD/Z5T8GXZB+/YcpQ/dgo+lbU8ZsGxV0HIvqqxo9l7mqYwyYMD9bKDg==} + engines: {node: '>=0.10'} + hasBin: true + + detect-libc@2.0.3: + resolution: {integrity: sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw==} + engines: {node: '>=8'} + + detect-node-es@1.1.0: + resolution: {integrity: sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==} + + didyoumean@1.2.2: + resolution: {integrity: sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==} + + dlv@1.1.3: + resolution: {integrity: sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==} + + doctrine@2.1.0: + resolution: {integrity: sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==} + engines: {node: '>=0.10.0'} + + doctrine@3.0.0: + resolution: {integrity: sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==} + engines: {node: '>=6.0.0'} + + dom-serializer@2.0.0: + resolution: {integrity: sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==} + + domelementtype@2.3.0: + resolution: {integrity: sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==} + + domhandler@5.0.3: + resolution: {integrity: sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==} + engines: {node: '>= 4'} + + domutils@3.1.0: + resolution: {integrity: sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==} + + dot-case@3.0.4: + resolution: {integrity: sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==} + + eastasianwidth@0.2.0: + resolution: {integrity: sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==} + + electron-to-chromium@1.5.55: + resolution: {integrity: sha512-6maZ2ASDOTBtjt9FhqYPRnbvKU5tjG0IN9SztUOWYw2AzNDNpKJYLJmlK0/En4Hs/aiWnB+JZ+gW19PIGszgKg==} + + emoji-regex@8.0.0: + resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} + + emoji-regex@9.2.2: + resolution: {integrity: 
sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==} + + enhanced-resolve@5.17.1: + resolution: {integrity: sha512-LMHl3dXhTcfv8gM4kEzIUeTQ+7fpdA0l2tUf34BddXPkz2A5xJ5L/Pchd5BL6rdccM9QGvu0sWZzK1Z1t4wwyg==} + engines: {node: '>=10.13.0'} + + entities@4.5.0: + resolution: {integrity: sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==} + engines: {node: '>=0.12'} + + error-ex@1.3.2: + resolution: {integrity: sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==} + + es-abstract@1.23.3: + resolution: {integrity: sha512-e+HfNH61Bj1X9/jLc5v1owaLYuHdeHHSQlkhCBiTK8rBvKaULl/beGMxwrMXjpYrv4pz22BlY570vVePA2ho4A==} + engines: {node: '>= 0.4'} + + es-define-property@1.0.0: + resolution: {integrity: sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==} + engines: {node: '>= 0.4'} + + es-errors@1.3.0: + resolution: {integrity: sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==} + engines: {node: '>= 0.4'} + + es-iterator-helpers@1.2.0: + resolution: {integrity: sha512-tpxqxncxnpw3c93u8n3VOzACmRFoVmWJqbWXvX/JfKbkhBw1oslgPrUfeSt2psuqyEJFD6N/9lg5i7bsKpoq+Q==} + engines: {node: '>= 0.4'} + + es-object-atoms@1.0.0: + resolution: {integrity: sha512-MZ4iQ6JwHOBQjahnjwaC1ZtIBH+2ohjamzAO3oaHcXYup7qxjF2fixyH+Q71voWHeOkI2q/TnJao/KfXYIZWbw==} + engines: {node: '>= 0.4'} + + es-set-tostringtag@2.0.3: + resolution: {integrity: sha512-3T8uNMC3OQTHkFUsFq8r/BwAXLHvU/9O9mE0fBc/MY5iq/8H7ncvO947LmYA6ldWw9Uh8Yhf25zu6n7nML5QWQ==} + engines: {node: '>= 0.4'} + + es-shim-unscopables@1.0.2: + resolution: {integrity: sha512-J3yBRXCzDu4ULnQwxyToo/OjdMx6akgVC7K6few0a7F/0wLtmKKN7I73AH5T2836UuXRqN7Qg+IIUw/+YJksRw==} + + es-to-primitive@1.2.1: + resolution: {integrity: sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==} + engines: {node: '>= 0.4'} + + escalade@3.2.0: + resolution: {integrity: sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==} + engines: {node: '>=6'} + + escape-string-regexp@2.0.0: + resolution: {integrity: sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==} + engines: {node: '>=8'} + + escape-string-regexp@4.0.0: + resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==} + engines: {node: '>=10'} + + escodegen@1.14.3: + resolution: {integrity: sha512-qFcX0XJkdg+PB3xjZZG/wKSuT1PnQWx57+TVSjIMmILd2yC/6ByYElPwJnslDsuWuSAp4AwJGumarAAmJch5Kw==} + engines: {node: '>=4.0'} + hasBin: true + + eslint-config-next@15.0.3: + resolution: {integrity: sha512-IGP2DdQQrgjcr4mwFPve4DrCqo7CVVez1WoYY47XwKSrYO4hC0Dlb+iJA60i0YfICOzgNADIb8r28BpQ5Zs0wg==} + peerDependencies: + eslint: ^7.23.0 || ^8.0.0 || ^9.0.0 + typescript: '>=3.3.1' + peerDependenciesMeta: + typescript: + optional: true + + eslint-import-resolver-node@0.3.9: + resolution: {integrity: sha512-WFj2isz22JahUv+B788TlO3N6zL3nNJGU8CcZbPZvVEkBPaJdCV4vy5wyghty5ROFbCRnm132v8BScu5/1BQ8g==} + + eslint-import-resolver-typescript@3.6.3: + resolution: {integrity: sha512-ud9aw4szY9cCT1EWWdGv1L1XR6hh2PaRWif0j2QjQ0pgTY/69iw+W0Z4qZv5wHahOl8isEr+k/JnyAqNQkLkIA==} + engines: {node: ^14.18.0 || >=16.0.0} + peerDependencies: + eslint: '*' + eslint-plugin-import: '*' + eslint-plugin-import-x: '*' + peerDependenciesMeta: + eslint-plugin-import: + optional: true + eslint-plugin-import-x: 
+ optional: true + + eslint-module-utils@2.12.0: + resolution: {integrity: sha512-wALZ0HFoytlyh/1+4wuZ9FJCD/leWHQzzrxJ8+rebyReSLk7LApMyd3WJaLVoN+D5+WIdJyDK1c6JnE65V4Zyg==} + engines: {node: '>=4'} + peerDependencies: + '@typescript-eslint/parser': '*' + eslint: '*' + eslint-import-resolver-node: '*' + eslint-import-resolver-typescript: '*' + eslint-import-resolver-webpack: '*' + peerDependenciesMeta: + '@typescript-eslint/parser': + optional: true + eslint: + optional: true + eslint-import-resolver-node: + optional: true + eslint-import-resolver-typescript: + optional: true + eslint-import-resolver-webpack: + optional: true + + eslint-plugin-import@2.31.0: + resolution: {integrity: sha512-ixmkI62Rbc2/w8Vfxyh1jQRTdRTF52VxwRVHl/ykPAmqG+Nb7/kNn+byLP0LxPgI7zWA16Jt82SybJInmMia3A==} + engines: {node: '>=4'} + peerDependencies: + '@typescript-eslint/parser': '*' + eslint: ^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0 || ^8 || ^9 + peerDependenciesMeta: + '@typescript-eslint/parser': + optional: true + + eslint-plugin-jsx-a11y@6.10.2: + resolution: {integrity: sha512-scB3nz4WmG75pV8+3eRUQOHZlNSUhFNq37xnpgRkCCELU3XMvXAxLk1eqWWyE22Ki4Q01Fnsw9BA3cJHDPgn2Q==} + engines: {node: '>=4.0'} + peerDependencies: + eslint: ^3 || ^4 || ^5 || ^6 || ^7 || ^8 || ^9 + + eslint-plugin-react-hooks@5.0.0: + resolution: {integrity: sha512-hIOwI+5hYGpJEc4uPRmz2ulCjAGD/N13Lukkh8cLV0i2IRk/bdZDYjgLVHj+U9Z704kLIdIO6iueGvxNur0sgw==} + engines: {node: '>=10'} + peerDependencies: + eslint: ^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 || ^9.0.0 + + eslint-plugin-react@7.37.2: + resolution: {integrity: sha512-EsTAnj9fLVr/GZleBLFbj/sSuXeWmp1eXIN60ceYnZveqEaUCyW4X+Vh4WTdUhCkW4xutXYqTXCUSyqD4rB75w==} + engines: {node: '>=4'} + peerDependencies: + eslint: ^3 || ^4 || ^5 || ^6 || ^7 || ^8 || ^9.7 + + eslint-scope@7.2.2: + resolution: {integrity: sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + eslint-visitor-keys@3.4.3: + resolution: {integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + eslint@8.57.1: + resolution: {integrity: sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + deprecated: This version is no longer supported. Please see https://eslint.org/version-support for other options. 
+ hasBin: true + + espree@9.6.1: + resolution: {integrity: sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + esprima@4.0.1: + resolution: {integrity: sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==} + engines: {node: '>=4'} + hasBin: true + + esquery@1.6.0: + resolution: {integrity: sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==} + engines: {node: '>=0.10'} + + esrecurse@4.3.0: + resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==} + engines: {node: '>=4.0'} + + estraverse@4.3.0: + resolution: {integrity: sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==} + engines: {node: '>=4.0'} + + estraverse@5.3.0: + resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==} + engines: {node: '>=4.0'} + + esutils@2.0.3: + resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} + engines: {node: '>=0.10.0'} + + fast-deep-equal@3.1.3: + resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + + fast-glob@3.3.1: + resolution: {integrity: sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg==} + engines: {node: '>=8.6.0'} + + fast-glob@3.3.2: + resolution: {integrity: sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==} + engines: {node: '>=8.6.0'} + + fast-json-stable-stringify@2.1.0: + resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} + + fast-levenshtein@2.0.6: + resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} + + fastq@1.17.1: + resolution: {integrity: sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==} + + fetch-blob@3.2.0: + resolution: {integrity: sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==} + engines: {node: ^12.20 || >= 14.13} + + file-entry-cache@6.0.1: + resolution: {integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==} + engines: {node: ^10.12.0 || >=12.0.0} + + fill-range@7.1.1: + resolution: {integrity: sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==} + engines: {node: '>=8'} + + find-up@5.0.0: + resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} + engines: {node: '>=10'} + + flat-cache@3.2.0: + resolution: {integrity: sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==} + engines: {node: ^10.12.0 || >=12.0.0} + + flatted@3.3.1: + resolution: {integrity: sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==} + + follow-redirects@1.15.9: + resolution: {integrity: sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==} + engines: {node: '>=4.0'} + peerDependencies: + debug: '*' + peerDependenciesMeta: + debug: + optional: true + + for-each@0.3.3: + resolution: {integrity: 
sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==} + + foreground-child@3.3.0: + resolution: {integrity: sha512-Ld2g8rrAyMYFXBhEqMz8ZAHBi4J4uS1i/CxGMDnjyFWddMXLVcDp051DZfu+t7+ab7Wv6SMqpWmyFIj5UbfFvg==} + engines: {node: '>=14'} + + form-data@4.0.1: + resolution: {integrity: sha512-tzN8e4TX8+kkxGPK8D5u0FNmjPUjw3lwC9lSLxxoB/+GtsJG91CO8bSWy73APlgAZzZbXEYZJuxjkHH2w+Ezhw==} + engines: {node: '>= 6'} + + formdata-polyfill@4.0.10: + resolution: {integrity: sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==} + engines: {node: '>=12.20.0'} + + fraction.js@4.3.7: + resolution: {integrity: sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==} + + fs.realpath@1.0.0: + resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==} + + fsevents@2.3.3: + resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + + function-bind@1.1.2: + resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} + + function.prototype.name@1.1.6: + resolution: {integrity: sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg==} + engines: {node: '>= 0.4'} + + functions-have-names@1.2.3: + resolution: {integrity: sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==} + + gensync@1.0.0-beta.2: + resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==} + engines: {node: '>=6.9.0'} + + get-intrinsic@1.2.4: + resolution: {integrity: sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==} + engines: {node: '>= 0.4'} + + get-nonce@1.0.1: + resolution: {integrity: sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q==} + engines: {node: '>=6'} + + get-symbol-description@1.0.2: + resolution: {integrity: sha512-g0QYk1dZBxGwk+Ngc+ltRH2IBp2f7zBkBMBJZCDerh6EhlhSR6+9irMCuT/09zD6qkarHUSn529sK/yL4S27mg==} + engines: {node: '>= 0.4'} + + get-tsconfig@4.8.1: + resolution: {integrity: sha512-k9PN+cFBmaLWtVz29SkUoqU5O0slLuHJXt/2P+tMVFT+phsSGXGkp9t3rQIqdz0e+06EHNGs3oM6ZX1s2zHxRg==} + + glob-parent@5.1.2: + resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==} + engines: {node: '>= 6'} + + glob-parent@6.0.2: + resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==} + engines: {node: '>=10.13.0'} + + glob@10.4.5: + resolution: {integrity: sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==} + hasBin: true + + glob@7.2.3: + resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} + deprecated: Glob versions prior to v9 are no longer supported + + glob@8.1.0: + resolution: {integrity: sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==} + engines: {node: '>=12'} + deprecated: Glob versions prior to v9 are no longer supported + + globals@11.12.0: + resolution: {integrity: sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==} + 
engines: {node: '>=4'} + + globals@13.24.0: + resolution: {integrity: sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==} + engines: {node: '>=8'} + + globalthis@1.0.4: + resolution: {integrity: sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ==} + engines: {node: '>= 0.4'} + + gopd@1.0.1: + resolution: {integrity: sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==} + + graceful-fs@4.2.11: + resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} + + graphemer@1.4.0: + resolution: {integrity: sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==} + + has-bigints@1.0.2: + resolution: {integrity: sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==} + + has-flag@4.0.0: + resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} + engines: {node: '>=8'} + + has-property-descriptors@1.0.2: + resolution: {integrity: sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==} + + has-proto@1.0.3: + resolution: {integrity: sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==} + engines: {node: '>= 0.4'} + + has-symbols@1.0.3: + resolution: {integrity: sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==} + engines: {node: '>= 0.4'} + + has-tostringtag@1.0.2: + resolution: {integrity: sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==} + engines: {node: '>= 0.4'} + + hasown@2.0.2: + resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} + engines: {node: '>= 0.4'} + + hoist-non-react-statics@3.3.2: + resolution: {integrity: sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==} + + ignore@5.3.2: + resolution: {integrity: sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==} + engines: {node: '>= 4'} + + immer@10.1.1: + resolution: {integrity: sha512-s2MPrmjovJcoMaHtx6K11Ra7oD05NT97w1IC5zpMkT6Atjr7H8LjaDd81iIxUYpMKSRRNMJE703M1Fhr/TctHw==} + + immutable@4.3.7: + resolution: {integrity: sha512-1hqclzwYwjRDFLjcFxOM5AYkkG0rpFPpr1RLPMEuGczoS7YA8gLhy8SWXYRAA/XwfEHpfo3cw5JGioS32fnMRw==} + + import-fresh@3.3.0: + resolution: {integrity: sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==} + engines: {node: '>=6'} + + imurmurhash@0.1.4: + resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} + engines: {node: '>=0.8.19'} + + inflight@1.0.6: + resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} + deprecated: This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful. 
+ + inherits@2.0.4: + resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} + + internal-slot@1.0.7: + resolution: {integrity: sha512-NGnrKwXzSms2qUUih/ILZ5JBqNTSa1+ZmP6flaIp6KmSElgE9qdndzS3cqjrDovwFdmwsGsLdeFgB6suw+1e9g==} + engines: {node: '>= 0.4'} + + invariant@2.2.4: + resolution: {integrity: sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==} + + is-array-buffer@3.0.4: + resolution: {integrity: sha512-wcjaerHw0ydZwfhiKbXJWLDY8A7yV7KhjQOpb83hGgGfId/aQa4TOvwyzn2PuswW2gPCYEL/nEAiSVpdOj1lXw==} + engines: {node: '>= 0.4'} + + is-arrayish@0.2.1: + resolution: {integrity: sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==} + + is-arrayish@0.3.2: + resolution: {integrity: sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==} + + is-async-function@2.0.0: + resolution: {integrity: sha512-Y1JXKrfykRJGdlDwdKlLpLyMIiWqWvuSd17TvZk68PLAOGOoF4Xyav1z0Xhoi+gCYjZVeC5SI+hYFOfvXmGRCA==} + engines: {node: '>= 0.4'} + + is-bigint@1.0.4: + resolution: {integrity: sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==} + + is-binary-path@2.1.0: + resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==} + engines: {node: '>=8'} + + is-boolean-object@1.1.2: + resolution: {integrity: sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==} + engines: {node: '>= 0.4'} + + is-bun-module@1.2.1: + resolution: {integrity: sha512-AmidtEM6D6NmUiLOvvU7+IePxjEjOzra2h0pSrsfSAcXwl/83zLLXDByafUJy9k/rKK0pvXMLdwKwGHlX2Ke6Q==} + + is-callable@1.2.7: + resolution: {integrity: sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==} + engines: {node: '>= 0.4'} + + is-core-module@2.15.1: + resolution: {integrity: sha512-z0vtXSwucUJtANQWldhbtbt7BnL0vxiFjIdDLAatwhDYty2bad6s+rijD6Ri4YuYJubLzIJLUidCh09e1djEVQ==} + engines: {node: '>= 0.4'} + + is-data-view@1.0.1: + resolution: {integrity: sha512-AHkaJrsUVW6wq6JS8y3JnM/GJF/9cf+k20+iDzlSaJrinEo5+7vRiteOSwBhHRiAyQATN1AmY4hwzxJKPmYf+w==} + engines: {node: '>= 0.4'} + + is-date-object@1.0.5: + resolution: {integrity: sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==} + engines: {node: '>= 0.4'} + + is-extglob@2.1.1: + resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} + engines: {node: '>=0.10.0'} + + is-finalizationregistry@1.0.2: + resolution: {integrity: sha512-0by5vtUJs8iFQb5TYUHHPudOR+qXYIMKtiUzvLIZITZUjknFmziyBJuLhVRc+Ds0dREFlskDNJKYIdIzu/9pfw==} + + is-fullwidth-code-point@3.0.0: + resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==} + engines: {node: '>=8'} + + is-generator-function@1.0.10: + resolution: {integrity: sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==} + engines: {node: '>= 0.4'} + + is-glob@4.0.3: + resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} + engines: {node: '>=0.10.0'} + + is-map@2.0.3: + resolution: {integrity: sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw==} + engines: {node: '>= 0.4'} + + is-negative-zero@2.0.3: + resolution: 
{integrity: sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==} + engines: {node: '>= 0.4'} + + is-number-object@1.0.7: + resolution: {integrity: sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==} + engines: {node: '>= 0.4'} + + is-number@7.0.0: + resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} + engines: {node: '>=0.12.0'} + + is-path-inside@3.0.3: + resolution: {integrity: sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==} + engines: {node: '>=8'} + + is-regex@1.1.4: + resolution: {integrity: sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==} + engines: {node: '>= 0.4'} + + is-set@2.0.3: + resolution: {integrity: sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg==} + engines: {node: '>= 0.4'} + + is-shared-array-buffer@1.0.3: + resolution: {integrity: sha512-nA2hv5XIhLR3uVzDDfCIknerhx8XUKnstuOERPNNIinXG7v9u+ohXF67vxm4TPTEPU6lm61ZkwP3c9PCB97rhg==} + engines: {node: '>= 0.4'} + + is-string@1.0.7: + resolution: {integrity: sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==} + engines: {node: '>= 0.4'} + + is-symbol@1.0.4: + resolution: {integrity: sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==} + engines: {node: '>= 0.4'} + + is-typed-array@1.1.13: + resolution: {integrity: sha512-uZ25/bUAlUY5fR4OKT4rZQEBrzQWYV9ZJYGGsUmEJ6thodVJ1HX64ePQ6Z0qPWP+m+Uq6e9UugrE38jeYsDSMw==} + engines: {node: '>= 0.4'} + + is-weakmap@2.0.2: + resolution: {integrity: sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w==} + engines: {node: '>= 0.4'} + + is-weakref@1.0.2: + resolution: {integrity: sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==} + + is-weakset@2.0.3: + resolution: {integrity: sha512-LvIm3/KWzS9oRFHugab7d+M/GcBXuXX5xZkzPmN+NxihdQlZUQ4dWuSV1xR/sq6upL1TJEDrfBgRepHFdBtSNQ==} + engines: {node: '>= 0.4'} + + isarray@2.0.5: + resolution: {integrity: sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==} + + isexe@2.0.0: + resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + + iterator.prototype@1.1.3: + resolution: {integrity: sha512-FW5iMbeQ6rBGm/oKgzq2aW4KvAGpxPzYES8N4g4xNXUKpL1mclMvOe+76AcLDTvD+Ze+sOpVhgdAQEKF4L9iGQ==} + engines: {node: '>= 0.4'} + + jackspeak@3.4.3: + resolution: {integrity: sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==} + + jiti@1.21.6: + resolution: {integrity: sha512-2yTgeWTWzMWkHu6Jp9NKgePDaYHbntiwvYuuJLbbN9vl7DC9DvXKOB2BC3ZZ92D3cvV/aflH0osDfwpHepQ53w==} + hasBin: true + + js-tokens@4.0.0: + resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} + + js-yaml@4.1.0: + resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==} + hasBin: true + + js2xmlparser@4.0.2: + resolution: {integrity: sha512-6n4D8gLlLf1n5mNLQPRfViYzu9RATblzPEtm1SthMX1Pjao0r9YI9nw7ZIfRxQMERS87mcswrg+r/OYrPRX6jA==} + + jsdoc@4.0.4: + resolution: {integrity: sha512-zeFezwyXeG4syyYHbvh1A967IAqq/67yXtXvuL5wnqCkFZe8I0vKfm+EO+YEvLguo6w9CDUbrAXVtJSHh2E8rw==} + 
engines: {node: '>=12.0.0'} + hasBin: true + + jsesc@3.0.2: + resolution: {integrity: sha512-xKqzzWXDttJuOcawBt4KnKHHIf5oQ/Cxax+0PWFG+DFDgHNAdi+TXECADI+RYiFUMmx8792xsMbbgXj4CwnP4g==} + engines: {node: '>=6'} + hasBin: true + + json-buffer@3.0.1: + resolution: {integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==} + + json-parse-even-better-errors@2.3.1: + resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==} + + json-schema-traverse@0.4.1: + resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==} + + json-stable-stringify-without-jsonify@1.0.1: + resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==} + + json5@1.0.2: + resolution: {integrity: sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==} + hasBin: true + + json5@2.2.3: + resolution: {integrity: sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==} + engines: {node: '>=6'} + hasBin: true + + jsx-ast-utils@3.3.5: + resolution: {integrity: sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ==} + engines: {node: '>=4.0'} + + keyv@4.5.4: + resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==} + + klaw@3.0.0: + resolution: {integrity: sha512-0Fo5oir+O9jnXu5EefYbVK+mHMBeEVEy2cmctR1O1NECcCkPRreJKrS6Qt/j3KC2C148Dfo9i3pCmCMsdqGr0g==} + + language-subtag-registry@0.3.23: + resolution: {integrity: sha512-0K65Lea881pHotoGEa5gDlMxt3pctLi2RplBb7Ezh4rRdLEOtgi7n4EwK9lamnUCkKBqaeKRVebTq6BAxSkpXQ==} + + language-tags@1.0.9: + resolution: {integrity: sha512-MbjN408fEndfiQXbFQ1vnd+1NoLDsnQW41410oQBXiyXDMYH5z505juWa4KUE1LqxRC7DgOgZDbKLxHIwm27hA==} + engines: {node: '>=0.10'} + + levn@0.3.0: + resolution: {integrity: sha512-0OO4y2iOHix2W6ujICbKIaEQXvFQHue65vUG3pb5EUomzPI90z9hsA1VsO/dbIIpC53J8gxM9Q4Oho0jrCM/yA==} + engines: {node: '>= 0.8.0'} + + levn@0.4.1: + resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} + engines: {node: '>= 0.8.0'} + + lilconfig@2.1.0: + resolution: {integrity: sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==} + engines: {node: '>=10'} + + lilconfig@3.1.2: + resolution: {integrity: sha512-eop+wDAvpItUys0FWkHIKeC9ybYrTGbU41U5K7+bttZZeohvnY7M9dZ5kB21GNWiFT2q1OoPTvncPCgSOVO5ow==} + engines: {node: '>=14'} + + lines-and-columns@1.2.4: + resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==} + + linkify-it@5.0.0: + resolution: {integrity: sha512-5aHCbzQRADcdP+ATqnDuhhJ/MRIqDkZX5pyjFHRRysS8vZ5AbqGEoFIb6pYHPZ+L/OC2Lc+xT8uHVVR5CAK/wQ==} + + locate-path@6.0.0: + resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} + engines: {node: '>=10'} + + lodash-es@4.17.21: + resolution: {integrity: sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==} + + lodash.debounce@4.0.8: + resolution: {integrity: sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==} + + lodash.merge@4.6.2: + resolution: {integrity: 
sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} + + lodash@4.17.21: + resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==} + + long@5.2.3: + resolution: {integrity: sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q==} + + loose-envify@1.4.0: + resolution: {integrity: sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==} + hasBin: true + + lower-case@2.0.2: + resolution: {integrity: sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==} + + lru-cache@10.4.3: + resolution: {integrity: sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==} + + lru-cache@5.1.1: + resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==} + + lucide-react@0.453.0: + resolution: {integrity: sha512-kL+RGZCcJi9BvJtzg2kshO192Ddy9hv3ij+cPrVPWSRzgCWCVazoQJxOjAwgK53NomL07HB7GPHW120FimjNhQ==} + peerDependencies: + react: ^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0-rc + + markdown-it-anchor@8.6.7: + resolution: {integrity: sha512-FlCHFwNnutLgVTflOYHPW2pPcl2AACqVzExlkGQNsi4CJgqOHN7YTgDd4LuhgN1BFO3TS0vLAruV1Td6dwWPJA==} + peerDependencies: + '@types/markdown-it': '*' + markdown-it: '*' + + markdown-it@14.1.0: + resolution: {integrity: sha512-a54IwgWPaeBCAAsv13YgmALOF1elABB08FxO9i+r4VFk5Vl4pKokRPeX8u5TCgSsPi6ec1otfLjdOpVcgbpshg==} + hasBin: true + + marked@4.3.0: + resolution: {integrity: sha512-PRsaiG84bK+AMvxziE/lCFss8juXjNaWzVbN5tXAm4XjeaS9NAHhop+PjQxz2A9h8Q4M/xGmzP8vqNwy6JeK0A==} + engines: {node: '>= 12'} + hasBin: true + + mdn-data@2.0.28: + resolution: {integrity: sha512-aylIc7Z9y4yzHYAJNuESG3hfhC+0Ibp/MAMiaOZgNv4pmEdFyfZhhhny4MNiAfWdBQ1RQ2mfDWmM1x8SvGyp8g==} + + mdn-data@2.0.30: + resolution: {integrity: sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==} + + mdurl@2.0.0: + resolution: {integrity: sha512-Lf+9+2r+Tdp5wXDXC4PcIBjTDtq4UKjCPMQhKIuzpJNW0b96kVqSwW0bT7FhRSfmAiFYgP+SCRvdrDozfh0U5w==} + + merge2@1.4.1: + resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==} + engines: {node: '>= 8'} + + micromatch@4.0.8: + resolution: {integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==} + engines: {node: '>=8.6'} + + mime-db@1.52.0: + resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==} + engines: {node: '>= 0.6'} + + mime-types@2.1.35: + resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==} + engines: {node: '>= 0.6'} + + minimatch@3.1.2: + resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} + + minimatch@5.1.6: + resolution: {integrity: sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==} + engines: {node: '>=10'} + + minimatch@9.0.5: + resolution: {integrity: sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==} + engines: {node: '>=16 || 14 >=14.17'} + + minimist@1.2.8: + resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==} + + minipass@7.1.2: + resolution: 
{integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==} + engines: {node: '>=16 || 14 >=14.17'} + + mkdirp@1.0.4: + resolution: {integrity: sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==} + engines: {node: '>=10'} + hasBin: true + + ms@2.1.3: + resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + + mz@2.7.0: + resolution: {integrity: sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==} + + nanoid@3.3.7: + resolution: {integrity: sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==} + engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} + hasBin: true + + natural-compare@1.4.0: + resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} + + next-themes@0.3.0: + resolution: {integrity: sha512-/QHIrsYpd6Kfk7xakK4svpDI5mmXP0gfvCoJdGpZQ2TOrQZmsW0QxjaiLn8wbIKjtm4BTSqLoix4lxYYOnLJ/w==} + peerDependencies: + react: ^16.8 || ^17 || ^18 + react-dom: ^16.8 || ^17 || ^18 + + next@15.0.3: + resolution: {integrity: sha512-ontCbCRKJUIoivAdGB34yCaOcPgYXr9AAkV/IwqFfWWTXEPUgLYkSkqBhIk9KK7gGmgjc64B+RdoeIDM13Irnw==} + engines: {node: ^18.18.0 || ^19.8.0 || >= 20.0.0} + hasBin: true + peerDependencies: + '@opentelemetry/api': ^1.1.0 + '@playwright/test': ^1.41.2 + babel-plugin-react-compiler: '*' + react: ^18.2.0 || 19.0.0-rc-66855b96-20241106 + react-dom: ^18.2.0 || 19.0.0-rc-66855b96-20241106 + sass: ^1.3.0 + peerDependenciesMeta: + '@opentelemetry/api': + optional: true + '@playwright/test': + optional: true + babel-plugin-react-compiler: + optional: true + sass: + optional: true + + no-case@3.0.4: + resolution: {integrity: sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==} + + node-addon-api@7.1.1: + resolution: {integrity: sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ==} + + node-domexception@1.0.0: + resolution: {integrity: sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==} + engines: {node: '>=10.5.0'} + + node-releases@2.0.18: + resolution: {integrity: sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==} + + normalize-path@3.0.0: + resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==} + engines: {node: '>=0.10.0'} + + normalize-range@0.1.2: + resolution: {integrity: sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==} + engines: {node: '>=0.10.0'} + + nth-check@2.1.1: + resolution: {integrity: sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==} + + object-assign@4.1.1: + resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==} + engines: {node: '>=0.10.0'} + + object-hash@3.0.0: + resolution: {integrity: sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==} + engines: {node: '>= 6'} + + object-inspect@1.13.3: + resolution: {integrity: sha512-kDCGIbxkDSXE3euJZZXzc6to7fCrKHNI/hSRQnRuQ+BWjFNzZwiFF8fj/6o2t2G9/jTj8PSIYTfCLelLZEeRpA==} + engines: {node: '>= 0.4'} + + object-keys@1.1.1: + resolution: {integrity: 
sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==} + engines: {node: '>= 0.4'} + + object.assign@4.1.5: + resolution: {integrity: sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ==} + engines: {node: '>= 0.4'} + + object.entries@1.1.8: + resolution: {integrity: sha512-cmopxi8VwRIAw/fkijJohSfpef5PdN0pMQJN6VC/ZKvn0LIknWD8KtgY6KlQdEc4tIjcQ3HxSMmnvtzIscdaYQ==} + engines: {node: '>= 0.4'} + + object.fromentries@2.0.8: + resolution: {integrity: sha512-k6E21FzySsSK5a21KRADBd/NGneRegFO5pLHfdQLpRDETUNJueLXs3WCzyQ3tFRDYgbq3KHGXfTbi2bs8WQ6rQ==} + engines: {node: '>= 0.4'} + + object.groupby@1.0.3: + resolution: {integrity: sha512-+Lhy3TQTuzXI5hevh8sBGqbmurHbbIjAi0Z4S63nthVLmLxfbj4T54a4CfZrXIrt9iP4mVAPYMo/v99taj3wjQ==} + engines: {node: '>= 0.4'} + + object.values@1.2.0: + resolution: {integrity: sha512-yBYjY9QX2hnRmZHAjG/f13MzmBzxzYgQhFrke06TTyKY5zSTEqkOeukBzIdVA3j3ulu8Qa3MbVFShV7T2RmGtQ==} + engines: {node: '>= 0.4'} + + once@1.4.0: + resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} + + optionator@0.8.3: + resolution: {integrity: sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==} + engines: {node: '>= 0.8.0'} + + optionator@0.9.4: + resolution: {integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==} + engines: {node: '>= 0.8.0'} + + p-is-promise@4.0.0: + resolution: {integrity: sha512-4G3B+86qsIAX/+ip/yhHX9WUcyFKYkQYtE5bGkjpZyGK0Re53RbHky2UKt6RQVkDbUXb8EJRb4iga2SaI360nQ==} + engines: {node: '>=12'} + + p-limit@3.1.0: + resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} + engines: {node: '>=10'} + + p-locate@5.0.0: + resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} + engines: {node: '>=10'} + + package-json-from-dist@1.0.1: + resolution: {integrity: sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==} + + pako@2.1.0: + resolution: {integrity: sha512-w+eufiZ1WuJYgPXbV/PO3NCMEc3xqylkKHzp8bxp1uW4qaSNQUkwmLLEc3kKsfz8lpV1F8Ht3U1Cm+9Srog2ug==} + + parent-module@1.0.1: + resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==} + engines: {node: '>=6'} + + parse-json@5.2.0: + resolution: {integrity: sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==} + engines: {node: '>=8'} + + path-exists@4.0.0: + resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} + engines: {node: '>=8'} + + path-is-absolute@1.0.1: + resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==} + engines: {node: '>=0.10.0'} + + path-key@3.1.1: + resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} + engines: {node: '>=8'} + + path-parse@1.0.7: + resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} + + path-scurry@1.11.1: + resolution: {integrity: sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==} + engines: {node: '>=16 || 14 >=14.18'} + + path-type@4.0.0: + resolution: {integrity: 
sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==} + engines: {node: '>=8'} + + picocolors@1.1.1: + resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} + + picomatch@2.3.1: + resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} + engines: {node: '>=8.6'} + + pify@2.3.0: + resolution: {integrity: sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==} + engines: {node: '>=0.10.0'} + + pirates@4.0.6: + resolution: {integrity: sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==} + engines: {node: '>= 6'} + + possible-typed-array-names@1.0.0: + resolution: {integrity: sha512-d7Uw+eZoloe0EHDIYoe+bQ5WXnGMOpmiZFTuMWCwpjzzkL2nTjcKiAk4hh8TjnGye2TwWOk3UXucZ+3rbmBa8Q==} + engines: {node: '>= 0.4'} + + postcss-import@15.1.0: + resolution: {integrity: sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==} + engines: {node: '>=14.0.0'} + peerDependencies: + postcss: ^8.0.0 + + postcss-js@4.0.1: + resolution: {integrity: sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw==} + engines: {node: ^12 || ^14 || >= 16} + peerDependencies: + postcss: ^8.4.21 + + postcss-load-config@4.0.2: + resolution: {integrity: sha512-bSVhyJGL00wMVoPUzAVAnbEoWyqRxkjv64tUl427SKnPrENtq6hJwUojroMz2VB+Q1edmi4IfrAPpami5VVgMQ==} + engines: {node: '>= 14'} + peerDependencies: + postcss: '>=8.0.9' + ts-node: '>=9.0.0' + peerDependenciesMeta: + postcss: + optional: true + ts-node: + optional: true + + postcss-nested@6.2.0: + resolution: {integrity: sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==} + engines: {node: '>=12.0'} + peerDependencies: + postcss: ^8.2.14 + + postcss-selector-parser@6.1.2: + resolution: {integrity: sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==} + engines: {node: '>=4'} + + postcss-value-parser@4.2.0: + resolution: {integrity: sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==} + + postcss@8.4.31: + resolution: {integrity: sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==} + engines: {node: ^10 || ^12 || >=14} + + postcss@8.4.48: + resolution: {integrity: sha512-GCRK8F6+Dl7xYniR5a4FYbpBzU8XnZVeowqsQFYdcXuSbChgiks7qybSkbvnaeqv0G0B+dd9/jJgH8kkLDQeEA==} + engines: {node: ^10 || ^12 || >=14} + + prelude-ls@1.1.2: + resolution: {integrity: sha512-ESF23V4SKG6lVSGZgYNpbsiaAkdab6ZgOxe52p7+Kid3W3u3bxR4Vfd/o21dmN7jSt0IwgZ4v5MUd26FEtXE9w==} + engines: {node: '>= 0.8.0'} + + prelude-ls@1.2.1: + resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} + engines: {node: '>= 0.8.0'} + + prettier-plugin-tailwindcss@0.6.8: + resolution: {integrity: sha512-dGu3kdm7SXPkiW4nzeWKCl3uoImdd5CTZEJGxyypEPL37Wj0HT2pLqjrvSei1nTeuQfO4PUfjeW5cTUNRLZ4sA==} + engines: {node: '>=14.21.3'} + peerDependencies: + '@ianvs/prettier-plugin-sort-imports': '*' + '@prettier/plugin-pug': '*' + '@shopify/prettier-plugin-liquid': '*' + '@trivago/prettier-plugin-sort-imports': '*' + '@zackad/prettier-plugin-twig-melody': '*' + prettier: ^3.0 + prettier-plugin-astro: '*' + prettier-plugin-css-order: '*' + prettier-plugin-import-sort: '*' + prettier-plugin-jsdoc: 
'*' + prettier-plugin-marko: '*' + prettier-plugin-multiline-arrays: '*' + prettier-plugin-organize-attributes: '*' + prettier-plugin-organize-imports: '*' + prettier-plugin-sort-imports: '*' + prettier-plugin-style-order: '*' + prettier-plugin-svelte: '*' + peerDependenciesMeta: + '@ianvs/prettier-plugin-sort-imports': + optional: true + '@prettier/plugin-pug': + optional: true + '@shopify/prettier-plugin-liquid': + optional: true + '@trivago/prettier-plugin-sort-imports': + optional: true + '@zackad/prettier-plugin-twig-melody': + optional: true + prettier-plugin-astro: + optional: true + prettier-plugin-css-order: + optional: true + prettier-plugin-import-sort: + optional: true + prettier-plugin-jsdoc: + optional: true + prettier-plugin-marko: + optional: true + prettier-plugin-multiline-arrays: + optional: true + prettier-plugin-organize-attributes: + optional: true + prettier-plugin-organize-imports: + optional: true + prettier-plugin-sort-imports: + optional: true + prettier-plugin-style-order: + optional: true + prettier-plugin-svelte: + optional: true + + prettier@3.3.3: + resolution: {integrity: sha512-i2tDNA0O5IrMO757lfrdQZCc2jPNDVntV0m/+4whiDfWaTKfMNgR7Qz0NAeGz/nRqF4m5/6CLzbP4/liHt12Ew==} + engines: {node: '>=14'} + hasBin: true + + prop-types@15.8.1: + resolution: {integrity: sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==} + + protobufjs-cli@1.1.3: + resolution: {integrity: sha512-MqD10lqF+FMsOayFiNOdOGNlXc4iKDCf0ZQPkPR+gizYh9gqUeGTWulABUCdI+N67w5RfJ6xhgX4J8pa8qmMXQ==} + engines: {node: '>=12.0.0'} + hasBin: true + peerDependencies: + protobufjs: ^7.0.0 + + protobufjs@7.4.0: + resolution: {integrity: sha512-mRUWCc3KUU4w1jU8sGxICXH/gNS94DvI1gxqDvBzhj1JpcsimQkYiOJfwsPUykUI5ZaspFbSgmBLER8IrQ3tqw==} + engines: {node: '>=12.0.0'} + + proxy-from-env@1.1.0: + resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} + + punycode.js@2.3.1: + resolution: {integrity: sha512-uxFIHU0YlHYhDQtV4R9J6a52SLx28BCjT+4ieh7IGbgwVJWO+km431c4yRlREUAsAmt/uMjQUyQHNEPf0M39CA==} + engines: {node: '>=6'} + + punycode@2.3.1: + resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==} + engines: {node: '>=6'} + + queue-microtask@1.2.3: + resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==} + + react-colorful@5.6.1: + resolution: {integrity: sha512-1exovf0uGTGyq5mXQT0zgQ80uvj2PCwvF8zY1RN9/vbJVSjSo3fsB/4L3ObbF7u70NduSiK4xu4Y6q1MHoUGEw==} + peerDependencies: + react: '>=16.8.0' + react-dom: '>=16.8.0' + + react-dom@18.3.1: + resolution: {integrity: sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==} + peerDependencies: + react: ^18.3.1 + + react-hook-form@7.53.2: + resolution: {integrity: sha512-YVel6fW5sOeedd1524pltpHX+jgU2u3DSDtXEaBORNdqiNrsX/nUI/iGXONegttg0mJVnfrIkiV0cmTU6Oo2xw==} + engines: {node: '>=18.0.0'} + peerDependencies: + react: ^16.8.0 || ^17 || ^18 || ^19 + + react-is@16.13.1: + resolution: {integrity: sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==} + + react-redux@9.1.2: + resolution: {integrity: sha512-0OA4dhM1W48l3uzmv6B7TXPCGmokUU4p1M44DGN2/D9a1FjVPukVjER1PcPX97jIg6aUeLq1XJo1IpfbgULn0w==} + peerDependencies: + '@types/react': ^18.2.25 + react: ^18.0 + redux: ^5.0.0 + peerDependenciesMeta: + '@types/react': + optional: true + redux: + optional: true + + 
react-remove-scroll-bar@2.3.6: + resolution: {integrity: sha512-DtSYaao4mBmX+HDo5YWYdBWQwYIQQshUV/dVxFxK+KM26Wjwp1gZ6rv6OC3oujI6Bfu6Xyg3TwK533AQutsn/g==} + engines: {node: '>=10'} + peerDependencies: + '@types/react': ^16.8.0 || ^17.0.0 || ^18.0.0 + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + peerDependenciesMeta: + '@types/react': + optional: true + + react-remove-scroll@2.6.0: + resolution: {integrity: sha512-I2U4JVEsQenxDAKaVa3VZ/JeJZe0/2DxPWL8Tj8yLKctQJQiZM52pn/GWFpSp8dftjM3pSAHVJZscAnC/y+ySQ==} + engines: {node: '>=10'} + peerDependencies: + '@types/react': ^16.8.0 || ^17.0.0 || ^18.0.0 + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + peerDependenciesMeta: + '@types/react': + optional: true + + react-style-singleton@2.2.1: + resolution: {integrity: sha512-ZWj0fHEMyWkHzKYUr2Bs/4zU6XLmq9HsgBURm7g5pAVfyn49DgUiNgY2d4lXRlYSiCif9YBGpQleewkcqddc7g==} + engines: {node: '>=10'} + peerDependencies: + '@types/react': ^16.8.0 || ^17.0.0 || ^18.0.0 + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + peerDependenciesMeta: + '@types/react': + optional: true + + react@18.3.1: + resolution: {integrity: sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==} + engines: {node: '>=0.10.0'} + + read-cache@1.0.0: + resolution: {integrity: sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==} + + readdirp@3.6.0: + resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==} + engines: {node: '>=8.10.0'} + + readdirp@4.0.2: + resolution: {integrity: sha512-yDMz9g+VaZkqBYS/ozoBJwaBhTbZo3UNYQHNRw1D3UFQB8oHB4uS/tAODO+ZLjGWmUbKnIlOWO+aaIiAxrUWHA==} + engines: {node: '>= 14.16.0'} + + redux-thunk@3.1.0: + resolution: {integrity: sha512-NW2r5T6ksUKXCabzhL9z+h206HQw/NJkcLm1GPImRQ8IzfXwRGqjVhKJGauHirT0DAuyy6hjdnMZaRoAcy0Klw==} + peerDependencies: + redux: ^5.0.0 + + redux@4.2.1: + resolution: {integrity: sha512-LAUYz4lc+Do8/g7aeRa8JkyDErK6ekstQaqWQrNRW//MY1TvCEpMtpTWvlQ+FPbWCx+Xixu/6SHt5N0HR+SB4w==} + + redux@5.0.1: + resolution: {integrity: sha512-M9/ELqF6fy8FwmkpnF0S3YKOqMyoWJ4+CS5Efg2ct3oY9daQvd/Pc71FpGZsVsbl3Cpb+IIcjBDUnnyBdQbq4w==} + + reflect.getprototypeof@1.0.6: + resolution: {integrity: sha512-fmfw4XgoDke3kdI6h4xcUz1dG8uaiv5q9gcEwLS4Pnth2kxT+GZ7YehS1JTMGBQmtV7Y4GFGbs2re2NqhdozUg==} + engines: {node: '>= 0.4'} + + regenerate-unicode-properties@10.2.0: + resolution: {integrity: sha512-DqHn3DwbmmPVzeKj9woBadqmXxLvQoQIwu7nopMc72ztvxVmVk2SBhSnx67zuye5TP+lJsb/TBQsjLKhnDf3MA==} + engines: {node: '>=4'} + + regenerate@1.4.2: + resolution: {integrity: sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==} + + regenerator-runtime@0.14.1: + resolution: {integrity: sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==} + + regenerator-transform@0.15.2: + resolution: {integrity: sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg==} + + regexp.prototype.flags@1.5.3: + resolution: {integrity: sha512-vqlC04+RQoFalODCbCumG2xIOvapzVMHwsyIGM/SIE8fRhFFsXeH8/QQ+s0T0kDAhKc4k30s73/0ydkHQz6HlQ==} + engines: {node: '>= 0.4'} + + regexpu-core@6.1.1: + resolution: {integrity: sha512-k67Nb9jvwJcJmVpw0jPttR1/zVfnKf8Km0IPatrU/zJ5XeG3+Slx0xLXs9HByJSzXzrlz5EDvN6yLNMDc2qdnw==} + engines: {node: '>=4'} + + regjsgen@0.8.0: + resolution: {integrity: sha512-RvwtGe3d7LvWiDQXeQw8p5asZUmfU1G/l6WbUXeHta7Y2PEIvBTwH6E2EfmYUK8pxcxEdEmaomqyp0vZZ7C+3Q==} + + regjsparser@0.11.2: + resolution: {integrity: 
sha512-3OGZZ4HoLJkkAZx/48mTXJNlmqTGOzc0o9OWQPuWpkOlXXPbyN6OafCcoXUnBqE2D3f/T5L+pWc1kdEmnfnRsA==} + hasBin: true + + requizzle@0.2.4: + resolution: {integrity: sha512-JRrFk1D4OQ4SqovXOgdav+K8EAhSB/LJZqCz8tbX0KObcdeM15Ss59ozWMBWmmINMagCwmqn4ZNryUGpBsl6Jw==} + + reselect@5.1.1: + resolution: {integrity: sha512-K/BG6eIky/SBpzfHZv/dd+9JBFiS4SWV7FIujVyJRux6e45+73RaUHXLmIR1f7WOMaQ0U1km6qwklRQxpJJY0w==} + + resolve-from@4.0.0: + resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==} + engines: {node: '>=4'} + + resolve-pkg-maps@1.0.0: + resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==} + + resolve@1.22.8: + resolution: {integrity: sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==} + hasBin: true + + resolve@2.0.0-next.5: + resolution: {integrity: sha512-U7WjGVG9sH8tvjW5SmGbQuui75FiyjAX72HX15DwBBwF9dNiQZRQAg9nnPhYy+TUnE0+VcrttuvNI8oSxZcocA==} + hasBin: true + + reusify@1.0.4: + resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==} + engines: {iojs: '>=1.0.0', node: '>=0.10.0'} + + rimraf@3.0.2: + resolution: {integrity: sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==} + deprecated: Rimraf versions prior to v4 are no longer supported + hasBin: true + + run-parallel@1.2.0: + resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==} + + safe-array-concat@1.1.2: + resolution: {integrity: sha512-vj6RsCsWBCf19jIeHEfkRMw8DPiBb+DMXklQ/1SGDHOMlHdPUkZXFQ2YdplS23zESTijAcurb1aSgJA3AgMu1Q==} + engines: {node: '>=0.4'} + + safe-regex-test@1.0.3: + resolution: {integrity: sha512-CdASjNJPvRa7roO6Ra/gLYBTzYzzPyyBXxIMdGW3USQLyjWEls2RgW5UBTXaQVp+OrpeCK3bLem8smtmheoRuw==} + engines: {node: '>= 0.4'} + + sass@1.80.6: + resolution: {integrity: sha512-ccZgdHNiBF1NHBsWvacvT5rju3y1d/Eu+8Ex6c21nHp2lZGLBEtuwc415QfiI1PJa1TpCo3iXwwSRjRpn2Ckjg==} + engines: {node: '>=14.0.0'} + hasBin: true + + scheduler@0.23.2: + resolution: {integrity: sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==} + + sdp@3.2.0: + resolution: {integrity: sha512-d7wDPgDV3DDiqulJjKiV2865wKsJ34YI+NDREbm+FySq6WuKOikwyNQcm+doLAZ1O6ltdO0SeKle2xMpN3Brgw==} + + semver@6.3.1: + resolution: {integrity: sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==} + hasBin: true + + semver@7.6.3: + resolution: {integrity: sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==} + engines: {node: '>=10'} + hasBin: true + + set-function-length@1.2.2: + resolution: {integrity: sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==} + engines: {node: '>= 0.4'} + + set-function-name@2.0.2: + resolution: {integrity: sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ==} + engines: {node: '>= 0.4'} + + sharp@0.33.5: + resolution: {integrity: sha512-haPVm1EkS9pgvHrQ/F3Xy+hgcuMV0Wm9vfIBSiwZ05k+xgb0PkBQpGsAA/oWdDobNaZTH5ppvHtzCFbnSEwHVw==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + + shebang-command@2.0.0: + resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} + engines: {node: '>=8'} + + shebang-regex@3.0.0: + resolution: {integrity: 
sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} + engines: {node: '>=8'} + + side-channel@1.0.6: + resolution: {integrity: sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==} + engines: {node: '>= 0.4'} + + signal-exit@4.1.0: + resolution: {integrity: sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==} + engines: {node: '>=14'} + + simple-swizzle@0.2.2: + resolution: {integrity: sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg==} + + snake-case@3.0.4: + resolution: {integrity: sha512-LAOh4z89bGQvl9pFfNF8V146i7o7/CqFPbqzYgP+yYzDIDeS9HaNFtXABamRW+AQzEVODcvE79ljJ+8a9YSdMg==} + + sonner@1.7.0: + resolution: {integrity: sha512-W6dH7m5MujEPyug3lpI2l3TC3Pp1+LTgK0Efg+IHDrBbtEjyCmCHHo6yfNBOsf1tFZ6zf+jceWwB38baC8yO9g==} + peerDependencies: + react: ^18.0.0 || ^19.0.0 || ^19.0.0-rc + react-dom: ^18.0.0 || ^19.0.0 || ^19.0.0-rc + + source-map-js@1.2.1: + resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==} + engines: {node: '>=0.10.0'} + + source-map@0.6.1: + resolution: {integrity: sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==} + engines: {node: '>=0.10.0'} + + split-on-first@3.0.0: + resolution: {integrity: sha512-qxQJTx2ryR0Dw0ITYyekNQWpz6f8dGd7vffGNflQQ3Iqj9NJ6qiZ7ELpZsJ/QBhIVAiDfXdag3+Gp8RvWa62AA==} + engines: {node: '>=12'} + + streamsearch@1.1.0: + resolution: {integrity: sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==} + engines: {node: '>=10.0.0'} + + string-width@4.2.3: + resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} + engines: {node: '>=8'} + + string-width@5.1.2: + resolution: {integrity: sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==} + engines: {node: '>=12'} + + string.prototype.includes@2.0.1: + resolution: {integrity: sha512-o7+c9bW6zpAdJHTtujeePODAhkuicdAryFsfVKwA+wGw89wJ4GTY484WTucM9hLtDEOpOvI+aHnzqnC5lHp4Rg==} + engines: {node: '>= 0.4'} + + string.prototype.matchall@4.0.11: + resolution: {integrity: sha512-NUdh0aDavY2og7IbBPenWqR9exH+E26Sv8e0/eTe1tltDGZL+GtBkDAnnyBtmekfK6/Dq3MkcGtzXFEd1LQrtg==} + engines: {node: '>= 0.4'} + + string.prototype.repeat@1.0.0: + resolution: {integrity: sha512-0u/TldDbKD8bFCQ/4f5+mNRrXwZ8hg2w7ZR8wa16e8z9XpePWl3eGEcUD0OXpEH/VJH/2G3gjUtR3ZOiBe2S/w==} + + string.prototype.trim@1.2.9: + resolution: {integrity: sha512-klHuCNxiMZ8MlsOihJhJEBJAiMVqU3Z2nEXWfWnIqjN0gEFS9J9+IxKozWWtQGcgoa1WUZzLjKPTr4ZHNFTFxw==} + engines: {node: '>= 0.4'} + + string.prototype.trimend@1.0.8: + resolution: {integrity: sha512-p73uL5VCHCO2BZZ6krwwQE3kCzM7NKmis8S//xEC6fQonchbum4eP6kR4DLEjQFO3Wnj3Fuo8NM0kOSjVdHjZQ==} + + string.prototype.trimstart@1.0.8: + resolution: {integrity: sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg==} + engines: {node: '>= 0.4'} + + strip-ansi@6.0.1: + resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} + engines: {node: '>=8'} + + strip-ansi@7.1.0: + resolution: {integrity: sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==} + engines: {node: '>=12'} + + strip-bom@3.0.0: + resolution: {integrity: 
sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==} + engines: {node: '>=4'} + + strip-json-comments@3.1.1: + resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} + engines: {node: '>=8'} + + styled-jsx@5.1.6: + resolution: {integrity: sha512-qSVyDTeMotdvQYoHWLNGwRFJHC+i+ZvdBRYosOFgC+Wg1vx4frN2/RG/NA7SYqqvKNLf39P2LSRA2pu6n0XYZA==} + engines: {node: '>= 12.0.0'} + peerDependencies: + '@babel/core': '*' + babel-plugin-macros: '*' + react: '>= 16.8.0 || 17.x.x || ^18.0.0-0 || ^19.0.0-0' + peerDependenciesMeta: + '@babel/core': + optional: true + babel-plugin-macros: + optional: true + + sucrase@3.35.0: + resolution: {integrity: sha512-8EbVDiu9iN/nESwxeSxDKe0dunta1GOlHufmSSXxMD2z2/tMZpDMpvXQGsc+ajGo8y2uYUmixaSRUc/QPoQ0GA==} + engines: {node: '>=16 || 14 >=14.17'} + hasBin: true + + supports-color@7.2.0: + resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} + engines: {node: '>=8'} + + supports-preserve-symlinks-flag@1.0.0: + resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} + engines: {node: '>= 0.4'} + + svg-parser@2.0.4: + resolution: {integrity: sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ==} + + svgo@3.3.2: + resolution: {integrity: sha512-OoohrmuUlBs8B8o6MB2Aevn+pRIH9zDALSR+6hhqVfa6fRwG/Qw9VUMSMW9VNg2CFc/MTIfabtdOVl9ODIJjpw==} + engines: {node: '>=14.0.0'} + hasBin: true + + swr@2.2.5: + resolution: {integrity: sha512-QtxqyclFeAsxEUeZIYmsaQ0UjimSq1RZ9Un7I68/0ClKK/U3LoyQunwkQfJZr2fc22DfIXLNDc2wFyTEikCUpg==} + peerDependencies: + react: ^16.11.0 || ^17.0.0 || ^18.0.0 + + tailwind-merge@2.5.4: + resolution: {integrity: sha512-0q8cfZHMu9nuYP/b5Shb7Y7Sh1B7Nnl5GqNr1U+n2p6+mybvRtayrQ+0042Z5byvTA8ihjlP8Odo8/VnHbZu4Q==} + + tailwindcss-animate@1.0.7: + resolution: {integrity: sha512-bl6mpH3T7I3UFxuvDEXLxy/VuFxBk5bbzplh7tXI68mwMokNYd1t9qPBHlnyTwfa4JGC4zP516I1hYYtQ/vspA==} + peerDependencies: + tailwindcss: '>=3.0.0 || insiders' + + tailwindcss@3.4.14: + resolution: {integrity: sha512-IcSvOcTRcUtQQ7ILQL5quRDg7Xs93PdJEk1ZLbhhvJc7uj/OAhYOnruEiwnGgBvUtaUAJ8/mhSw1o8L2jCiENA==} + engines: {node: '>=14.0.0'} + hasBin: true + + tapable@2.2.1: + resolution: {integrity: sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==} + engines: {node: '>=6'} + + text-table@0.2.0: + resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==} + + thenify-all@1.6.0: + resolution: {integrity: sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==} + engines: {node: '>=0.8'} + + thenify@3.3.1: + resolution: {integrity: sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==} + + tmp@0.2.3: + resolution: {integrity: sha512-nZD7m9iCPC5g0pYmcaxogYKggSfLsdxl8of3Q/oIbqCqLLIO9IAF0GWjX1z9NZRHPiXv8Wex4yDCaZsgEw0Y8w==} + engines: {node: '>=14.14'} + + to-regex-range@5.0.1: + resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} + engines: {node: '>=8.0'} + + ts-api-utils@1.4.0: + resolution: {integrity: sha512-032cPxaEKwM+GT3vA5JXNzIaizx388rhsSW79vGRNGXfRRAdEAn2mvk36PvK5HnOchyWZ7afLEXqYCvPCrzuzQ==} + engines: {node: '>=16'} + peerDependencies: + typescript: '>=4.2.0' + + 
ts-interface-checker@0.1.13: + resolution: {integrity: sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==} + + tsconfig-paths@3.15.0: + resolution: {integrity: sha512-2Ac2RgzDe/cn48GvOe3M+o82pEFewD3UPbyoUHHdKasHwJKjds4fLXWf/Ux5kATBKN20oaFGu+jbElp1pos0mg==} + + tslib@2.8.1: + resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==} + + type-check@0.3.2: + resolution: {integrity: sha512-ZCmOJdvOWDBYJlzAoFkC+Q0+bUyEOS1ltgp1MGU03fqHG+dbi9tBFU2Rd9QKiDZFAYrhPh2JUf7rZRIuHRKtOg==} + engines: {node: '>= 0.8.0'} + + type-check@0.4.0: + resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==} + engines: {node: '>= 0.8.0'} + + type-fest@0.20.2: + resolution: {integrity: sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==} + engines: {node: '>=10'} + + typed-array-buffer@1.0.2: + resolution: {integrity: sha512-gEymJYKZtKXzzBzM4jqa9w6Q1Jjm7x2d+sh19AdsD4wqnMPDYyvwpsIc2Q/835kHuo3BEQ7CjelGhfTsoBb2MQ==} + engines: {node: '>= 0.4'} + + typed-array-byte-length@1.0.1: + resolution: {integrity: sha512-3iMJ9q0ao7WE9tWcaYKIptkNBuOIcZCCT0d4MRvuuH88fEoEH62IuQe0OtraD3ebQEoTRk8XCBoknUNc1Y67pw==} + engines: {node: '>= 0.4'} + + typed-array-byte-offset@1.0.2: + resolution: {integrity: sha512-Ous0vodHa56FviZucS2E63zkgtgrACj7omjwd/8lTEMEPFFyjfixMZ1ZXenpgCFBBt4EC1J2XsyVS2gkG0eTFA==} + engines: {node: '>= 0.4'} + + typed-array-length@1.0.6: + resolution: {integrity: sha512-/OxDN6OtAk5KBpGb28T+HZc2M+ADtvRxXrKKbUwtsLgdoxgX13hyy7ek6bFRl5+aBs2yZzB0c4CnQfAtVypW/g==} + engines: {node: '>= 0.4'} + + typescript@5.6.3: + resolution: {integrity: sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw==} + engines: {node: '>=14.17'} + hasBin: true + + ua-parser-js@0.7.39: + resolution: {integrity: sha512-IZ6acm6RhQHNibSt7+c09hhvsKy9WUr4DVbeq9U8o71qxyYtJpQeDxQnMrVqnIFMLcQjHO0I9wgfO2vIahht4w==} + hasBin: true + + uc.micro@2.1.0: + resolution: {integrity: sha512-ARDJmphmdvUk6Glw7y9DQ2bFkKBHwQHLi2lsaH6PPmz/Ka9sFOBsBluozhDltWmnv9u/cF6Rt87znRTPV+yp/A==} + + uglify-js@3.19.3: + resolution: {integrity: sha512-v3Xu+yuwBXisp6QYTcH4UbH+xYJXqnq2m/LtQVWKWzYc1iehYnLixoQDN9FH6/j9/oybfd6W9Ghwkl8+UMKTKQ==} + engines: {node: '>=0.8.0'} + hasBin: true + + unbox-primitive@1.0.2: + resolution: {integrity: sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==} + + underscore@1.13.7: + resolution: {integrity: sha512-GMXzWtsc57XAtguZgaQViUOzs0KTkk8ojr3/xAxXLITqf/3EMwxC0inyETfDFjH/Krbhuep0HNbbjI9i/q3F3g==} + + undici-types@6.19.8: + resolution: {integrity: sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==} + + unicode-canonical-property-names-ecmascript@2.0.1: + resolution: {integrity: sha512-dA8WbNeb2a6oQzAQ55YlT5vQAWGV9WXOsi3SskE3bcCdM0P4SDd+24zS/OCacdRq5BkdsRj9q3Pg6YyQoxIGqg==} + engines: {node: '>=4'} + + unicode-match-property-ecmascript@2.0.0: + resolution: {integrity: sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==} + engines: {node: '>=4'} + + unicode-match-property-value-ecmascript@2.2.0: + resolution: {integrity: sha512-4IehN3V/+kkr5YeSSDDQG8QLqO26XpL2XP3GQtqwlT/QYSECAwFztxVHjlbh0+gjJ3XmNLS0zDsbgs9jWKExLg==} + engines: {node: '>=4'} + + unicode-property-aliases-ecmascript@2.1.0: + resolution: {integrity: 
sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==} + engines: {node: '>=4'} + + update-browserslist-db@1.1.1: + resolution: {integrity: sha512-R8UzCaa9Az+38REPiJ1tXlImTJXlVfgHZsglwBD/k6nj76ctsH1E3q4doGrukiLQd3sGQYu56r5+lo5r94l29A==} + hasBin: true + peerDependencies: + browserslist: '>= 4.21.0' + + uri-js@4.4.1: + resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} + + use-callback-ref@1.3.2: + resolution: {integrity: sha512-elOQwe6Q8gqZgDA8mrh44qRTQqpIHDcZ3hXTLjBe1i4ph8XpNJnO+aQf3NaG+lriLopI4HMx9VjQLfPQ6vhnoA==} + engines: {node: '>=10'} + peerDependencies: + '@types/react': ^16.8.0 || ^17.0.0 || ^18.0.0 + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + peerDependenciesMeta: + '@types/react': + optional: true + + use-sidecar@1.1.2: + resolution: {integrity: sha512-epTbsLuzZ7lPClpz2TyryBfztm7m+28DlEv2ZCQ3MDr5ssiwyOwGH/e5F9CkfWjJ1t4clvI58yF822/GUkjjhw==} + engines: {node: '>=10'} + peerDependencies: + '@types/react': ^16.9.0 || ^17.0.0 || ^18.0.0 + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + peerDependenciesMeta: + '@types/react': + optional: true + + use-sync-external-store@1.2.2: + resolution: {integrity: sha512-PElTlVMwpblvbNqQ82d2n6RjStvdSoNe9FG28kNfz3WiXilJm4DdNkEzRhCZuIDwY8U08WVihhGR5iRqAwfDiw==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + + util-deprecate@1.0.2: + resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} + + web-streams-polyfill@3.3.3: + resolution: {integrity: sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==} + engines: {node: '>= 8'} + + webrtc-adapter@8.2.0: + resolution: {integrity: sha512-umxCMgedPAVq4Pe/jl3xmelLXLn4XZWFEMR5Iipb5wJ+k1xMX0yC4ZY9CueZUU1MjapFxai1tFGE7R/kotH6Ww==} + engines: {node: '>=6.0.0', npm: '>=3.10.0'} + + which-boxed-primitive@1.0.2: + resolution: {integrity: sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==} + + which-builtin-type@1.1.4: + resolution: {integrity: sha512-bppkmBSsHFmIMSl8BO9TbsyzsvGjVoppt8xUiGzwiu/bhDCGxnpOKCxgqj6GuyHE0mINMDecBFPlOm2hzY084w==} + engines: {node: '>= 0.4'} + + which-collection@1.0.2: + resolution: {integrity: sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw==} + engines: {node: '>= 0.4'} + + which-typed-array@1.1.15: + resolution: {integrity: sha512-oV0jmFtUky6CXfkqehVvBP/LSWJ2sy4vWMioiENyJLePrBO/yKyV9OyJySfAKosh+RYkIl5zJCNZ8/4JncrpdA==} + engines: {node: '>= 0.4'} + + which@2.0.2: + resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: '>= 8'} + hasBin: true + + word-wrap@1.2.5: + resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==} + engines: {node: '>=0.10.0'} + + wrap-ansi@7.0.0: + resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==} + engines: {node: '>=10'} + + wrap-ansi@8.1.0: + resolution: {integrity: sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==} + engines: {node: '>=12'} + + wrappy@1.0.2: + resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} + + xmlcreate@2.0.4: + resolution: {integrity: 
sha512-nquOebG4sngPmGPICTS5EnxqhKbCmz5Ox5hsszI2T6U5qdrJizBc+0ilYSEjTSzU0yZcmvppztXe/5Al5fUwdg==} + + yallist@3.1.1: + resolution: {integrity: sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==} + + yaml@2.6.0: + resolution: {integrity: sha512-a6ae//JvKDEra2kdi1qzCyrJW/WZCgFi8ydDV+eXExl95t+5R+ijnqHJbz9tmMh8FUjx3iv2fCQ4dclAQlO2UQ==} + engines: {node: '>= 14'} + hasBin: true + + yocto-queue@0.1.0: + resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} + engines: {node: '>=10'} + + zod@3.23.8: + resolution: {integrity: sha512-XBx9AXhXktjUqnepgTiE5flcKIYWi/rme0Eaj+5Y0lftuGBq+jyRu/md4WnuxqgP1ubdpNCsYEYPxrzVHD8d6g==} + +snapshots: + + '@agora-js/media@4.22.0': + dependencies: + '@agora-js/report': 4.22.0 + '@agora-js/shared': 4.22.0 + agora-rte-extension: 1.2.4 + axios: 1.7.7 + webrtc-adapter: 8.2.0 + transitivePeerDependencies: + - debug + optional: true + + '@agora-js/media@4.22.2': + dependencies: + '@agora-js/report': 4.22.2 + '@agora-js/shared': 4.22.2 + agora-rte-extension: 1.2.4 + axios: 1.7.7 + webrtc-adapter: 8.2.0 + transitivePeerDependencies: + - debug + + '@agora-js/report@4.22.0': + dependencies: + '@agora-js/shared': 4.22.0 + axios: 1.7.7 + transitivePeerDependencies: + - debug + optional: true + + '@agora-js/report@4.22.2': + dependencies: + '@agora-js/shared': 4.22.2 + axios: 1.7.7 + transitivePeerDependencies: + - debug + + '@agora-js/shared@4.22.0': + dependencies: + axios: 1.7.7 + ua-parser-js: 0.7.39 + transitivePeerDependencies: + - debug + optional: true + + '@agora-js/shared@4.22.2': + dependencies: + axios: 1.7.7 + ua-parser-js: 0.7.39 + transitivePeerDependencies: + - debug + + '@alloc/quick-lru@5.2.0': {} + + '@ampproject/remapping@2.3.0': + dependencies: + '@jridgewell/gen-mapping': 0.3.5 + '@jridgewell/trace-mapping': 0.3.25 + + '@babel/code-frame@7.26.2': + dependencies: + '@babel/helper-validator-identifier': 7.25.9 + js-tokens: 4.0.0 + picocolors: 1.1.1 + + '@babel/compat-data@7.26.2': {} + + '@babel/core@7.26.0': + dependencies: + '@ampproject/remapping': 2.3.0 + '@babel/code-frame': 7.26.2 + '@babel/generator': 7.26.2 + '@babel/helper-compilation-targets': 7.25.9 + '@babel/helper-module-transforms': 7.26.0(@babel/core@7.26.0) + '@babel/helpers': 7.26.0 + '@babel/parser': 7.26.2 + '@babel/template': 7.25.9 + '@babel/traverse': 7.25.9 + '@babel/types': 7.26.0 + convert-source-map: 2.0.0 + debug: 4.3.7 + gensync: 1.0.0-beta.2 + json5: 2.2.3 + semver: 6.3.1 + transitivePeerDependencies: + - supports-color + + '@babel/generator@7.26.2': + dependencies: + '@babel/parser': 7.26.2 + '@babel/types': 7.26.0 + '@jridgewell/gen-mapping': 0.3.5 + '@jridgewell/trace-mapping': 0.3.25 + jsesc: 3.0.2 + + '@babel/helper-annotate-as-pure@7.25.9': + dependencies: + '@babel/types': 7.26.0 + + '@babel/helper-builder-binary-assignment-operator-visitor@7.25.9': + dependencies: + '@babel/traverse': 7.25.9 + '@babel/types': 7.26.0 + transitivePeerDependencies: + - supports-color + + '@babel/helper-compilation-targets@7.25.9': + dependencies: + '@babel/compat-data': 7.26.2 + '@babel/helper-validator-option': 7.25.9 + browserslist: 4.24.2 + lru-cache: 5.1.1 + semver: 6.3.1 + + '@babel/helper-create-class-features-plugin@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-annotate-as-pure': 7.25.9 + '@babel/helper-member-expression-to-functions': 7.25.9 + '@babel/helper-optimise-call-expression': 7.25.9 + '@babel/helper-replace-supers': 
7.25.9(@babel/core@7.26.0) + '@babel/helper-skip-transparent-expression-wrappers': 7.25.9 + '@babel/traverse': 7.25.9 + semver: 6.3.1 + transitivePeerDependencies: + - supports-color + + '@babel/helper-create-regexp-features-plugin@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-annotate-as-pure': 7.25.9 + regexpu-core: 6.1.1 + semver: 6.3.1 + + '@babel/helper-define-polyfill-provider@0.6.2(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-compilation-targets': 7.25.9 + '@babel/helper-plugin-utils': 7.25.9 + debug: 4.3.7 + lodash.debounce: 4.0.8 + resolve: 1.22.8 + transitivePeerDependencies: + - supports-color + + '@babel/helper-member-expression-to-functions@7.25.9': + dependencies: + '@babel/traverse': 7.25.9 + '@babel/types': 7.26.0 + transitivePeerDependencies: + - supports-color + + '@babel/helper-module-imports@7.25.9': + dependencies: + '@babel/traverse': 7.25.9 + '@babel/types': 7.26.0 + transitivePeerDependencies: + - supports-color + + '@babel/helper-module-transforms@7.26.0(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-module-imports': 7.25.9 + '@babel/helper-validator-identifier': 7.25.9 + '@babel/traverse': 7.25.9 + transitivePeerDependencies: + - supports-color + + '@babel/helper-optimise-call-expression@7.25.9': + dependencies: + '@babel/types': 7.26.0 + + '@babel/helper-plugin-utils@7.25.9': {} + + '@babel/helper-remap-async-to-generator@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-annotate-as-pure': 7.25.9 + '@babel/helper-wrap-function': 7.25.9 + '@babel/traverse': 7.25.9 + transitivePeerDependencies: + - supports-color + + '@babel/helper-replace-supers@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-member-expression-to-functions': 7.25.9 + '@babel/helper-optimise-call-expression': 7.25.9 + '@babel/traverse': 7.25.9 + transitivePeerDependencies: + - supports-color + + '@babel/helper-simple-access@7.25.9': + dependencies: + '@babel/traverse': 7.25.9 + '@babel/types': 7.26.0 + transitivePeerDependencies: + - supports-color + + '@babel/helper-skip-transparent-expression-wrappers@7.25.9': + dependencies: + '@babel/traverse': 7.25.9 + '@babel/types': 7.26.0 + transitivePeerDependencies: + - supports-color + + '@babel/helper-string-parser@7.25.9': {} + + '@babel/helper-validator-identifier@7.25.9': {} + + '@babel/helper-validator-option@7.25.9': {} + + '@babel/helper-wrap-function@7.25.9': + dependencies: + '@babel/template': 7.25.9 + '@babel/traverse': 7.25.9 + '@babel/types': 7.26.0 + transitivePeerDependencies: + - supports-color + + '@babel/helpers@7.26.0': + dependencies: + '@babel/template': 7.25.9 + '@babel/types': 7.26.0 + + '@babel/parser@7.26.2': + dependencies: + '@babel/types': 7.26.0 + + '@babel/plugin-bugfix-firefox-class-in-computed-class-key@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + '@babel/traverse': 7.25.9 + transitivePeerDependencies: + - supports-color + + '@babel/plugin-bugfix-safari-class-field-initializer-scope@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@7.25.9(@babel/core@7.26.0)': + 
dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + '@babel/helper-skip-transparent-expression-wrappers': 7.25.9 + '@babel/plugin-transform-optional-chaining': 7.25.9(@babel/core@7.26.0) + transitivePeerDependencies: + - supports-color + + '@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + '@babel/traverse': 7.25.9 + transitivePeerDependencies: + - supports-color + + '@babel/plugin-proposal-private-property-in-object@7.21.0-placeholder-for-preset-env.2(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + + '@babel/plugin-syntax-import-assertions@7.26.0(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-syntax-import-attributes@7.26.0(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-syntax-jsx@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-syntax-typescript@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-syntax-unicode-sets-regex@7.18.6(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-create-regexp-features-plugin': 7.25.9(@babel/core@7.26.0) + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-arrow-functions@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-async-generator-functions@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + '@babel/helper-remap-async-to-generator': 7.25.9(@babel/core@7.26.0) + '@babel/traverse': 7.25.9 + transitivePeerDependencies: + - supports-color + + '@babel/plugin-transform-async-to-generator@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-module-imports': 7.25.9 + '@babel/helper-plugin-utils': 7.25.9 + '@babel/helper-remap-async-to-generator': 7.25.9(@babel/core@7.26.0) + transitivePeerDependencies: + - supports-color + + '@babel/plugin-transform-block-scoped-functions@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-block-scoping@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-class-properties@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-create-class-features-plugin': 7.25.9(@babel/core@7.26.0) + '@babel/helper-plugin-utils': 7.25.9 + transitivePeerDependencies: + - supports-color + + '@babel/plugin-transform-class-static-block@7.26.0(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-create-class-features-plugin': 7.25.9(@babel/core@7.26.0) + '@babel/helper-plugin-utils': 7.25.9 + transitivePeerDependencies: + - supports-color + + '@babel/plugin-transform-classes@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-annotate-as-pure': 7.25.9 + '@babel/helper-compilation-targets': 7.25.9 + '@babel/helper-plugin-utils': 7.25.9 + '@babel/helper-replace-supers': 7.25.9(@babel/core@7.26.0) + '@babel/traverse': 7.25.9 + globals: 11.12.0 + transitivePeerDependencies: + - supports-color + + 
'@babel/plugin-transform-computed-properties@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + '@babel/template': 7.25.9 + + '@babel/plugin-transform-destructuring@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-dotall-regex@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-create-regexp-features-plugin': 7.25.9(@babel/core@7.26.0) + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-duplicate-keys@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-duplicate-named-capturing-groups-regex@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-create-regexp-features-plugin': 7.25.9(@babel/core@7.26.0) + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-dynamic-import@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-exponentiation-operator@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-builder-binary-assignment-operator-visitor': 7.25.9 + '@babel/helper-plugin-utils': 7.25.9 + transitivePeerDependencies: + - supports-color + + '@babel/plugin-transform-export-namespace-from@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-for-of@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + '@babel/helper-skip-transparent-expression-wrappers': 7.25.9 + transitivePeerDependencies: + - supports-color + + '@babel/plugin-transform-function-name@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-compilation-targets': 7.25.9 + '@babel/helper-plugin-utils': 7.25.9 + '@babel/traverse': 7.25.9 + transitivePeerDependencies: + - supports-color + + '@babel/plugin-transform-json-strings@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-literals@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-logical-assignment-operators@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-member-expression-literals@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-modules-amd@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-module-transforms': 7.26.0(@babel/core@7.26.0) + '@babel/helper-plugin-utils': 7.25.9 + transitivePeerDependencies: + - supports-color + + '@babel/plugin-transform-modules-commonjs@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-module-transforms': 7.26.0(@babel/core@7.26.0) + '@babel/helper-plugin-utils': 7.25.9 + '@babel/helper-simple-access': 7.25.9 + transitivePeerDependencies: + - supports-color + + '@babel/plugin-transform-modules-systemjs@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-module-transforms': 7.26.0(@babel/core@7.26.0) + '@babel/helper-plugin-utils': 7.25.9 + '@babel/helper-validator-identifier': 7.25.9 + 
'@babel/traverse': 7.25.9 + transitivePeerDependencies: + - supports-color + + '@babel/plugin-transform-modules-umd@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-module-transforms': 7.26.0(@babel/core@7.26.0) + '@babel/helper-plugin-utils': 7.25.9 + transitivePeerDependencies: + - supports-color + + '@babel/plugin-transform-named-capturing-groups-regex@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-create-regexp-features-plugin': 7.25.9(@babel/core@7.26.0) + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-new-target@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-nullish-coalescing-operator@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-numeric-separator@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-object-rest-spread@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-compilation-targets': 7.25.9 + '@babel/helper-plugin-utils': 7.25.9 + '@babel/plugin-transform-parameters': 7.25.9(@babel/core@7.26.0) + + '@babel/plugin-transform-object-super@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + '@babel/helper-replace-supers': 7.25.9(@babel/core@7.26.0) + transitivePeerDependencies: + - supports-color + + '@babel/plugin-transform-optional-catch-binding@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-optional-chaining@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + '@babel/helper-skip-transparent-expression-wrappers': 7.25.9 + transitivePeerDependencies: + - supports-color + + '@babel/plugin-transform-parameters@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-private-methods@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-create-class-features-plugin': 7.25.9(@babel/core@7.26.0) + '@babel/helper-plugin-utils': 7.25.9 + transitivePeerDependencies: + - supports-color + + '@babel/plugin-transform-private-property-in-object@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-annotate-as-pure': 7.25.9 + '@babel/helper-create-class-features-plugin': 7.25.9(@babel/core@7.26.0) + '@babel/helper-plugin-utils': 7.25.9 + transitivePeerDependencies: + - supports-color + + '@babel/plugin-transform-property-literals@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-react-constant-elements@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-react-display-name@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-react-jsx-development@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/plugin-transform-react-jsx': 7.25.9(@babel/core@7.26.0) + transitivePeerDependencies: + - supports-color + + '@babel/plugin-transform-react-jsx@7.25.9(@babel/core@7.26.0)': + dependencies: + 
'@babel/core': 7.26.0 + '@babel/helper-annotate-as-pure': 7.25.9 + '@babel/helper-module-imports': 7.25.9 + '@babel/helper-plugin-utils': 7.25.9 + '@babel/plugin-syntax-jsx': 7.25.9(@babel/core@7.26.0) + '@babel/types': 7.26.0 + transitivePeerDependencies: + - supports-color + + '@babel/plugin-transform-react-pure-annotations@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-annotate-as-pure': 7.25.9 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-regenerator@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + regenerator-transform: 0.15.2 + + '@babel/plugin-transform-regexp-modifiers@7.26.0(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-create-regexp-features-plugin': 7.25.9(@babel/core@7.26.0) + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-reserved-words@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-shorthand-properties@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-spread@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + '@babel/helper-skip-transparent-expression-wrappers': 7.25.9 + transitivePeerDependencies: + - supports-color + + '@babel/plugin-transform-sticky-regex@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-template-literals@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-typeof-symbol@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-typescript@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-annotate-as-pure': 7.25.9 + '@babel/helper-create-class-features-plugin': 7.25.9(@babel/core@7.26.0) + '@babel/helper-plugin-utils': 7.25.9 + '@babel/helper-skip-transparent-expression-wrappers': 7.25.9 + '@babel/plugin-syntax-typescript': 7.25.9(@babel/core@7.26.0) + transitivePeerDependencies: + - supports-color + + '@babel/plugin-transform-unicode-escapes@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-unicode-property-regex@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-create-regexp-features-plugin': 7.25.9(@babel/core@7.26.0) + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-unicode-regex@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-create-regexp-features-plugin': 7.25.9(@babel/core@7.26.0) + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/plugin-transform-unicode-sets-regex@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-create-regexp-features-plugin': 7.25.9(@babel/core@7.26.0) + '@babel/helper-plugin-utils': 7.25.9 + + '@babel/preset-env@7.26.0(@babel/core@7.26.0)': + dependencies: + '@babel/compat-data': 7.26.2 + '@babel/core': 7.26.0 + '@babel/helper-compilation-targets': 7.25.9 + '@babel/helper-plugin-utils': 7.25.9 + '@babel/helper-validator-option': 7.25.9 + '@babel/plugin-bugfix-firefox-class-in-computed-class-key': 7.25.9(@babel/core@7.26.0) + 
'@babel/plugin-bugfix-safari-class-field-initializer-scope': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-proposal-private-property-in-object': 7.21.0-placeholder-for-preset-env.2(@babel/core@7.26.0) + '@babel/plugin-syntax-import-assertions': 7.26.0(@babel/core@7.26.0) + '@babel/plugin-syntax-import-attributes': 7.26.0(@babel/core@7.26.0) + '@babel/plugin-syntax-unicode-sets-regex': 7.18.6(@babel/core@7.26.0) + '@babel/plugin-transform-arrow-functions': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-async-generator-functions': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-async-to-generator': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-block-scoped-functions': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-block-scoping': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-class-properties': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-class-static-block': 7.26.0(@babel/core@7.26.0) + '@babel/plugin-transform-classes': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-computed-properties': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-destructuring': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-dotall-regex': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-duplicate-keys': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-duplicate-named-capturing-groups-regex': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-dynamic-import': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-exponentiation-operator': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-export-namespace-from': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-for-of': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-function-name': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-json-strings': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-literals': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-logical-assignment-operators': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-member-expression-literals': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-modules-amd': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-modules-commonjs': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-modules-systemjs': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-modules-umd': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-named-capturing-groups-regex': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-new-target': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-nullish-coalescing-operator': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-numeric-separator': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-object-rest-spread': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-object-super': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-optional-catch-binding': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-optional-chaining': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-parameters': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-private-methods': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-private-property-in-object': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-property-literals': 7.25.9(@babel/core@7.26.0) + 
'@babel/plugin-transform-regenerator': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-regexp-modifiers': 7.26.0(@babel/core@7.26.0) + '@babel/plugin-transform-reserved-words': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-shorthand-properties': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-spread': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-sticky-regex': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-template-literals': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-typeof-symbol': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-unicode-escapes': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-unicode-property-regex': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-unicode-regex': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-unicode-sets-regex': 7.25.9(@babel/core@7.26.0) + '@babel/preset-modules': 0.1.6-no-external-plugins(@babel/core@7.26.0) + babel-plugin-polyfill-corejs2: 0.4.11(@babel/core@7.26.0) + babel-plugin-polyfill-corejs3: 0.10.6(@babel/core@7.26.0) + babel-plugin-polyfill-regenerator: 0.6.2(@babel/core@7.26.0) + core-js-compat: 3.39.0 + semver: 6.3.1 + transitivePeerDependencies: + - supports-color + + '@babel/preset-modules@0.1.6-no-external-plugins(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + '@babel/types': 7.26.0 + esutils: 2.0.3 + + '@babel/preset-react@7.25.9(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + '@babel/helper-validator-option': 7.25.9 + '@babel/plugin-transform-react-display-name': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-react-jsx': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-react-jsx-development': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-react-pure-annotations': 7.25.9(@babel/core@7.26.0) + transitivePeerDependencies: + - supports-color + + '@babel/preset-typescript@7.26.0(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-plugin-utils': 7.25.9 + '@babel/helper-validator-option': 7.25.9 + '@babel/plugin-syntax-jsx': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-modules-commonjs': 7.25.9(@babel/core@7.26.0) + '@babel/plugin-transform-typescript': 7.25.9(@babel/core@7.26.0) + transitivePeerDependencies: + - supports-color + + '@babel/runtime@7.26.0': + dependencies: + regenerator-runtime: 0.14.1 + + '@babel/template@7.25.9': + dependencies: + '@babel/code-frame': 7.26.2 + '@babel/parser': 7.26.2 + '@babel/types': 7.26.0 + + '@babel/traverse@7.25.9': + dependencies: + '@babel/code-frame': 7.26.2 + '@babel/generator': 7.26.2 + '@babel/parser': 7.26.2 + '@babel/template': 7.25.9 + '@babel/types': 7.26.0 + debug: 4.3.7 + globals: 11.12.0 + transitivePeerDependencies: + - supports-color + + '@babel/types@7.26.0': + dependencies: + '@babel/helper-string-parser': 7.25.9 + '@babel/helper-validator-identifier': 7.25.9 + + '@emnapi/runtime@1.3.1': + dependencies: + tslib: 2.8.1 + optional: true + + '@eslint-community/eslint-utils@4.4.1(eslint@8.57.1)': + dependencies: + eslint: 8.57.1 + eslint-visitor-keys: 3.4.3 + + '@eslint-community/regexpp@4.12.1': {} + + '@eslint/eslintrc@2.1.4': + dependencies: + ajv: 6.12.6 + debug: 4.3.7 + espree: 9.6.1 + globals: 13.24.0 + ignore: 5.3.2 + import-fresh: 3.3.0 + js-yaml: 4.1.0 + minimatch: 3.1.2 + strip-json-comments: 3.1.1 + transitivePeerDependencies: + - supports-color + + '@eslint/js@8.57.1': {} + + '@floating-ui/core@1.6.8': + dependencies: + '@floating-ui/utils': 
0.2.8 + + '@floating-ui/dom@1.6.12': + dependencies: + '@floating-ui/core': 1.6.8 + '@floating-ui/utils': 0.2.8 + + '@floating-ui/react-dom@2.1.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@floating-ui/dom': 1.6.12 + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + + '@floating-ui/utils@0.2.8': {} + + '@hookform/resolvers@3.9.1(react-hook-form@7.53.2(react@18.3.1))': + dependencies: + react-hook-form: 7.53.2(react@18.3.1) + + '@humanwhocodes/config-array@0.13.0': + dependencies: + '@humanwhocodes/object-schema': 2.0.3 + debug: 4.3.7 + minimatch: 3.1.2 + transitivePeerDependencies: + - supports-color + + '@humanwhocodes/module-importer@1.0.1': {} + + '@humanwhocodes/object-schema@2.0.3': {} + + '@img/sharp-darwin-arm64@0.33.5': + optionalDependencies: + '@img/sharp-libvips-darwin-arm64': 1.0.4 + optional: true + + '@img/sharp-darwin-x64@0.33.5': + optionalDependencies: + '@img/sharp-libvips-darwin-x64': 1.0.4 + optional: true + + '@img/sharp-libvips-darwin-arm64@1.0.4': + optional: true + + '@img/sharp-libvips-darwin-x64@1.0.4': + optional: true + + '@img/sharp-libvips-linux-arm64@1.0.4': + optional: true + + '@img/sharp-libvips-linux-arm@1.0.5': + optional: true + + '@img/sharp-libvips-linux-s390x@1.0.4': + optional: true + + '@img/sharp-libvips-linux-x64@1.0.4': + optional: true + + '@img/sharp-libvips-linuxmusl-arm64@1.0.4': + optional: true + + '@img/sharp-libvips-linuxmusl-x64@1.0.4': + optional: true + + '@img/sharp-linux-arm64@0.33.5': + optionalDependencies: + '@img/sharp-libvips-linux-arm64': 1.0.4 + optional: true + + '@img/sharp-linux-arm@0.33.5': + optionalDependencies: + '@img/sharp-libvips-linux-arm': 1.0.5 + optional: true + + '@img/sharp-linux-s390x@0.33.5': + optionalDependencies: + '@img/sharp-libvips-linux-s390x': 1.0.4 + optional: true + + '@img/sharp-linux-x64@0.33.5': + optionalDependencies: + '@img/sharp-libvips-linux-x64': 1.0.4 + optional: true + + '@img/sharp-linuxmusl-arm64@0.33.5': + optionalDependencies: + '@img/sharp-libvips-linuxmusl-arm64': 1.0.4 + optional: true + + '@img/sharp-linuxmusl-x64@0.33.5': + optionalDependencies: + '@img/sharp-libvips-linuxmusl-x64': 1.0.4 + optional: true + + '@img/sharp-wasm32@0.33.5': + dependencies: + '@emnapi/runtime': 1.3.1 + optional: true + + '@img/sharp-win32-ia32@0.33.5': + optional: true + + '@img/sharp-win32-x64@0.33.5': + optional: true + + '@isaacs/cliui@8.0.2': + dependencies: + string-width: 5.1.2 + string-width-cjs: string-width@4.2.3 + strip-ansi: 7.1.0 + strip-ansi-cjs: strip-ansi@6.0.1 + wrap-ansi: 8.1.0 + wrap-ansi-cjs: wrap-ansi@7.0.0 + + '@jridgewell/gen-mapping@0.3.5': + dependencies: + '@jridgewell/set-array': 1.2.1 + '@jridgewell/sourcemap-codec': 1.5.0 + '@jridgewell/trace-mapping': 0.3.25 + + '@jridgewell/resolve-uri@3.1.2': {} + + '@jridgewell/set-array@1.2.1': {} + + '@jridgewell/sourcemap-codec@1.5.0': {} + + '@jridgewell/trace-mapping@0.3.25': + dependencies: + '@jridgewell/resolve-uri': 3.1.2 + '@jridgewell/sourcemap-codec': 1.5.0 + + '@jsdoc/salty@0.2.8': + dependencies: + lodash: 4.17.21 + + '@minko-fe/lodash-pro@0.3.3': + dependencies: + '@types/lodash-es': 4.17.12 + deepmerge: 4.3.1 + defu: 6.1.4 + destr: 2.0.3 + lodash-es: 4.17.21 + p-is-promise: 4.0.0 + + '@minko-fe/postcss-pxtoviewport@1.5.0(postcss@8.4.48)': + dependencies: + '@minko-fe/lodash-pro': 0.3.3 + decode-uri-component: 0.4.1 + postcss: 8.4.48 + split-on-first: 3.0.0 + + '@next/env@15.0.3': {} + + '@next/eslint-plugin-next@15.0.3': + dependencies: + fast-glob: 3.3.1 + + '@next/swc-darwin-arm64@15.0.3': + 
optional: true + + '@next/swc-darwin-x64@15.0.3': + optional: true + + '@next/swc-linux-arm64-gnu@15.0.3': + optional: true + + '@next/swc-linux-arm64-musl@15.0.3': + optional: true + + '@next/swc-linux-x64-gnu@15.0.3': + optional: true + + '@next/swc-linux-x64-musl@15.0.3': + optional: true + + '@next/swc-win32-arm64-msvc@15.0.3': + optional: true + + '@next/swc-win32-x64-msvc@15.0.3': + optional: true + + '@nodelib/fs.scandir@2.1.5': + dependencies: + '@nodelib/fs.stat': 2.0.5 + run-parallel: 1.2.0 + + '@nodelib/fs.stat@2.0.5': {} + + '@nodelib/fs.walk@1.2.8': + dependencies: + '@nodelib/fs.scandir': 2.1.5 + fastq: 1.17.1 + + '@nolyfill/is-core-module@1.0.39': {} + + '@parcel/watcher-android-arm64@2.5.0': + optional: true + + '@parcel/watcher-darwin-arm64@2.5.0': + optional: true + + '@parcel/watcher-darwin-x64@2.5.0': + optional: true + + '@parcel/watcher-freebsd-x64@2.5.0': + optional: true + + '@parcel/watcher-linux-arm-glibc@2.5.0': + optional: true + + '@parcel/watcher-linux-arm-musl@2.5.0': + optional: true + + '@parcel/watcher-linux-arm64-glibc@2.5.0': + optional: true + + '@parcel/watcher-linux-arm64-musl@2.5.0': + optional: true + + '@parcel/watcher-linux-x64-glibc@2.5.0': + optional: true + + '@parcel/watcher-linux-x64-musl@2.5.0': + optional: true + + '@parcel/watcher-win32-arm64@2.5.0': + optional: true + + '@parcel/watcher-win32-ia32@2.5.0': + optional: true + + '@parcel/watcher-win32-x64@2.5.0': + optional: true + + '@parcel/watcher@2.5.0': + dependencies: + detect-libc: 1.0.3 + is-glob: 4.0.3 + micromatch: 4.0.8 + node-addon-api: 7.1.1 + optionalDependencies: + '@parcel/watcher-android-arm64': 2.5.0 + '@parcel/watcher-darwin-arm64': 2.5.0 + '@parcel/watcher-darwin-x64': 2.5.0 + '@parcel/watcher-freebsd-x64': 2.5.0 + '@parcel/watcher-linux-arm-glibc': 2.5.0 + '@parcel/watcher-linux-arm-musl': 2.5.0 + '@parcel/watcher-linux-arm64-glibc': 2.5.0 + '@parcel/watcher-linux-arm64-musl': 2.5.0 + '@parcel/watcher-linux-x64-glibc': 2.5.0 + '@parcel/watcher-linux-x64-musl': 2.5.0 + '@parcel/watcher-win32-arm64': 2.5.0 + '@parcel/watcher-win32-ia32': 2.5.0 + '@parcel/watcher-win32-x64': 2.5.0 + optional: true + + '@pkgjs/parseargs@0.11.0': + optional: true + + '@protobufjs/aspromise@1.1.2': {} + + '@protobufjs/base64@1.1.2': {} + + '@protobufjs/codegen@2.0.4': {} + + '@protobufjs/eventemitter@1.1.0': {} + + '@protobufjs/fetch@1.1.0': + dependencies: + '@protobufjs/aspromise': 1.1.2 + '@protobufjs/inquire': 1.1.0 + + '@protobufjs/float@1.0.2': {} + + '@protobufjs/inquire@1.1.0': {} + + '@protobufjs/path@1.1.2': {} + + '@protobufjs/pool@1.1.0': {} + + '@protobufjs/utf8@1.1.0': {} + + '@radix-ui/number@1.1.0': {} + + '@radix-ui/primitive@1.1.0': {} + + '@radix-ui/react-arrow@1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + optionalDependencies: + '@types/react': 18.3.12 + '@types/react-dom': 18.3.1 + + '@radix-ui/react-avatar@1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/react-context': 1.1.1(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.12)(react@18.3.1) + 
'@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.12)(react@18.3.1) + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + optionalDependencies: + '@types/react': 18.3.12 + '@types/react-dom': 18.3.1 + + '@radix-ui/react-collection@1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-context': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-slot': 1.1.0(@types/react@18.3.12)(react@18.3.1) + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + optionalDependencies: + '@types/react': 18.3.12 + '@types/react-dom': 18.3.1 + + '@radix-ui/react-compose-refs@1.1.0(@types/react@18.3.12)(react@18.3.1)': + dependencies: + react: 18.3.1 + optionalDependencies: + '@types/react': 18.3.12 + + '@radix-ui/react-context@1.1.0(@types/react@18.3.12)(react@18.3.1)': + dependencies: + react: 18.3.1 + optionalDependencies: + '@types/react': 18.3.12 + + '@radix-ui/react-context@1.1.1(@types/react@18.3.12)(react@18.3.1)': + dependencies: + react: 18.3.1 + optionalDependencies: + '@types/react': 18.3.12 + + '@radix-ui/react-dialog@1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/primitive': 1.1.0 + '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-context': 1.1.1(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-dismissable-layer': 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-focus-guards': 1.1.1(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-focus-scope': 1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-id': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-portal': 1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-presence': 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-slot': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.12)(react@18.3.1) + aria-hidden: 1.2.4 + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + react-remove-scroll: 2.6.0(@types/react@18.3.12)(react@18.3.1) + optionalDependencies: + '@types/react': 18.3.12 + '@types/react-dom': 18.3.1 + + '@radix-ui/react-direction@1.1.0(@types/react@18.3.12)(react@18.3.1)': + dependencies: + react: 18.3.1 + optionalDependencies: + '@types/react': 18.3.12 + + '@radix-ui/react-dismissable-layer@1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/primitive': 1.1.0 + '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-use-escape-keydown': 1.1.0(@types/react@18.3.12)(react@18.3.1) + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + 
optionalDependencies: + '@types/react': 18.3.12 + '@types/react-dom': 18.3.1 + + '@radix-ui/react-focus-guards@1.1.1(@types/react@18.3.12)(react@18.3.1)': + dependencies: + react: 18.3.1 + optionalDependencies: + '@types/react': 18.3.12 + + '@radix-ui/react-focus-scope@1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.12)(react@18.3.1) + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + optionalDependencies: + '@types/react': 18.3.12 + '@types/react-dom': 18.3.1 + + '@radix-ui/react-icons@1.3.1(react@18.3.1)': + dependencies: + react: 18.3.1 + + '@radix-ui/react-id@1.1.0(@types/react@18.3.12)(react@18.3.1)': + dependencies: + '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.12)(react@18.3.1) + react: 18.3.1 + optionalDependencies: + '@types/react': 18.3.12 + + '@radix-ui/react-label@2.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + optionalDependencies: + '@types/react': 18.3.12 + '@types/react-dom': 18.3.1 + + '@radix-ui/react-popover@1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/primitive': 1.1.0 + '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-context': 1.1.1(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-dismissable-layer': 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-focus-guards': 1.1.1(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-focus-scope': 1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-id': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-popper': 1.2.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-portal': 1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-presence': 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-slot': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.12)(react@18.3.1) + aria-hidden: 1.2.4 + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + react-remove-scroll: 2.6.0(@types/react@18.3.12)(react@18.3.1) + optionalDependencies: + '@types/react': 18.3.12 + '@types/react-dom': 18.3.1 + + '@radix-ui/react-popper@1.2.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@floating-ui/react-dom': 2.1.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-arrow': 1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-compose-refs': 
1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-context': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-use-rect': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-use-size': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/rect': 1.1.0 + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + optionalDependencies: + '@types/react': 18.3.12 + '@types/react-dom': 18.3.1 + + '@radix-ui/react-portal@1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.12)(react@18.3.1) + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + optionalDependencies: + '@types/react': 18.3.12 + '@types/react-dom': 18.3.1 + + '@radix-ui/react-presence@1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.12)(react@18.3.1) + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + optionalDependencies: + '@types/react': 18.3.12 + '@types/react-dom': 18.3.1 + + '@radix-ui/react-primitive@2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/react-slot': 1.1.0(@types/react@18.3.12)(react@18.3.1) + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + optionalDependencies: + '@types/react': 18.3.12 + '@types/react-dom': 18.3.1 + + '@radix-ui/react-roving-focus@1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/primitive': 1.1.0 + '@radix-ui/react-collection': 1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-context': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-direction': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-id': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.12)(react@18.3.1) + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + optionalDependencies: + '@types/react': 18.3.12 + '@types/react-dom': 18.3.1 + + '@radix-ui/react-select@2.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/number': 1.1.0 + '@radix-ui/primitive': 1.1.0 + '@radix-ui/react-collection': 1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-context': 1.1.1(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-direction': 1.1.0(@types/react@18.3.12)(react@18.3.1) + 
'@radix-ui/react-dismissable-layer': 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-focus-guards': 1.1.1(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-focus-scope': 1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-id': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-popper': 1.2.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-portal': 1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-slot': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-use-previous': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-visually-hidden': 1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + aria-hidden: 1.2.4 + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + react-remove-scroll: 2.6.0(@types/react@18.3.12)(react@18.3.1) + optionalDependencies: + '@types/react': 18.3.12 + '@types/react-dom': 18.3.1 + + '@radix-ui/react-slot@1.1.0(@types/react@18.3.12)(react@18.3.1)': + dependencies: + '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) + react: 18.3.1 + optionalDependencies: + '@types/react': 18.3.12 + + '@radix-ui/react-tabs@1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/primitive': 1.1.0 + '@radix-ui/react-context': 1.1.1(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-direction': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-id': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-presence': 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-roving-focus': 1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.12)(react@18.3.1) + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + optionalDependencies: + '@types/react': 18.3.12 + '@types/react-dom': 18.3.1 + + '@radix-ui/react-tooltip@1.1.3(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/primitive': 1.1.0 + '@radix-ui/react-compose-refs': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-context': 1.1.1(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-dismissable-layer': 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-id': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-popper': 1.2.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-portal': 1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + 
'@radix-ui/react-presence': 1.1.1(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@radix-ui/react-slot': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.12)(react@18.3.1) + '@radix-ui/react-visually-hidden': 1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + optionalDependencies: + '@types/react': 18.3.12 + '@types/react-dom': 18.3.1 + + '@radix-ui/react-use-callback-ref@1.1.0(@types/react@18.3.12)(react@18.3.1)': + dependencies: + react: 18.3.1 + optionalDependencies: + '@types/react': 18.3.12 + + '@radix-ui/react-use-controllable-state@1.1.0(@types/react@18.3.12)(react@18.3.1)': + dependencies: + '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.12)(react@18.3.1) + react: 18.3.1 + optionalDependencies: + '@types/react': 18.3.12 + + '@radix-ui/react-use-escape-keydown@1.1.0(@types/react@18.3.12)(react@18.3.1)': + dependencies: + '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.12)(react@18.3.1) + react: 18.3.1 + optionalDependencies: + '@types/react': 18.3.12 + + '@radix-ui/react-use-layout-effect@1.1.0(@types/react@18.3.12)(react@18.3.1)': + dependencies: + react: 18.3.1 + optionalDependencies: + '@types/react': 18.3.12 + + '@radix-ui/react-use-previous@1.1.0(@types/react@18.3.12)(react@18.3.1)': + dependencies: + react: 18.3.1 + optionalDependencies: + '@types/react': 18.3.12 + + '@radix-ui/react-use-rect@1.1.0(@types/react@18.3.12)(react@18.3.1)': + dependencies: + '@radix-ui/rect': 1.1.0 + react: 18.3.1 + optionalDependencies: + '@types/react': 18.3.12 + + '@radix-ui/react-use-size@1.1.0(@types/react@18.3.12)(react@18.3.1)': + dependencies: + '@radix-ui/react-use-layout-effect': 1.1.0(@types/react@18.3.12)(react@18.3.1) + react: 18.3.1 + optionalDependencies: + '@types/react': 18.3.12 + + '@radix-ui/react-visually-hidden@1.1.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + optionalDependencies: + '@types/react': 18.3.12 + '@types/react-dom': 18.3.1 + + '@radix-ui/rect@1.1.0': {} + + '@reduxjs/toolkit@2.3.0(react-redux@9.1.2(@types/react@18.3.12)(react@18.3.1)(redux@5.0.1))(react@18.3.1)': + dependencies: + immer: 10.1.1 + redux: 5.0.1 + redux-thunk: 3.1.0(redux@5.0.1) + reselect: 5.1.1 + optionalDependencies: + react: 18.3.1 + react-redux: 9.1.2(@types/react@18.3.12)(react@18.3.1)(redux@5.0.1) + + '@rtsao/scc@1.1.0': {} + + '@rushstack/eslint-patch@1.10.4': {} + + '@svgr/babel-plugin-add-jsx-attribute@8.0.0(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + + '@svgr/babel-plugin-remove-jsx-attribute@8.0.0(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + + '@svgr/babel-plugin-remove-jsx-empty-expression@8.0.0(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + + '@svgr/babel-plugin-replace-jsx-attribute-value@8.0.0(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + + '@svgr/babel-plugin-svg-dynamic-title@8.0.0(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + + 
'@svgr/babel-plugin-svg-em-dimensions@8.0.0(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + + '@svgr/babel-plugin-transform-react-native-svg@8.1.0(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + + '@svgr/babel-plugin-transform-svg-component@8.0.0(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + + '@svgr/babel-preset@8.1.0(@babel/core@7.26.0)': + dependencies: + '@babel/core': 7.26.0 + '@svgr/babel-plugin-add-jsx-attribute': 8.0.0(@babel/core@7.26.0) + '@svgr/babel-plugin-remove-jsx-attribute': 8.0.0(@babel/core@7.26.0) + '@svgr/babel-plugin-remove-jsx-empty-expression': 8.0.0(@babel/core@7.26.0) + '@svgr/babel-plugin-replace-jsx-attribute-value': 8.0.0(@babel/core@7.26.0) + '@svgr/babel-plugin-svg-dynamic-title': 8.0.0(@babel/core@7.26.0) + '@svgr/babel-plugin-svg-em-dimensions': 8.0.0(@babel/core@7.26.0) + '@svgr/babel-plugin-transform-react-native-svg': 8.1.0(@babel/core@7.26.0) + '@svgr/babel-plugin-transform-svg-component': 8.0.0(@babel/core@7.26.0) + + '@svgr/core@8.1.0(typescript@5.6.3)': + dependencies: + '@babel/core': 7.26.0 + '@svgr/babel-preset': 8.1.0(@babel/core@7.26.0) + camelcase: 6.3.0 + cosmiconfig: 8.3.6(typescript@5.6.3) + snake-case: 3.0.4 + transitivePeerDependencies: + - supports-color + - typescript + + '@svgr/hast-util-to-babel-ast@8.0.0': + dependencies: + '@babel/types': 7.26.0 + entities: 4.5.0 + + '@svgr/plugin-jsx@8.1.0(@svgr/core@8.1.0(typescript@5.6.3))': + dependencies: + '@babel/core': 7.26.0 + '@svgr/babel-preset': 8.1.0(@babel/core@7.26.0) + '@svgr/core': 8.1.0(typescript@5.6.3) + '@svgr/hast-util-to-babel-ast': 8.0.0 + svg-parser: 2.0.4 + transitivePeerDependencies: + - supports-color + + '@svgr/plugin-svgo@8.1.0(@svgr/core@8.1.0(typescript@5.6.3))(typescript@5.6.3)': + dependencies: + '@svgr/core': 8.1.0(typescript@5.6.3) + cosmiconfig: 8.3.6(typescript@5.6.3) + deepmerge: 4.3.1 + svgo: 3.3.2 + transitivePeerDependencies: + - typescript + + '@svgr/webpack@8.1.0(typescript@5.6.3)': + dependencies: + '@babel/core': 7.26.0 + '@babel/plugin-transform-react-constant-elements': 7.25.9(@babel/core@7.26.0) + '@babel/preset-env': 7.26.0(@babel/core@7.26.0) + '@babel/preset-react': 7.25.9(@babel/core@7.26.0) + '@babel/preset-typescript': 7.26.0(@babel/core@7.26.0) + '@svgr/core': 8.1.0(typescript@5.6.3) + '@svgr/plugin-jsx': 8.1.0(@svgr/core@8.1.0(typescript@5.6.3)) + '@svgr/plugin-svgo': 8.1.0(@svgr/core@8.1.0(typescript@5.6.3))(typescript@5.6.3) + transitivePeerDependencies: + - supports-color + - typescript + + '@swc/counter@0.1.3': {} + + '@swc/helpers@0.5.13': + dependencies: + tslib: 2.8.1 + + '@trysound/sax@0.2.0': {} + + '@types/hoist-non-react-statics@3.3.5': + dependencies: + '@types/react': 18.3.12 + hoist-non-react-statics: 3.3.2 + + '@types/json5@0.0.29': {} + + '@types/linkify-it@5.0.0': {} + + '@types/lodash-es@4.17.12': + dependencies: + '@types/lodash': 4.17.13 + + '@types/lodash@4.17.13': {} + + '@types/markdown-it@14.1.2': + dependencies: + '@types/linkify-it': 5.0.0 + '@types/mdurl': 2.0.0 + + '@types/mdurl@2.0.0': {} + + '@types/node@20.17.6': + dependencies: + undici-types: 6.19.8 + + '@types/prop-types@15.7.13': {} + + '@types/react-dom@18.3.1': + dependencies: + '@types/react': 18.3.12 + + '@types/react-redux@7.1.34': + dependencies: + '@types/hoist-non-react-statics': 3.3.5 + '@types/react': 18.3.12 + hoist-non-react-statics: 3.3.2 + redux: 4.2.1 + + '@types/react@18.3.12': + dependencies: + '@types/prop-types': 15.7.13 + csstype: 3.1.3 + + '@types/use-sync-external-store@0.0.3': {} + + 
'@typescript-eslint/eslint-plugin@8.13.0(@typescript-eslint/parser@8.13.0(eslint@8.57.1)(typescript@5.6.3))(eslint@8.57.1)(typescript@5.6.3)': + dependencies: + '@eslint-community/regexpp': 4.12.1 + '@typescript-eslint/parser': 8.13.0(eslint@8.57.1)(typescript@5.6.3) + '@typescript-eslint/scope-manager': 8.13.0 + '@typescript-eslint/type-utils': 8.13.0(eslint@8.57.1)(typescript@5.6.3) + '@typescript-eslint/utils': 8.13.0(eslint@8.57.1)(typescript@5.6.3) + '@typescript-eslint/visitor-keys': 8.13.0 + eslint: 8.57.1 + graphemer: 1.4.0 + ignore: 5.3.2 + natural-compare: 1.4.0 + ts-api-utils: 1.4.0(typescript@5.6.3) + optionalDependencies: + typescript: 5.6.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/parser@8.13.0(eslint@8.57.1)(typescript@5.6.3)': + dependencies: + '@typescript-eslint/scope-manager': 8.13.0 + '@typescript-eslint/types': 8.13.0 + '@typescript-eslint/typescript-estree': 8.13.0(typescript@5.6.3) + '@typescript-eslint/visitor-keys': 8.13.0 + debug: 4.3.7 + eslint: 8.57.1 + optionalDependencies: + typescript: 5.6.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/scope-manager@8.13.0': + dependencies: + '@typescript-eslint/types': 8.13.0 + '@typescript-eslint/visitor-keys': 8.13.0 + + '@typescript-eslint/type-utils@8.13.0(eslint@8.57.1)(typescript@5.6.3)': + dependencies: + '@typescript-eslint/typescript-estree': 8.13.0(typescript@5.6.3) + '@typescript-eslint/utils': 8.13.0(eslint@8.57.1)(typescript@5.6.3) + debug: 4.3.7 + ts-api-utils: 1.4.0(typescript@5.6.3) + optionalDependencies: + typescript: 5.6.3 + transitivePeerDependencies: + - eslint + - supports-color + + '@typescript-eslint/types@8.13.0': {} + + '@typescript-eslint/typescript-estree@8.13.0(typescript@5.6.3)': + dependencies: + '@typescript-eslint/types': 8.13.0 + '@typescript-eslint/visitor-keys': 8.13.0 + debug: 4.3.7 + fast-glob: 3.3.2 + is-glob: 4.0.3 + minimatch: 9.0.5 + semver: 7.6.3 + ts-api-utils: 1.4.0(typescript@5.6.3) + optionalDependencies: + typescript: 5.6.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/utils@8.13.0(eslint@8.57.1)(typescript@5.6.3)': + dependencies: + '@eslint-community/eslint-utils': 4.4.1(eslint@8.57.1) + '@typescript-eslint/scope-manager': 8.13.0 + '@typescript-eslint/types': 8.13.0 + '@typescript-eslint/typescript-estree': 8.13.0(typescript@5.6.3) + eslint: 8.57.1 + transitivePeerDependencies: + - supports-color + - typescript + + '@typescript-eslint/visitor-keys@8.13.0': + dependencies: + '@typescript-eslint/types': 8.13.0 + eslint-visitor-keys: 3.4.3 + + '@ungap/structured-clone@1.2.0': {} + + acorn-jsx@5.3.2(acorn@8.14.0): + dependencies: + acorn: 8.14.0 + + acorn@8.14.0: {} + + agora-rtc-sdk-ng@4.22.0: + dependencies: + '@agora-js/media': 4.22.0 + '@agora-js/report': 4.22.0 + '@agora-js/shared': 4.22.0 + agora-rte-extension: 1.2.4 + axios: 1.7.7 + formdata-polyfill: 4.0.10 + pako: 2.1.0 + ua-parser-js: 0.7.39 + webrtc-adapter: 8.2.0 + transitivePeerDependencies: + - debug + optional: true + + agora-rtc-sdk-ng@4.22.2: + dependencies: + '@agora-js/media': 4.22.2 + '@agora-js/report': 4.22.2 + '@agora-js/shared': 4.22.2 + agora-rte-extension: 1.2.4 + axios: 1.7.7 + formdata-polyfill: 4.0.10 + pako: 2.1.0 + ua-parser-js: 0.7.39 + webrtc-adapter: 8.2.0 + transitivePeerDependencies: + - debug + + agora-rte-extension@1.2.4: {} + + agora-rtm@2.2.0: + optionalDependencies: + agora-rtc-sdk-ng: 4.22.0 + transitivePeerDependencies: + - debug + + ajv@6.12.6: + dependencies: + fast-deep-equal: 3.1.3 + 
fast-json-stable-stringify: 2.1.0 + json-schema-traverse: 0.4.1 + uri-js: 4.4.1 + + ansi-regex@5.0.1: {} + + ansi-regex@6.1.0: {} + + ansi-styles@4.3.0: + dependencies: + color-convert: 2.0.1 + + ansi-styles@6.2.1: {} + + any-promise@1.3.0: {} + + anymatch@3.1.3: + dependencies: + normalize-path: 3.0.0 + picomatch: 2.3.1 + + arg@5.0.2: {} + + argparse@2.0.1: {} + + aria-hidden@1.2.4: + dependencies: + tslib: 2.8.1 + + aria-query@5.3.2: {} + + array-buffer-byte-length@1.0.1: + dependencies: + call-bind: 1.0.7 + is-array-buffer: 3.0.4 + + array-includes@3.1.8: + dependencies: + call-bind: 1.0.7 + define-properties: 1.2.1 + es-abstract: 1.23.3 + es-object-atoms: 1.0.0 + get-intrinsic: 1.2.4 + is-string: 1.0.7 + + array.prototype.findlast@1.2.5: + dependencies: + call-bind: 1.0.7 + define-properties: 1.2.1 + es-abstract: 1.23.3 + es-errors: 1.3.0 + es-object-atoms: 1.0.0 + es-shim-unscopables: 1.0.2 + + array.prototype.findlastindex@1.2.5: + dependencies: + call-bind: 1.0.7 + define-properties: 1.2.1 + es-abstract: 1.23.3 + es-errors: 1.3.0 + es-object-atoms: 1.0.0 + es-shim-unscopables: 1.0.2 + + array.prototype.flat@1.3.2: + dependencies: + call-bind: 1.0.7 + define-properties: 1.2.1 + es-abstract: 1.23.3 + es-shim-unscopables: 1.0.2 + + array.prototype.flatmap@1.3.2: + dependencies: + call-bind: 1.0.7 + define-properties: 1.2.1 + es-abstract: 1.23.3 + es-shim-unscopables: 1.0.2 + + array.prototype.tosorted@1.1.4: + dependencies: + call-bind: 1.0.7 + define-properties: 1.2.1 + es-abstract: 1.23.3 + es-errors: 1.3.0 + es-shim-unscopables: 1.0.2 + + arraybuffer.prototype.slice@1.0.3: + dependencies: + array-buffer-byte-length: 1.0.1 + call-bind: 1.0.7 + define-properties: 1.2.1 + es-abstract: 1.23.3 + es-errors: 1.3.0 + get-intrinsic: 1.2.4 + is-array-buffer: 3.0.4 + is-shared-array-buffer: 1.0.3 + + ast-types-flow@0.0.8: {} + + asynckit@0.4.0: {} + + autoprefixer@10.4.20(postcss@8.4.48): + dependencies: + browserslist: 4.24.2 + caniuse-lite: 1.0.30001680 + fraction.js: 4.3.7 + normalize-range: 0.1.2 + picocolors: 1.1.1 + postcss: 8.4.48 + postcss-value-parser: 4.2.0 + + available-typed-arrays@1.0.7: + dependencies: + possible-typed-array-names: 1.0.0 + + axe-core@4.10.2: {} + + axios@1.7.7: + dependencies: + follow-redirects: 1.15.9 + form-data: 4.0.1 + proxy-from-env: 1.1.0 + transitivePeerDependencies: + - debug + + axobject-query@4.1.0: {} + + babel-plugin-polyfill-corejs2@0.4.11(@babel/core@7.26.0): + dependencies: + '@babel/compat-data': 7.26.2 + '@babel/core': 7.26.0 + '@babel/helper-define-polyfill-provider': 0.6.2(@babel/core@7.26.0) + semver: 6.3.1 + transitivePeerDependencies: + - supports-color + + babel-plugin-polyfill-corejs3@0.10.6(@babel/core@7.26.0): + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-define-polyfill-provider': 0.6.2(@babel/core@7.26.0) + core-js-compat: 3.39.0 + transitivePeerDependencies: + - supports-color + + babel-plugin-polyfill-regenerator@0.6.2(@babel/core@7.26.0): + dependencies: + '@babel/core': 7.26.0 + '@babel/helper-define-polyfill-provider': 0.6.2(@babel/core@7.26.0) + transitivePeerDependencies: + - supports-color + + balanced-match@1.0.2: {} + + binary-extensions@2.3.0: {} + + bluebird@3.7.2: {} + + boolbase@1.0.0: {} + + brace-expansion@1.1.11: + dependencies: + balanced-match: 1.0.2 + concat-map: 0.0.1 + + brace-expansion@2.0.1: + dependencies: + balanced-match: 1.0.2 + + braces@3.0.3: + dependencies: + fill-range: 7.1.1 + + browserslist@4.24.2: + dependencies: + caniuse-lite: 1.0.30001680 + electron-to-chromium: 1.5.55 + node-releases: 
2.0.18 + update-browserslist-db: 1.1.1(browserslist@4.24.2) + + busboy@1.6.0: + dependencies: + streamsearch: 1.1.0 + + call-bind@1.0.7: + dependencies: + es-define-property: 1.0.0 + es-errors: 1.3.0 + function-bind: 1.1.2 + get-intrinsic: 1.2.4 + set-function-length: 1.2.2 + + callsites@3.1.0: {} + + camelcase-css@2.0.1: {} + + camelcase@6.3.0: {} + + caniuse-lite@1.0.30001680: {} + + catharsis@0.9.0: + dependencies: + lodash: 4.17.21 + + chalk@4.1.2: + dependencies: + ansi-styles: 4.3.0 + supports-color: 7.2.0 + + chokidar@3.6.0: + dependencies: + anymatch: 3.1.3 + braces: 3.0.3 + glob-parent: 5.1.2 + is-binary-path: 2.1.0 + is-glob: 4.0.3 + normalize-path: 3.0.0 + readdirp: 3.6.0 + optionalDependencies: + fsevents: 2.3.3 + + chokidar@4.0.1: + dependencies: + readdirp: 4.0.2 + + class-variance-authority@0.7.0: + dependencies: + clsx: 2.0.0 + + client-only@0.0.1: {} + + clsx@2.0.0: {} + + clsx@2.1.1: {} + + color-convert@2.0.1: + dependencies: + color-name: 1.1.4 + + color-name@1.1.4: {} + + color-string@1.9.1: + dependencies: + color-name: 1.1.4 + simple-swizzle: 0.2.2 + optional: true + + color@4.2.3: + dependencies: + color-convert: 2.0.1 + color-string: 1.9.1 + optional: true + + combined-stream@1.0.8: + dependencies: + delayed-stream: 1.0.0 + + commander@4.1.1: {} + + commander@7.2.0: {} + + concat-map@0.0.1: {} + + convert-source-map@2.0.0: {} + + core-js-compat@3.39.0: + dependencies: + browserslist: 4.24.2 + + cosmiconfig@8.3.6(typescript@5.6.3): + dependencies: + import-fresh: 3.3.0 + js-yaml: 4.1.0 + parse-json: 5.2.0 + path-type: 4.0.0 + optionalDependencies: + typescript: 5.6.3 + + cross-spawn@7.0.5: + dependencies: + path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + + css-select@5.1.0: + dependencies: + boolbase: 1.0.0 + css-what: 6.1.0 + domhandler: 5.0.3 + domutils: 3.1.0 + nth-check: 2.1.1 + + css-tree@2.2.1: + dependencies: + mdn-data: 2.0.28 + source-map-js: 1.2.1 + + css-tree@2.3.1: + dependencies: + mdn-data: 2.0.30 + source-map-js: 1.2.1 + + css-what@6.1.0: {} + + cssesc@3.0.0: {} + + csso@5.0.5: + dependencies: + css-tree: 2.2.1 + + csstype@3.1.3: {} + + damerau-levenshtein@1.0.8: {} + + data-view-buffer@1.0.1: + dependencies: + call-bind: 1.0.7 + es-errors: 1.3.0 + is-data-view: 1.0.1 + + data-view-byte-length@1.0.1: + dependencies: + call-bind: 1.0.7 + es-errors: 1.3.0 + is-data-view: 1.0.1 + + data-view-byte-offset@1.0.0: + dependencies: + call-bind: 1.0.7 + es-errors: 1.3.0 + is-data-view: 1.0.1 + + debug@3.2.7: + dependencies: + ms: 2.1.3 + + debug@4.3.7: + dependencies: + ms: 2.1.3 + + decode-uri-component@0.4.1: {} + + deep-is@0.1.4: {} + + deepmerge@4.3.1: {} + + define-data-property@1.1.4: + dependencies: + es-define-property: 1.0.0 + es-errors: 1.3.0 + gopd: 1.0.1 + + define-properties@1.2.1: + dependencies: + define-data-property: 1.1.4 + has-property-descriptors: 1.0.2 + object-keys: 1.1.1 + + defu@6.1.4: {} + + delayed-stream@1.0.0: {} + + destr@2.0.3: {} + + detect-libc@1.0.3: + optional: true + + detect-libc@2.0.3: + optional: true + + detect-node-es@1.1.0: {} + + didyoumean@1.2.2: {} + + dlv@1.1.3: {} + + doctrine@2.1.0: + dependencies: + esutils: 2.0.3 + + doctrine@3.0.0: + dependencies: + esutils: 2.0.3 + + dom-serializer@2.0.0: + dependencies: + domelementtype: 2.3.0 + domhandler: 5.0.3 + entities: 4.5.0 + + domelementtype@2.3.0: {} + + domhandler@5.0.3: + dependencies: + domelementtype: 2.3.0 + + domutils@3.1.0: + dependencies: + dom-serializer: 2.0.0 + domelementtype: 2.3.0 + domhandler: 5.0.3 + + dot-case@3.0.4: + dependencies: + 
no-case: 3.0.4 + tslib: 2.8.1 + + eastasianwidth@0.2.0: {} + + electron-to-chromium@1.5.55: {} + + emoji-regex@8.0.0: {} + + emoji-regex@9.2.2: {} + + enhanced-resolve@5.17.1: + dependencies: + graceful-fs: 4.2.11 + tapable: 2.2.1 + + entities@4.5.0: {} + + error-ex@1.3.2: + dependencies: + is-arrayish: 0.2.1 + + es-abstract@1.23.3: + dependencies: + array-buffer-byte-length: 1.0.1 + arraybuffer.prototype.slice: 1.0.3 + available-typed-arrays: 1.0.7 + call-bind: 1.0.7 + data-view-buffer: 1.0.1 + data-view-byte-length: 1.0.1 + data-view-byte-offset: 1.0.0 + es-define-property: 1.0.0 + es-errors: 1.3.0 + es-object-atoms: 1.0.0 + es-set-tostringtag: 2.0.3 + es-to-primitive: 1.2.1 + function.prototype.name: 1.1.6 + get-intrinsic: 1.2.4 + get-symbol-description: 1.0.2 + globalthis: 1.0.4 + gopd: 1.0.1 + has-property-descriptors: 1.0.2 + has-proto: 1.0.3 + has-symbols: 1.0.3 + hasown: 2.0.2 + internal-slot: 1.0.7 + is-array-buffer: 3.0.4 + is-callable: 1.2.7 + is-data-view: 1.0.1 + is-negative-zero: 2.0.3 + is-regex: 1.1.4 + is-shared-array-buffer: 1.0.3 + is-string: 1.0.7 + is-typed-array: 1.1.13 + is-weakref: 1.0.2 + object-inspect: 1.13.3 + object-keys: 1.1.1 + object.assign: 4.1.5 + regexp.prototype.flags: 1.5.3 + safe-array-concat: 1.1.2 + safe-regex-test: 1.0.3 + string.prototype.trim: 1.2.9 + string.prototype.trimend: 1.0.8 + string.prototype.trimstart: 1.0.8 + typed-array-buffer: 1.0.2 + typed-array-byte-length: 1.0.1 + typed-array-byte-offset: 1.0.2 + typed-array-length: 1.0.6 + unbox-primitive: 1.0.2 + which-typed-array: 1.1.15 + + es-define-property@1.0.0: + dependencies: + get-intrinsic: 1.2.4 + + es-errors@1.3.0: {} + + es-iterator-helpers@1.2.0: + dependencies: + call-bind: 1.0.7 + define-properties: 1.2.1 + es-abstract: 1.23.3 + es-errors: 1.3.0 + es-set-tostringtag: 2.0.3 + function-bind: 1.1.2 + get-intrinsic: 1.2.4 + globalthis: 1.0.4 + gopd: 1.0.1 + has-property-descriptors: 1.0.2 + has-proto: 1.0.3 + has-symbols: 1.0.3 + internal-slot: 1.0.7 + iterator.prototype: 1.1.3 + safe-array-concat: 1.1.2 + + es-object-atoms@1.0.0: + dependencies: + es-errors: 1.3.0 + + es-set-tostringtag@2.0.3: + dependencies: + get-intrinsic: 1.2.4 + has-tostringtag: 1.0.2 + hasown: 2.0.2 + + es-shim-unscopables@1.0.2: + dependencies: + hasown: 2.0.2 + + es-to-primitive@1.2.1: + dependencies: + is-callable: 1.2.7 + is-date-object: 1.0.5 + is-symbol: 1.0.4 + + escalade@3.2.0: {} + + escape-string-regexp@2.0.0: {} + + escape-string-regexp@4.0.0: {} + + escodegen@1.14.3: + dependencies: + esprima: 4.0.1 + estraverse: 4.3.0 + esutils: 2.0.3 + optionator: 0.8.3 + optionalDependencies: + source-map: 0.6.1 + + eslint-config-next@15.0.3(eslint@8.57.1)(typescript@5.6.3): + dependencies: + '@next/eslint-plugin-next': 15.0.3 + '@rushstack/eslint-patch': 1.10.4 + '@typescript-eslint/eslint-plugin': 8.13.0(@typescript-eslint/parser@8.13.0(eslint@8.57.1)(typescript@5.6.3))(eslint@8.57.1)(typescript@5.6.3) + '@typescript-eslint/parser': 8.13.0(eslint@8.57.1)(typescript@5.6.3) + eslint: 8.57.1 + eslint-import-resolver-node: 0.3.9 + eslint-import-resolver-typescript: 3.6.3(@typescript-eslint/parser@8.13.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.31.0)(eslint@8.57.1) + eslint-plugin-import: 2.31.0(@typescript-eslint/parser@8.13.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-typescript@3.6.3)(eslint@8.57.1) + eslint-plugin-jsx-a11y: 6.10.2(eslint@8.57.1) + eslint-plugin-react: 7.37.2(eslint@8.57.1) + eslint-plugin-react-hooks: 5.0.0(eslint@8.57.1) + 
optionalDependencies: + typescript: 5.6.3 + transitivePeerDependencies: + - eslint-import-resolver-webpack + - eslint-plugin-import-x + - supports-color + + eslint-import-resolver-node@0.3.9: + dependencies: + debug: 3.2.7 + is-core-module: 2.15.1 + resolve: 1.22.8 + transitivePeerDependencies: + - supports-color + + eslint-import-resolver-typescript@3.6.3(@typescript-eslint/parser@8.13.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.31.0)(eslint@8.57.1): + dependencies: + '@nolyfill/is-core-module': 1.0.39 + debug: 4.3.7 + enhanced-resolve: 5.17.1 + eslint: 8.57.1 + eslint-module-utils: 2.12.0(@typescript-eslint/parser@8.13.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.3)(eslint@8.57.1) + fast-glob: 3.3.2 + get-tsconfig: 4.8.1 + is-bun-module: 1.2.1 + is-glob: 4.0.3 + optionalDependencies: + eslint-plugin-import: 2.31.0(@typescript-eslint/parser@8.13.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-typescript@3.6.3)(eslint@8.57.1) + transitivePeerDependencies: + - '@typescript-eslint/parser' + - eslint-import-resolver-node + - eslint-import-resolver-webpack + - supports-color + + eslint-module-utils@2.12.0(@typescript-eslint/parser@8.13.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.3)(eslint@8.57.1): + dependencies: + debug: 3.2.7 + optionalDependencies: + '@typescript-eslint/parser': 8.13.0(eslint@8.57.1)(typescript@5.6.3) + eslint: 8.57.1 + eslint-import-resolver-node: 0.3.9 + eslint-import-resolver-typescript: 3.6.3(@typescript-eslint/parser@8.13.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.31.0)(eslint@8.57.1) + transitivePeerDependencies: + - supports-color + + eslint-plugin-import@2.31.0(@typescript-eslint/parser@8.13.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-typescript@3.6.3)(eslint@8.57.1): + dependencies: + '@rtsao/scc': 1.1.0 + array-includes: 3.1.8 + array.prototype.findlastindex: 1.2.5 + array.prototype.flat: 1.3.2 + array.prototype.flatmap: 1.3.2 + debug: 3.2.7 + doctrine: 2.1.0 + eslint: 8.57.1 + eslint-import-resolver-node: 0.3.9 + eslint-module-utils: 2.12.0(@typescript-eslint/parser@8.13.0(eslint@8.57.1)(typescript@5.6.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.3)(eslint@8.57.1) + hasown: 2.0.2 + is-core-module: 2.15.1 + is-glob: 4.0.3 + minimatch: 3.1.2 + object.fromentries: 2.0.8 + object.groupby: 1.0.3 + object.values: 1.2.0 + semver: 6.3.1 + string.prototype.trimend: 1.0.8 + tsconfig-paths: 3.15.0 + optionalDependencies: + '@typescript-eslint/parser': 8.13.0(eslint@8.57.1)(typescript@5.6.3) + transitivePeerDependencies: + - eslint-import-resolver-typescript + - eslint-import-resolver-webpack + - supports-color + + eslint-plugin-jsx-a11y@6.10.2(eslint@8.57.1): + dependencies: + aria-query: 5.3.2 + array-includes: 3.1.8 + array.prototype.flatmap: 1.3.2 + ast-types-flow: 0.0.8 + axe-core: 4.10.2 + axobject-query: 4.1.0 + damerau-levenshtein: 1.0.8 + emoji-regex: 9.2.2 + eslint: 8.57.1 + hasown: 2.0.2 + jsx-ast-utils: 3.3.5 + language-tags: 1.0.9 + minimatch: 3.1.2 + object.fromentries: 2.0.8 + safe-regex-test: 1.0.3 + string.prototype.includes: 2.0.1 + + eslint-plugin-react-hooks@5.0.0(eslint@8.57.1): + dependencies: + eslint: 8.57.1 + + eslint-plugin-react@7.37.2(eslint@8.57.1): + dependencies: + array-includes: 3.1.8 + array.prototype.findlast: 1.2.5 + array.prototype.flatmap: 1.3.2 + 
array.prototype.tosorted: 1.1.4 + doctrine: 2.1.0 + es-iterator-helpers: 1.2.0 + eslint: 8.57.1 + estraverse: 5.3.0 + hasown: 2.0.2 + jsx-ast-utils: 3.3.5 + minimatch: 3.1.2 + object.entries: 1.1.8 + object.fromentries: 2.0.8 + object.values: 1.2.0 + prop-types: 15.8.1 + resolve: 2.0.0-next.5 + semver: 6.3.1 + string.prototype.matchall: 4.0.11 + string.prototype.repeat: 1.0.0 + + eslint-scope@7.2.2: + dependencies: + esrecurse: 4.3.0 + estraverse: 5.3.0 + + eslint-visitor-keys@3.4.3: {} + + eslint@8.57.1: + dependencies: + '@eslint-community/eslint-utils': 4.4.1(eslint@8.57.1) + '@eslint-community/regexpp': 4.12.1 + '@eslint/eslintrc': 2.1.4 + '@eslint/js': 8.57.1 + '@humanwhocodes/config-array': 0.13.0 + '@humanwhocodes/module-importer': 1.0.1 + '@nodelib/fs.walk': 1.2.8 + '@ungap/structured-clone': 1.2.0 + ajv: 6.12.6 + chalk: 4.1.2 + cross-spawn: 7.0.5 + debug: 4.3.7 + doctrine: 3.0.0 + escape-string-regexp: 4.0.0 + eslint-scope: 7.2.2 + eslint-visitor-keys: 3.4.3 + espree: 9.6.1 + esquery: 1.6.0 + esutils: 2.0.3 + fast-deep-equal: 3.1.3 + file-entry-cache: 6.0.1 + find-up: 5.0.0 + glob-parent: 6.0.2 + globals: 13.24.0 + graphemer: 1.4.0 + ignore: 5.3.2 + imurmurhash: 0.1.4 + is-glob: 4.0.3 + is-path-inside: 3.0.3 + js-yaml: 4.1.0 + json-stable-stringify-without-jsonify: 1.0.1 + levn: 0.4.1 + lodash.merge: 4.6.2 + minimatch: 3.1.2 + natural-compare: 1.4.0 + optionator: 0.9.4 + strip-ansi: 6.0.1 + text-table: 0.2.0 + transitivePeerDependencies: + - supports-color + + espree@9.6.1: + dependencies: + acorn: 8.14.0 + acorn-jsx: 5.3.2(acorn@8.14.0) + eslint-visitor-keys: 3.4.3 + + esprima@4.0.1: {} + + esquery@1.6.0: + dependencies: + estraverse: 5.3.0 + + esrecurse@4.3.0: + dependencies: + estraverse: 5.3.0 + + estraverse@4.3.0: {} + + estraverse@5.3.0: {} + + esutils@2.0.3: {} + + fast-deep-equal@3.1.3: {} + + fast-glob@3.3.1: + dependencies: + '@nodelib/fs.stat': 2.0.5 + '@nodelib/fs.walk': 1.2.8 + glob-parent: 5.1.2 + merge2: 1.4.1 + micromatch: 4.0.8 + + fast-glob@3.3.2: + dependencies: + '@nodelib/fs.stat': 2.0.5 + '@nodelib/fs.walk': 1.2.8 + glob-parent: 5.1.2 + merge2: 1.4.1 + micromatch: 4.0.8 + + fast-json-stable-stringify@2.1.0: {} + + fast-levenshtein@2.0.6: {} + + fastq@1.17.1: + dependencies: + reusify: 1.0.4 + + fetch-blob@3.2.0: + dependencies: + node-domexception: 1.0.0 + web-streams-polyfill: 3.3.3 + + file-entry-cache@6.0.1: + dependencies: + flat-cache: 3.2.0 + + fill-range@7.1.1: + dependencies: + to-regex-range: 5.0.1 + + find-up@5.0.0: + dependencies: + locate-path: 6.0.0 + path-exists: 4.0.0 + + flat-cache@3.2.0: + dependencies: + flatted: 3.3.1 + keyv: 4.5.4 + rimraf: 3.0.2 + + flatted@3.3.1: {} + + follow-redirects@1.15.9: {} + + for-each@0.3.3: + dependencies: + is-callable: 1.2.7 + + foreground-child@3.3.0: + dependencies: + cross-spawn: 7.0.5 + signal-exit: 4.1.0 + + form-data@4.0.1: + dependencies: + asynckit: 0.4.0 + combined-stream: 1.0.8 + mime-types: 2.1.35 + + formdata-polyfill@4.0.10: + dependencies: + fetch-blob: 3.2.0 + + fraction.js@4.3.7: {} + + fs.realpath@1.0.0: {} + + fsevents@2.3.3: + optional: true + + function-bind@1.1.2: {} + + function.prototype.name@1.1.6: + dependencies: + call-bind: 1.0.7 + define-properties: 1.2.1 + es-abstract: 1.23.3 + functions-have-names: 1.2.3 + + functions-have-names@1.2.3: {} + + gensync@1.0.0-beta.2: {} + + get-intrinsic@1.2.4: + dependencies: + es-errors: 1.3.0 + function-bind: 1.1.2 + has-proto: 1.0.3 + has-symbols: 1.0.3 + hasown: 2.0.2 + + get-nonce@1.0.1: {} + + get-symbol-description@1.0.2: + dependencies: + 
call-bind: 1.0.7 + es-errors: 1.3.0 + get-intrinsic: 1.2.4 + + get-tsconfig@4.8.1: + dependencies: + resolve-pkg-maps: 1.0.0 + + glob-parent@5.1.2: + dependencies: + is-glob: 4.0.3 + + glob-parent@6.0.2: + dependencies: + is-glob: 4.0.3 + + glob@10.4.5: + dependencies: + foreground-child: 3.3.0 + jackspeak: 3.4.3 + minimatch: 9.0.5 + minipass: 7.1.2 + package-json-from-dist: 1.0.1 + path-scurry: 1.11.1 + + glob@7.2.3: + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 3.1.2 + once: 1.4.0 + path-is-absolute: 1.0.1 + + glob@8.1.0: + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 5.1.6 + once: 1.4.0 + + globals@11.12.0: {} + + globals@13.24.0: + dependencies: + type-fest: 0.20.2 + + globalthis@1.0.4: + dependencies: + define-properties: 1.2.1 + gopd: 1.0.1 + + gopd@1.0.1: + dependencies: + get-intrinsic: 1.2.4 + + graceful-fs@4.2.11: {} + + graphemer@1.4.0: {} + + has-bigints@1.0.2: {} + + has-flag@4.0.0: {} + + has-property-descriptors@1.0.2: + dependencies: + es-define-property: 1.0.0 + + has-proto@1.0.3: {} + + has-symbols@1.0.3: {} + + has-tostringtag@1.0.2: + dependencies: + has-symbols: 1.0.3 + + hasown@2.0.2: + dependencies: + function-bind: 1.1.2 + + hoist-non-react-statics@3.3.2: + dependencies: + react-is: 16.13.1 + + ignore@5.3.2: {} + + immer@10.1.1: {} + + immutable@4.3.7: {} + + import-fresh@3.3.0: + dependencies: + parent-module: 1.0.1 + resolve-from: 4.0.0 + + imurmurhash@0.1.4: {} + + inflight@1.0.6: + dependencies: + once: 1.4.0 + wrappy: 1.0.2 + + inherits@2.0.4: {} + + internal-slot@1.0.7: + dependencies: + es-errors: 1.3.0 + hasown: 2.0.2 + side-channel: 1.0.6 + + invariant@2.2.4: + dependencies: + loose-envify: 1.4.0 + + is-array-buffer@3.0.4: + dependencies: + call-bind: 1.0.7 + get-intrinsic: 1.2.4 + + is-arrayish@0.2.1: {} + + is-arrayish@0.3.2: + optional: true + + is-async-function@2.0.0: + dependencies: + has-tostringtag: 1.0.2 + + is-bigint@1.0.4: + dependencies: + has-bigints: 1.0.2 + + is-binary-path@2.1.0: + dependencies: + binary-extensions: 2.3.0 + + is-boolean-object@1.1.2: + dependencies: + call-bind: 1.0.7 + has-tostringtag: 1.0.2 + + is-bun-module@1.2.1: + dependencies: + semver: 7.6.3 + + is-callable@1.2.7: {} + + is-core-module@2.15.1: + dependencies: + hasown: 2.0.2 + + is-data-view@1.0.1: + dependencies: + is-typed-array: 1.1.13 + + is-date-object@1.0.5: + dependencies: + has-tostringtag: 1.0.2 + + is-extglob@2.1.1: {} + + is-finalizationregistry@1.0.2: + dependencies: + call-bind: 1.0.7 + + is-fullwidth-code-point@3.0.0: {} + + is-generator-function@1.0.10: + dependencies: + has-tostringtag: 1.0.2 + + is-glob@4.0.3: + dependencies: + is-extglob: 2.1.1 + + is-map@2.0.3: {} + + is-negative-zero@2.0.3: {} + + is-number-object@1.0.7: + dependencies: + has-tostringtag: 1.0.2 + + is-number@7.0.0: {} + + is-path-inside@3.0.3: {} + + is-regex@1.1.4: + dependencies: + call-bind: 1.0.7 + has-tostringtag: 1.0.2 + + is-set@2.0.3: {} + + is-shared-array-buffer@1.0.3: + dependencies: + call-bind: 1.0.7 + + is-string@1.0.7: + dependencies: + has-tostringtag: 1.0.2 + + is-symbol@1.0.4: + dependencies: + has-symbols: 1.0.3 + + is-typed-array@1.1.13: + dependencies: + which-typed-array: 1.1.15 + + is-weakmap@2.0.2: {} + + is-weakref@1.0.2: + dependencies: + call-bind: 1.0.7 + + is-weakset@2.0.3: + dependencies: + call-bind: 1.0.7 + get-intrinsic: 1.2.4 + + isarray@2.0.5: {} + + isexe@2.0.0: {} + + iterator.prototype@1.1.3: + dependencies: + define-properties: 1.2.1 + get-intrinsic: 1.2.4 + 
has-symbols: 1.0.3 + reflect.getprototypeof: 1.0.6 + set-function-name: 2.0.2 + + jackspeak@3.4.3: + dependencies: + '@isaacs/cliui': 8.0.2 + optionalDependencies: + '@pkgjs/parseargs': 0.11.0 + + jiti@1.21.6: {} + + js-tokens@4.0.0: {} + + js-yaml@4.1.0: + dependencies: + argparse: 2.0.1 + + js2xmlparser@4.0.2: + dependencies: + xmlcreate: 2.0.4 + + jsdoc@4.0.4: + dependencies: + '@babel/parser': 7.26.2 + '@jsdoc/salty': 0.2.8 + '@types/markdown-it': 14.1.2 + bluebird: 3.7.2 + catharsis: 0.9.0 + escape-string-regexp: 2.0.0 + js2xmlparser: 4.0.2 + klaw: 3.0.0 + markdown-it: 14.1.0 + markdown-it-anchor: 8.6.7(@types/markdown-it@14.1.2)(markdown-it@14.1.0) + marked: 4.3.0 + mkdirp: 1.0.4 + requizzle: 0.2.4 + strip-json-comments: 3.1.1 + underscore: 1.13.7 + + jsesc@3.0.2: {} + + json-buffer@3.0.1: {} + + json-parse-even-better-errors@2.3.1: {} + + json-schema-traverse@0.4.1: {} + + json-stable-stringify-without-jsonify@1.0.1: {} + + json5@1.0.2: + dependencies: + minimist: 1.2.8 + + json5@2.2.3: {} + + jsx-ast-utils@3.3.5: + dependencies: + array-includes: 3.1.8 + array.prototype.flat: 1.3.2 + object.assign: 4.1.5 + object.values: 1.2.0 + + keyv@4.5.4: + dependencies: + json-buffer: 3.0.1 + + klaw@3.0.0: + dependencies: + graceful-fs: 4.2.11 + + language-subtag-registry@0.3.23: {} + + language-tags@1.0.9: + dependencies: + language-subtag-registry: 0.3.23 + + levn@0.3.0: + dependencies: + prelude-ls: 1.1.2 + type-check: 0.3.2 + + levn@0.4.1: + dependencies: + prelude-ls: 1.2.1 + type-check: 0.4.0 + + lilconfig@2.1.0: {} + + lilconfig@3.1.2: {} + + lines-and-columns@1.2.4: {} + + linkify-it@5.0.0: + dependencies: + uc.micro: 2.1.0 + + locate-path@6.0.0: + dependencies: + p-locate: 5.0.0 + + lodash-es@4.17.21: {} + + lodash.debounce@4.0.8: {} + + lodash.merge@4.6.2: {} + + lodash@4.17.21: {} + + long@5.2.3: {} + + loose-envify@1.4.0: + dependencies: + js-tokens: 4.0.0 + + lower-case@2.0.2: + dependencies: + tslib: 2.8.1 + + lru-cache@10.4.3: {} + + lru-cache@5.1.1: + dependencies: + yallist: 3.1.1 + + lucide-react@0.453.0(react@18.3.1): + dependencies: + react: 18.3.1 + + markdown-it-anchor@8.6.7(@types/markdown-it@14.1.2)(markdown-it@14.1.0): + dependencies: + '@types/markdown-it': 14.1.2 + markdown-it: 14.1.0 + + markdown-it@14.1.0: + dependencies: + argparse: 2.0.1 + entities: 4.5.0 + linkify-it: 5.0.0 + mdurl: 2.0.0 + punycode.js: 2.3.1 + uc.micro: 2.1.0 + + marked@4.3.0: {} + + mdn-data@2.0.28: {} + + mdn-data@2.0.30: {} + + mdurl@2.0.0: {} + + merge2@1.4.1: {} + + micromatch@4.0.8: + dependencies: + braces: 3.0.3 + picomatch: 2.3.1 + + mime-db@1.52.0: {} + + mime-types@2.1.35: + dependencies: + mime-db: 1.52.0 + + minimatch@3.1.2: + dependencies: + brace-expansion: 1.1.11 + + minimatch@5.1.6: + dependencies: + brace-expansion: 2.0.1 + + minimatch@9.0.5: + dependencies: + brace-expansion: 2.0.1 + + minimist@1.2.8: {} + + minipass@7.1.2: {} + + mkdirp@1.0.4: {} + + ms@2.1.3: {} + + mz@2.7.0: + dependencies: + any-promise: 1.3.0 + object-assign: 4.1.1 + thenify-all: 1.6.0 + + nanoid@3.3.7: {} + + natural-compare@1.4.0: {} + + next-themes@0.3.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): + dependencies: + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + + next@15.0.3(@babel/core@7.26.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.80.6): + dependencies: + '@next/env': 15.0.3 + '@swc/counter': 0.1.3 + '@swc/helpers': 0.5.13 + busboy: 1.6.0 + caniuse-lite: 1.0.30001680 + postcss: 8.4.31 + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + styled-jsx: 
5.1.6(@babel/core@7.26.0)(react@18.3.1) + optionalDependencies: + '@next/swc-darwin-arm64': 15.0.3 + '@next/swc-darwin-x64': 15.0.3 + '@next/swc-linux-arm64-gnu': 15.0.3 + '@next/swc-linux-arm64-musl': 15.0.3 + '@next/swc-linux-x64-gnu': 15.0.3 + '@next/swc-linux-x64-musl': 15.0.3 + '@next/swc-win32-arm64-msvc': 15.0.3 + '@next/swc-win32-x64-msvc': 15.0.3 + sass: 1.80.6 + sharp: 0.33.5 + transitivePeerDependencies: + - '@babel/core' + - babel-plugin-macros + + no-case@3.0.4: + dependencies: + lower-case: 2.0.2 + tslib: 2.8.1 + + node-addon-api@7.1.1: + optional: true + + node-domexception@1.0.0: {} + + node-releases@2.0.18: {} + + normalize-path@3.0.0: {} + + normalize-range@0.1.2: {} + + nth-check@2.1.1: + dependencies: + boolbase: 1.0.0 + + object-assign@4.1.1: {} + + object-hash@3.0.0: {} + + object-inspect@1.13.3: {} + + object-keys@1.1.1: {} + + object.assign@4.1.5: + dependencies: + call-bind: 1.0.7 + define-properties: 1.2.1 + has-symbols: 1.0.3 + object-keys: 1.1.1 + + object.entries@1.1.8: + dependencies: + call-bind: 1.0.7 + define-properties: 1.2.1 + es-object-atoms: 1.0.0 + + object.fromentries@2.0.8: + dependencies: + call-bind: 1.0.7 + define-properties: 1.2.1 + es-abstract: 1.23.3 + es-object-atoms: 1.0.0 + + object.groupby@1.0.3: + dependencies: + call-bind: 1.0.7 + define-properties: 1.2.1 + es-abstract: 1.23.3 + + object.values@1.2.0: + dependencies: + call-bind: 1.0.7 + define-properties: 1.2.1 + es-object-atoms: 1.0.0 + + once@1.4.0: + dependencies: + wrappy: 1.0.2 + + optionator@0.8.3: + dependencies: + deep-is: 0.1.4 + fast-levenshtein: 2.0.6 + levn: 0.3.0 + prelude-ls: 1.1.2 + type-check: 0.3.2 + word-wrap: 1.2.5 + + optionator@0.9.4: + dependencies: + deep-is: 0.1.4 + fast-levenshtein: 2.0.6 + levn: 0.4.1 + prelude-ls: 1.2.1 + type-check: 0.4.0 + word-wrap: 1.2.5 + + p-is-promise@4.0.0: {} + + p-limit@3.1.0: + dependencies: + yocto-queue: 0.1.0 + + p-locate@5.0.0: + dependencies: + p-limit: 3.1.0 + + package-json-from-dist@1.0.1: {} + + pako@2.1.0: {} + + parent-module@1.0.1: + dependencies: + callsites: 3.1.0 + + parse-json@5.2.0: + dependencies: + '@babel/code-frame': 7.26.2 + error-ex: 1.3.2 + json-parse-even-better-errors: 2.3.1 + lines-and-columns: 1.2.4 + + path-exists@4.0.0: {} + + path-is-absolute@1.0.1: {} + + path-key@3.1.1: {} + + path-parse@1.0.7: {} + + path-scurry@1.11.1: + dependencies: + lru-cache: 10.4.3 + minipass: 7.1.2 + + path-type@4.0.0: {} + + picocolors@1.1.1: {} + + picomatch@2.3.1: {} + + pify@2.3.0: {} + + pirates@4.0.6: {} + + possible-typed-array-names@1.0.0: {} + + postcss-import@15.1.0(postcss@8.4.48): + dependencies: + postcss: 8.4.48 + postcss-value-parser: 4.2.0 + read-cache: 1.0.0 + resolve: 1.22.8 + + postcss-js@4.0.1(postcss@8.4.48): + dependencies: + camelcase-css: 2.0.1 + postcss: 8.4.48 + + postcss-load-config@4.0.2(postcss@8.4.48): + dependencies: + lilconfig: 3.1.2 + yaml: 2.6.0 + optionalDependencies: + postcss: 8.4.48 + + postcss-nested@6.2.0(postcss@8.4.48): + dependencies: + postcss: 8.4.48 + postcss-selector-parser: 6.1.2 + + postcss-selector-parser@6.1.2: + dependencies: + cssesc: 3.0.0 + util-deprecate: 1.0.2 + + postcss-value-parser@4.2.0: {} + + postcss@8.4.31: + dependencies: + nanoid: 3.3.7 + picocolors: 1.1.1 + source-map-js: 1.2.1 + + postcss@8.4.48: + dependencies: + nanoid: 3.3.7 + picocolors: 1.1.1 + source-map-js: 1.2.1 + + prelude-ls@1.1.2: {} + + prelude-ls@1.2.1: {} + + prettier-plugin-tailwindcss@0.6.8(prettier@3.3.3): + dependencies: + prettier: 3.3.3 + + prettier@3.3.3: {} + + prop-types@15.8.1: + 
dependencies: + loose-envify: 1.4.0 + object-assign: 4.1.1 + react-is: 16.13.1 + + protobufjs-cli@1.1.3(protobufjs@7.4.0): + dependencies: + chalk: 4.1.2 + escodegen: 1.14.3 + espree: 9.6.1 + estraverse: 5.3.0 + glob: 8.1.0 + jsdoc: 4.0.4 + minimist: 1.2.8 + protobufjs: 7.4.0 + semver: 7.6.3 + tmp: 0.2.3 + uglify-js: 3.19.3 + + protobufjs@7.4.0: + dependencies: + '@protobufjs/aspromise': 1.1.2 + '@protobufjs/base64': 1.1.2 + '@protobufjs/codegen': 2.0.4 + '@protobufjs/eventemitter': 1.1.0 + '@protobufjs/fetch': 1.1.0 + '@protobufjs/float': 1.0.2 + '@protobufjs/inquire': 1.1.0 + '@protobufjs/path': 1.1.2 + '@protobufjs/pool': 1.1.0 + '@protobufjs/utf8': 1.1.0 + '@types/node': 20.17.6 + long: 5.2.3 + + proxy-from-env@1.1.0: {} + + punycode.js@2.3.1: {} + + punycode@2.3.1: {} + + queue-microtask@1.2.3: {} + + react-colorful@5.6.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1): + dependencies: + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + + react-dom@18.3.1(react@18.3.1): + dependencies: + loose-envify: 1.4.0 + react: 18.3.1 + scheduler: 0.23.2 + + react-hook-form@7.53.2(react@18.3.1): + dependencies: + react: 18.3.1 + + react-is@16.13.1: {} + + react-redux@9.1.2(@types/react@18.3.12)(react@18.3.1)(redux@5.0.1): + dependencies: + '@types/use-sync-external-store': 0.0.3 + react: 18.3.1 + use-sync-external-store: 1.2.2(react@18.3.1) + optionalDependencies: + '@types/react': 18.3.12 + redux: 5.0.1 + + react-remove-scroll-bar@2.3.6(@types/react@18.3.12)(react@18.3.1): + dependencies: + react: 18.3.1 + react-style-singleton: 2.2.1(@types/react@18.3.12)(react@18.3.1) + tslib: 2.8.1 + optionalDependencies: + '@types/react': 18.3.12 + + react-remove-scroll@2.6.0(@types/react@18.3.12)(react@18.3.1): + dependencies: + react: 18.3.1 + react-remove-scroll-bar: 2.3.6(@types/react@18.3.12)(react@18.3.1) + react-style-singleton: 2.2.1(@types/react@18.3.12)(react@18.3.1) + tslib: 2.8.1 + use-callback-ref: 1.3.2(@types/react@18.3.12)(react@18.3.1) + use-sidecar: 1.1.2(@types/react@18.3.12)(react@18.3.1) + optionalDependencies: + '@types/react': 18.3.12 + + react-style-singleton@2.2.1(@types/react@18.3.12)(react@18.3.1): + dependencies: + get-nonce: 1.0.1 + invariant: 2.2.4 + react: 18.3.1 + tslib: 2.8.1 + optionalDependencies: + '@types/react': 18.3.12 + + react@18.3.1: + dependencies: + loose-envify: 1.4.0 + + read-cache@1.0.0: + dependencies: + pify: 2.3.0 + + readdirp@3.6.0: + dependencies: + picomatch: 2.3.1 + + readdirp@4.0.2: {} + + redux-thunk@3.1.0(redux@5.0.1): + dependencies: + redux: 5.0.1 + + redux@4.2.1: + dependencies: + '@babel/runtime': 7.26.0 + + redux@5.0.1: {} + + reflect.getprototypeof@1.0.6: + dependencies: + call-bind: 1.0.7 + define-properties: 1.2.1 + es-abstract: 1.23.3 + es-errors: 1.3.0 + get-intrinsic: 1.2.4 + globalthis: 1.0.4 + which-builtin-type: 1.1.4 + + regenerate-unicode-properties@10.2.0: + dependencies: + regenerate: 1.4.2 + + regenerate@1.4.2: {} + + regenerator-runtime@0.14.1: {} + + regenerator-transform@0.15.2: + dependencies: + '@babel/runtime': 7.26.0 + + regexp.prototype.flags@1.5.3: + dependencies: + call-bind: 1.0.7 + define-properties: 1.2.1 + es-errors: 1.3.0 + set-function-name: 2.0.2 + + regexpu-core@6.1.1: + dependencies: + regenerate: 1.4.2 + regenerate-unicode-properties: 10.2.0 + regjsgen: 0.8.0 + regjsparser: 0.11.2 + unicode-match-property-ecmascript: 2.0.0 + unicode-match-property-value-ecmascript: 2.2.0 + + regjsgen@0.8.0: {} + + regjsparser@0.11.2: + dependencies: + jsesc: 3.0.2 + + requizzle@0.2.4: + dependencies: + lodash: 4.17.21 + + 
reselect@5.1.1: {} + + resolve-from@4.0.0: {} + + resolve-pkg-maps@1.0.0: {} + + resolve@1.22.8: + dependencies: + is-core-module: 2.15.1 + path-parse: 1.0.7 + supports-preserve-symlinks-flag: 1.0.0 + + resolve@2.0.0-next.5: + dependencies: + is-core-module: 2.15.1 + path-parse: 1.0.7 + supports-preserve-symlinks-flag: 1.0.0 + + reusify@1.0.4: {} + + rimraf@3.0.2: + dependencies: + glob: 7.2.3 + + run-parallel@1.2.0: + dependencies: + queue-microtask: 1.2.3 + + safe-array-concat@1.1.2: + dependencies: + call-bind: 1.0.7 + get-intrinsic: 1.2.4 + has-symbols: 1.0.3 + isarray: 2.0.5 + + safe-regex-test@1.0.3: + dependencies: + call-bind: 1.0.7 + es-errors: 1.3.0 + is-regex: 1.1.4 + + sass@1.80.6: + dependencies: + chokidar: 4.0.1 + immutable: 4.3.7 + source-map-js: 1.2.1 + optionalDependencies: + '@parcel/watcher': 2.5.0 + + scheduler@0.23.2: + dependencies: + loose-envify: 1.4.0 + + sdp@3.2.0: {} + + semver@6.3.1: {} + + semver@7.6.3: {} + + set-function-length@1.2.2: + dependencies: + define-data-property: 1.1.4 + es-errors: 1.3.0 + function-bind: 1.1.2 + get-intrinsic: 1.2.4 + gopd: 1.0.1 + has-property-descriptors: 1.0.2 + + set-function-name@2.0.2: + dependencies: + define-data-property: 1.1.4 + es-errors: 1.3.0 + functions-have-names: 1.2.3 + has-property-descriptors: 1.0.2 + + sharp@0.33.5: + dependencies: + color: 4.2.3 + detect-libc: 2.0.3 + semver: 7.6.3 + optionalDependencies: + '@img/sharp-darwin-arm64': 0.33.5 + '@img/sharp-darwin-x64': 0.33.5 + '@img/sharp-libvips-darwin-arm64': 1.0.4 + '@img/sharp-libvips-darwin-x64': 1.0.4 + '@img/sharp-libvips-linux-arm': 1.0.5 + '@img/sharp-libvips-linux-arm64': 1.0.4 + '@img/sharp-libvips-linux-s390x': 1.0.4 + '@img/sharp-libvips-linux-x64': 1.0.4 + '@img/sharp-libvips-linuxmusl-arm64': 1.0.4 + '@img/sharp-libvips-linuxmusl-x64': 1.0.4 + '@img/sharp-linux-arm': 0.33.5 + '@img/sharp-linux-arm64': 0.33.5 + '@img/sharp-linux-s390x': 0.33.5 + '@img/sharp-linux-x64': 0.33.5 + '@img/sharp-linuxmusl-arm64': 0.33.5 + '@img/sharp-linuxmusl-x64': 0.33.5 + '@img/sharp-wasm32': 0.33.5 + '@img/sharp-win32-ia32': 0.33.5 + '@img/sharp-win32-x64': 0.33.5 + optional: true + + shebang-command@2.0.0: + dependencies: + shebang-regex: 3.0.0 + + shebang-regex@3.0.0: {} + + side-channel@1.0.6: + dependencies: + call-bind: 1.0.7 + es-errors: 1.3.0 + get-intrinsic: 1.2.4 + object-inspect: 1.13.3 + + signal-exit@4.1.0: {} + + simple-swizzle@0.2.2: + dependencies: + is-arrayish: 0.3.2 + optional: true + + snake-case@3.0.4: + dependencies: + dot-case: 3.0.4 + tslib: 2.8.1 + + sonner@1.7.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1): + dependencies: + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + + source-map-js@1.2.1: {} + + source-map@0.6.1: + optional: true + + split-on-first@3.0.0: {} + + streamsearch@1.1.0: {} + + string-width@4.2.3: + dependencies: + emoji-regex: 8.0.0 + is-fullwidth-code-point: 3.0.0 + strip-ansi: 6.0.1 + + string-width@5.1.2: + dependencies: + eastasianwidth: 0.2.0 + emoji-regex: 9.2.2 + strip-ansi: 7.1.0 + + string.prototype.includes@2.0.1: + dependencies: + call-bind: 1.0.7 + define-properties: 1.2.1 + es-abstract: 1.23.3 + + string.prototype.matchall@4.0.11: + dependencies: + call-bind: 1.0.7 + define-properties: 1.2.1 + es-abstract: 1.23.3 + es-errors: 1.3.0 + es-object-atoms: 1.0.0 + get-intrinsic: 1.2.4 + gopd: 1.0.1 + has-symbols: 1.0.3 + internal-slot: 1.0.7 + regexp.prototype.flags: 1.5.3 + set-function-name: 2.0.2 + side-channel: 1.0.6 + + string.prototype.repeat@1.0.0: + dependencies: + define-properties: 1.2.1 + es-abstract: 
1.23.3 + + string.prototype.trim@1.2.9: + dependencies: + call-bind: 1.0.7 + define-properties: 1.2.1 + es-abstract: 1.23.3 + es-object-atoms: 1.0.0 + + string.prototype.trimend@1.0.8: + dependencies: + call-bind: 1.0.7 + define-properties: 1.2.1 + es-object-atoms: 1.0.0 + + string.prototype.trimstart@1.0.8: + dependencies: + call-bind: 1.0.7 + define-properties: 1.2.1 + es-object-atoms: 1.0.0 + + strip-ansi@6.0.1: + dependencies: + ansi-regex: 5.0.1 + + strip-ansi@7.1.0: + dependencies: + ansi-regex: 6.1.0 + + strip-bom@3.0.0: {} + + strip-json-comments@3.1.1: {} + + styled-jsx@5.1.6(@babel/core@7.26.0)(react@18.3.1): + dependencies: + client-only: 0.0.1 + react: 18.3.1 + optionalDependencies: + '@babel/core': 7.26.0 + + sucrase@3.35.0: + dependencies: + '@jridgewell/gen-mapping': 0.3.5 + commander: 4.1.1 + glob: 10.4.5 + lines-and-columns: 1.2.4 + mz: 2.7.0 + pirates: 4.0.6 + ts-interface-checker: 0.1.13 + + supports-color@7.2.0: + dependencies: + has-flag: 4.0.0 + + supports-preserve-symlinks-flag@1.0.0: {} + + svg-parser@2.0.4: {} + + svgo@3.3.2: + dependencies: + '@trysound/sax': 0.2.0 + commander: 7.2.0 + css-select: 5.1.0 + css-tree: 2.3.1 + css-what: 6.1.0 + csso: 5.0.5 + picocolors: 1.1.1 + + swr@2.2.5(react@18.3.1): + dependencies: + client-only: 0.0.1 + react: 18.3.1 + use-sync-external-store: 1.2.2(react@18.3.1) + + tailwind-merge@2.5.4: {} + + tailwindcss-animate@1.0.7(tailwindcss@3.4.14): + dependencies: + tailwindcss: 3.4.14 + + tailwindcss@3.4.14: + dependencies: + '@alloc/quick-lru': 5.2.0 + arg: 5.0.2 + chokidar: 3.6.0 + didyoumean: 1.2.2 + dlv: 1.1.3 + fast-glob: 3.3.2 + glob-parent: 6.0.2 + is-glob: 4.0.3 + jiti: 1.21.6 + lilconfig: 2.1.0 + micromatch: 4.0.8 + normalize-path: 3.0.0 + object-hash: 3.0.0 + picocolors: 1.1.1 + postcss: 8.4.48 + postcss-import: 15.1.0(postcss@8.4.48) + postcss-js: 4.0.1(postcss@8.4.48) + postcss-load-config: 4.0.2(postcss@8.4.48) + postcss-nested: 6.2.0(postcss@8.4.48) + postcss-selector-parser: 6.1.2 + resolve: 1.22.8 + sucrase: 3.35.0 + transitivePeerDependencies: + - ts-node + + tapable@2.2.1: {} + + text-table@0.2.0: {} + + thenify-all@1.6.0: + dependencies: + thenify: 3.3.1 + + thenify@3.3.1: + dependencies: + any-promise: 1.3.0 + + tmp@0.2.3: {} + + to-regex-range@5.0.1: + dependencies: + is-number: 7.0.0 + + ts-api-utils@1.4.0(typescript@5.6.3): + dependencies: + typescript: 5.6.3 + + ts-interface-checker@0.1.13: {} + + tsconfig-paths@3.15.0: + dependencies: + '@types/json5': 0.0.29 + json5: 1.0.2 + minimist: 1.2.8 + strip-bom: 3.0.0 + + tslib@2.8.1: {} + + type-check@0.3.2: + dependencies: + prelude-ls: 1.1.2 + + type-check@0.4.0: + dependencies: + prelude-ls: 1.2.1 + + type-fest@0.20.2: {} + + typed-array-buffer@1.0.2: + dependencies: + call-bind: 1.0.7 + es-errors: 1.3.0 + is-typed-array: 1.1.13 + + typed-array-byte-length@1.0.1: + dependencies: + call-bind: 1.0.7 + for-each: 0.3.3 + gopd: 1.0.1 + has-proto: 1.0.3 + is-typed-array: 1.1.13 + + typed-array-byte-offset@1.0.2: + dependencies: + available-typed-arrays: 1.0.7 + call-bind: 1.0.7 + for-each: 0.3.3 + gopd: 1.0.1 + has-proto: 1.0.3 + is-typed-array: 1.1.13 + + typed-array-length@1.0.6: + dependencies: + call-bind: 1.0.7 + for-each: 0.3.3 + gopd: 1.0.1 + has-proto: 1.0.3 + is-typed-array: 1.1.13 + possible-typed-array-names: 1.0.0 + + typescript@5.6.3: {} + + ua-parser-js@0.7.39: {} + + uc.micro@2.1.0: {} + + uglify-js@3.19.3: {} + + unbox-primitive@1.0.2: + dependencies: + call-bind: 1.0.7 + has-bigints: 1.0.2 + has-symbols: 1.0.3 + which-boxed-primitive: 1.0.2 + + 
+  underscore@1.13.7: {}
+
+  undici-types@6.19.8: {}
+
+  unicode-canonical-property-names-ecmascript@2.0.1: {}
+
+  unicode-match-property-ecmascript@2.0.0:
+    dependencies:
+      unicode-canonical-property-names-ecmascript: 2.0.1
+      unicode-property-aliases-ecmascript: 2.1.0
+
+  unicode-match-property-value-ecmascript@2.2.0: {}
+
+  unicode-property-aliases-ecmascript@2.1.0: {}
+
+  update-browserslist-db@1.1.1(browserslist@4.24.2):
+    dependencies:
+      browserslist: 4.24.2
+      escalade: 3.2.0
+      picocolors: 1.1.1
+
+  uri-js@4.4.1:
+    dependencies:
+      punycode: 2.3.1
+
+  use-callback-ref@1.3.2(@types/react@18.3.12)(react@18.3.1):
+    dependencies:
+      react: 18.3.1
+      tslib: 2.8.1
+    optionalDependencies:
+      '@types/react': 18.3.12
+
+  use-sidecar@1.1.2(@types/react@18.3.12)(react@18.3.1):
+    dependencies:
+      detect-node-es: 1.1.0
+      react: 18.3.1
+      tslib: 2.8.1
+    optionalDependencies:
+      '@types/react': 18.3.12
+
+  use-sync-external-store@1.2.2(react@18.3.1):
+    dependencies:
+      react: 18.3.1
+
+  util-deprecate@1.0.2: {}
+
+  web-streams-polyfill@3.3.3: {}
+
+  webrtc-adapter@8.2.0:
+    dependencies:
+      sdp: 3.2.0
+
+  which-boxed-primitive@1.0.2:
+    dependencies:
+      is-bigint: 1.0.4
+      is-boolean-object: 1.1.2
+      is-number-object: 1.0.7
+      is-string: 1.0.7
+      is-symbol: 1.0.4
+
+  which-builtin-type@1.1.4:
+    dependencies:
+      function.prototype.name: 1.1.6
+      has-tostringtag: 1.0.2
+      is-async-function: 2.0.0
+      is-date-object: 1.0.5
+      is-finalizationregistry: 1.0.2
+      is-generator-function: 1.0.10
+      is-regex: 1.1.4
+      is-weakref: 1.0.2
+      isarray: 2.0.5
+      which-boxed-primitive: 1.0.2
+      which-collection: 1.0.2
+      which-typed-array: 1.1.15
+
+  which-collection@1.0.2:
+    dependencies:
+      is-map: 2.0.3
+      is-set: 2.0.3
+      is-weakmap: 2.0.2
+      is-weakset: 2.0.3
+
+  which-typed-array@1.1.15:
+    dependencies:
+      available-typed-arrays: 1.0.7
+      call-bind: 1.0.7
+      for-each: 0.3.3
+      gopd: 1.0.1
+      has-tostringtag: 1.0.2
+
+  which@2.0.2:
+    dependencies:
+      isexe: 2.0.0
+
+  word-wrap@1.2.5: {}
+
+  wrap-ansi@7.0.0:
+    dependencies:
+      ansi-styles: 4.3.0
+      string-width: 4.2.3
+      strip-ansi: 6.0.1
+
+  wrap-ansi@8.1.0:
+    dependencies:
+      ansi-styles: 6.2.1
+      string-width: 5.1.2
+      strip-ansi: 7.1.0
+
+  wrappy@1.0.2: {}
+
+  xmlcreate@2.0.4: {}
+
+  yallist@3.1.1: {}
+
+  yaml@2.6.0: {}
+
+  yocto-queue@0.1.0: {}
+
+  zod@3.23.8: {}
diff --git a/demo/postcss.config.js b/demo/postcss.config.js
new file mode 100644
index 0000000000000000000000000000000000000000..97f6ee0d5cfa3ed1aa5d772961b9c8a0f959c388
--- /dev/null
+++ b/demo/postcss.config.js
@@ -0,0 +1,12 @@
+/** @type {import('postcss-load-config').Config} */
+module.exports = {
+  plugins: {
+    tailwindcss: {},
+    autoprefixer: {},
+    "@minko-fe/postcss-pxtoviewport": {
+      viewportWidth: 375,
+      exclude: /node_modules/,
+      include: /\/src\/platform\/mobile\//,
+    }
+  },
+}
diff --git a/demo/src/app/api/agents/start/graph.ts b/demo/src/app/api/agents/start/graph.ts
new file mode 100644
index 0000000000000000000000000000000000000000..5a5376b63797d474bd589658e948f676c2349f7e
--- /dev/null
+++ b/demo/src/app/api/agents/start/graph.ts
@@ -0,0 +1,282 @@
+import { LanguageMap } from "@/common/constant";
+
+export const voiceNameMap: LanguageMap = {
+  "zh-CN": {
+    azure: {
+      male: "zh-CN-YunxiNeural",
+      female: "zh-CN-XiaoxiaoNeural",
+    },
+    elevenlabs: {
+      male: "pNInz6obpgDQGcFmaJgB", // Adam
+      female: "Xb7hH8MSUJpSbSDYk0k2", // Alice
+    },
+    polly: {
+      male: "Zhiyu",
+      female: "Zhiyu",
+      langCode: "cmn-CN",
+      langEngine: "neural"
+    },
+    openai: {
+      male: "ash",
+      female: "shimmer"
+    }
+  },
+  "en-US": {
+    azure: {
+      male: "en-US-AndrewMultilingualNeural",
+      female: "en-US-AvaMultilingualNeural",
+    },
+    elevenlabs: {
+      male: "pNInz6obpgDQGcFmaJgB", // Adam
+      female: "Xb7hH8MSUJpSbSDYk0k2", // Alice
+    },
+    polly: {
+      male: "Matthew",
+      female: "Ruth",
+      langCode: "en-US",
+      langEngine: "generative"
+    },
+    openai: {
+      male: "ash",
+      female: "shimmer"
+    }
+  },
+  "ja-JP": {
+    azure: {
+      male: "ja-JP-KeitaNeural",
+      female: "ja-JP-NanamiNeural",
+    },
+    openai: {
+      male: "ash",
+      female: "shimmer"
+    }
+  },
+  "ko-KR": {
+    azure: {
+      male: "ko-KR-InJoonNeural",
+      female: "ko-KR-JiMinNeural",
+    },
+    openai: {
+      male: "ash",
+      female: "shimmer"
+    }
+  },
+};
+
+// Get the graph properties based on the graph name, language, and voice type.
+// This is where you can customize the properties for different graphs to override the default property.json.
+export const getGraphProperties = (
+  graphName: string,
+  language: string,
+  voiceType: string,
+  prompt: string | undefined,
+  greeting: string | undefined
+) => {
+  let localizationOptions = {
+    "greeting": "Hey, I'm TEN Agent, I can speak, see, and reason from a knowledge base, ask me anything!",
+    "checking_vision_text_items": "[\"Let me take a look...\",\"Let me check your camera...\",\"Please wait for a second...\"]",
+    "coze_greeting": "Hey, I'm Coze Bot, I can chat with you, ask me anything!",
+  }
+
+  if (language === "zh-CN") {
+    localizationOptions = {
+      "greeting": "嗨,我是 TEN Agent,我可以说话、看东西,还能从知识库中推理,问我任何问题吧!",
+      "checking_vision_text_items": "[\"让我看看你的摄像头...\",\"让我看一下...\",\"我看一下,请稍候...\"]",
+      "coze_greeting": "嗨,我是扣子机器人,我可以和你聊天,问我任何问题吧!",
+    }
+  } else if (language === "ja-JP") {
+    localizationOptions = {
+      "greeting": "こんにちは、TEN Agentです。私は話したり、見たり、知識ベースから推論したりできます。何でも聞いてください!",
+      "checking_vision_text_items": "[\"ちょっと見てみます...\",\"カメラをチェックします...\",\"少々お待ちください...\"]",
+      "coze_greeting": "こんにちは、私はCoze Botです。お話しできますので、何でも聞いてください!",
+    }
+  } else if (language === "ko-KR") {
+    localizationOptions = {
+      "greeting": "안녕하세요, 저는 TEN Agent입니다. 말하고, 보고, 지식 베이스에서 추론할 수 있어요. 무엇이든 물어보세요!",
+      "checking_vision_text_items": "[\"조금만 기다려 주세요...\",\"카메라를 확인해 보겠습니다...\",\"잠시만 기다려 주세요...\"]",
+      "coze_greeting": "안녕하세요, 저는 Coze Bot입니다. 대화할 수 있어요. 무엇이든 물어보세요!",
+    }
+  }
+
+  let combined_greeting = greeting || localizationOptions["greeting"];
+
+  if (graphName == "camera_va_openai_azure") {
+    return {
+      "agora_rtc": {
+        "agora_asr_language": language,
+      },
+      "llm": {
+        "prompt": prompt,
+        "greeting": combined_greeting,
+      },
+      "tts": {
+        "azure_synthesis_voice_name": voiceNameMap[language]["azure"][voiceType]
+      }
+    }
+  } else if (graphName == "va_coze_azure") {
+    combined_greeting = greeting || localizationOptions["coze_greeting"];
+    return {
+      "agora_rtc": {
+        "agora_asr_language": language,
+      },
+      "coze_python_async": {
+        "prompt": prompt,
+        "greeting": combined_greeting,
+      },
+      "tts": {
+        "azure_synthesis_voice_name": voiceNameMap[language]["azure"][voiceType]
+      }
+    }
+  } else if (graphName == "camera_va_openai_azure_rtm") {
+    return {
+      "agora_rtc": {
+        "agora_asr_language": language,
+      },
+      "llm": {
+        "model": "gpt-4o",
+        "prompt": prompt,
+        "greeting": combined_greeting,
+      },
+      "tts": {
+        "azure_synthesis_voice_name": voiceNameMap[language]["azure"][voiceType]
+      }
+    }
+  } else if (graphName == "va_openai_v2v") {
+    return {
+      "v2v": {
+        "model": "gpt-4o-realtime-preview-2024-12-17",
+        "voice": voiceNameMap[language]["openai"][voiceType],
+        "language": language,
+        "prompt": prompt,
+        "greeting": combined_greeting,
+      }
+    }
+  } else if (graphName == "va_openai_v2v_fish") {
+    return {
+      "v2v": {
+        "model": "gpt-4o-realtime-preview-2024-12-17",
+        "voice": voiceNameMap[language]["openai"][voiceType],
+        "language": language,
+        "prompt": prompt,
+        "greeting": combined_greeting,
+      },
+      "agora_rtc": {
+        "agora_asr_language": language,
+      },
+    }
+  } else if (graphName == "va_openai_azure") {
+    return {
+      "agora_rtc": {
+        "agora_asr_language": language,
+      },
+      "llm": {
+        "model": "gpt-4o",
+        "prompt": prompt,
+        "greeting": combined_greeting,
+      },
+      "tts": {
+        "azure_synthesis_voice_name": voiceNameMap[language]["azure"][voiceType]
+      }
+    }
+  } else if (graphName == "va_qwen_rag") {
+    return {
+      "agora_rtc": {
+        "agora_asr_language": language,
+      },
+      "azure_tts": {
+        "azure_synthesis_voice_name": voiceNameMap[language]["azure"][voiceType]
+      }
+    }
+  } else if (graphName == "va_gemini_v2v") {
+    return {
+      "v2v": {
+        "prompt": prompt,
+        // "greeting": combined_greeting,
+      }
+    }
+  } else if (graphName == "va_dify_azure") {
+    return {
+      "agora_rtc": {
+        "agora_asr_language": language,
+      },
+      "llm": {
+        "greeting": combined_greeting,
+      },
+      "tts": {
+        "azure_synthesis_voice_name": voiceNameMap[language]["azure"][voiceType]
+      }
+    }
+  } else if (graphName == "story_teller_stt_integrated") {
+    let story_greeting = "Hey, I'm Story Teller, I can tell stories based on your imagination, say Hi to me!";
+
+    if (language === "zh-CN") {
+      story_greeting = "嗨,我是一个讲故事的机器人,我可以根据你的想象讲故事,和我打个招呼吧!";
+    } else if (language === "ja-JP") {
+      story_greeting = "こんにちは、私はストーリーテラーです。あなたの想像に基づいて物語を語ることができます。私に挨拶してください!";
+    } else if (language === "ko-KR") {
+      story_greeting = "안녕하세요, 저는 이야기꾼입니다. 당신의 상상력을 바탕으로 이야기를 할 수 있어요. 
저에게 인사해 보세요!"; + } + + + combined_greeting = greeting || story_greeting; + return { + "agora_rtc": { + "agora_asr_language": language, + }, + "llm": { + "greeting": combined_greeting, + }, + "tts": { + "azure_synthesis_voice_name": voiceNameMap[language]["azure"][voiceType] + } + } + } else if (graphName == "va_nova_multimodal_aws") { + return { + "agora_rtc": { + "agora_asr_language": language, + }, + "llm": { + "greeting": combined_greeting, + }, + "tts": { + "voice": voiceNameMap[language]["polly"][voiceType], + "lang_code": voiceNameMap[language]["polly"]["langCode"], + "engine": voiceNameMap[language]["polly"]["langEngine"], + }, + "stt": { + "lang_code": language, + } + } + } else if (graphName == "deepseek_r1") { + return { + "agora_rtc": { + "agora_asr_language": language, + }, + "llm": { + "prompt": prompt, + "greeting": combined_greeting, + "model": "DeepSeek-R1", + }, + "tts": { + "azure_synthesis_voice_name": voiceNameMap[language]["azure"][voiceType] + } + } + } else if (graphName == "qwq_32b") { + return { + "agora_rtc": { + "agora_asr_language": language, + }, + "llm": { + "prompt": prompt, + "greeting": combined_greeting, + "model": "qwq-plus", + }, + "tts": { + "azure_synthesis_voice_name": voiceNameMap[language]["azure"][voiceType] + } + } + } + + + return {} +} \ No newline at end of file diff --git a/demo/src/app/api/agents/start/route.ts b/demo/src/app/api/agents/start/route.ts new file mode 100644 index 0000000000000000000000000000000000000000..09f7f6192c1904eb97cc18971cd95ee5aa72fb89 --- /dev/null +++ b/demo/src/app/api/agents/start/route.ts @@ -0,0 +1,78 @@ +import { NextRequest, NextResponse } from 'next/server'; +import { getGraphProperties } from './graph'; +import axios from 'axios'; +/** + * Handles the POST request to start an agent. + * + * @param request - The NextRequest object representing the incoming request. + * @returns A NextResponse object representing the response to be sent back to the client. 
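+ *
+ * A sketch of the JSON body this route expects, inferred from the
+ * destructuring below (fields that are only sometimes needed are marked "?"):
+ *   { request_id, channel_name, user_uid, graph_name, language,
+ *     voice_type, prompt?, greeting?, coze_token?, coze_bot_id?,
+ *     coze_base_url?, dify_api_key? }
+ * The properties object sent onward takes the per-graph shape returned by
+ * getGraphProperties, e.g. for "va_openai_azure":
+ *   { agora_rtc: { agora_asr_language }, llm: { model, prompt, greeting },
+ *     tts: { azure_synthesis_voice_name } }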
+ */ +export async function POST(request: NextRequest) { + try { + const { AGENT_SERVER_URL } = process.env; + + // Check if environment variables are available + if (!AGENT_SERVER_URL) { + throw "Environment variables are not available"; + } + + const body = await request.json(); + const { + request_id, + channel_name, + user_uid, + graph_name, + language, + voice_type, + prompt, + greeting, + coze_token, + coze_bot_id, + coze_base_url, + dify_api_key, + } = body; + + let properties: any = getGraphProperties(graph_name, language, voice_type, prompt, greeting); + if (graph_name.includes("coze")) { + properties["coze_python_async"]["token"] = coze_token; + properties["coze_python_async"]["bot_id"] = coze_bot_id; + properties["coze_python_async"]["base_url"] = coze_base_url; + } + if (graph_name.includes("dify")) { + properties["llm"]["api_key"] = dify_api_key; + } + + console.log(`Starting agent for request ID: ${JSON.stringify({ + request_id, + channel_name, + user_uid, + graph_name, + // Get the graph properties based on the graph name, language, and voice type + properties, + })}`); + + console.log(`AGENT_SERVER_URL: ${AGENT_SERVER_URL}/start`); + + // Send a POST request to start the agent + const response = await axios.post(`${AGENT_SERVER_URL}/start`, { + request_id, + channel_name, + user_uid, + graph_name, + // Get the graph properties based on the graph name, language, and voice type + properties, + }); + + const responseData = response.data; + + return NextResponse.json(responseData, { status: response.status }); + } catch (error) { + if (error instanceof Response) { + const errorData = await error.json(); + return NextResponse.json(errorData, { status: error.status }); + } else { + console.error(`Error starting agent: ${error}`); + return NextResponse.json({ code: "1", data: null, msg: "Internal Server Error" }, { status: 500 }); + } + } +} \ No newline at end of file diff --git a/demo/src/app/favicon.ico b/demo/src/app/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..92b8b6ba0c4d802bbfe42381be3e83e83f166f72 Binary files /dev/null and b/demo/src/app/favicon.ico differ diff --git a/demo/src/app/global.css b/demo/src/app/global.css new file mode 100644 index 0000000000000000000000000000000000000000..f321d05f8a371aff17cfb5ea1b1a077bdd41bc0f --- /dev/null +++ b/demo/src/app/global.css @@ -0,0 +1,157 @@ +@tailwind base; +@tailwind components; +@tailwind utilities; + +* { + box-sizing: border-box; + padding: 0; + margin: 0; +} + +html, +body { + background-color: #0f0f11; + font-family: "PingFang SC"; + height: 100%; +} + +a { + color: inherit; + text-decoration: none; +} + +@media (prefers-color-scheme: dark) { + html { + color-scheme: dark; + } +} + +.ant-select-arrow { + color: #667085 !important; +} + +.ant-select-selection-item { + color: #667085 !important; +} + +.ant-select-selector { + border: 1px solid #272a2f !important; + background-color: #272a2f !important; +} + +.ant-select-dropdown { + background-color: #1e2024 !important; +} + +.ant-select-item { + background: #1e2024 !important; + color: var(--Grey-600, #667085) !important; +} + +.ant-select-item-option-selected { + background: #272a2f !important; + color: var(--Grey-300, #eaecf0) !important; +} + +.ant-popover-inner { + /* width: 260px !important; */ + background: #1e2025 !important; +} + +.ant-select-selection-placeholder { + color: var(--Grey-600, #667085) !important; +} + +.ant-empty-description { + color: var(--Grey-600, #667085) !important; +} + +@layer base { + :root { + 
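+    /* Theme tokens stored as space-separated HSL components and consumed
+       as hsl(var(--token)) elsewhere in this file (see the scrollbar rules
+       below), presumably also mapped into the Tailwind theme; light-mode
+       values here, dark-mode overrides in .dark. */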
--background: 0 0% 100%; + --foreground: 0 0% 3.9%; + --card: 0 0% 100%; + --card-foreground: 0 0% 3.9%; + --popover: 0 0% 100%; + --popover-foreground: 0 0% 3.9%; + --primary: 0 0% 9%; + --primary-foreground: 0 0% 98%; + --secondary: 0 0% 96.1%; + --secondary-foreground: 0 0% 9%; + --muted: 0 0% 96.1%; + --muted-foreground: 0 0% 45.1%; + --accent: 0 0% 96.1%; + --accent-foreground: 0 0% 9%; + --destructive: 0 84.2% 60.2%; + --destructive-foreground: 0 0% 98%; + --border: 0 0% 89.8%; + --input: 0 0% 89.8%; + --ring: 0 0% 3.9%; + --chart-1: 12 76% 61%; + --chart-2: 173 58% 39%; + --chart-3: 197 37% 24%; + --chart-4: 43 74% 66%; + --chart-5: 27 87% 67%; + --radius: 0.5rem; + } + .dark { + --background: 0 0% 3.9%; + --foreground: 0 0% 98%; + --card: 0 0% 3.9%; + --card-foreground: 0 0% 98%; + --popover: 0 0% 3.9%; + --popover-foreground: 0 0% 98%; + --primary: 0 0% 98%; + --primary-foreground: 0 0% 9%; + --secondary: 0 0% 14.9%; + --secondary-foreground: 0 0% 98%; + --muted: 0 0% 14.9%; + --muted-foreground: 0 0% 63.9%; + --accent: 0 0% 14.9%; + --accent-foreground: 0 0% 98%; + --destructive: 0 62.8% 30.6%; + --destructive-foreground: 0 0% 98%; + --border: 0 0% 14.9%; + --input: 0 0% 14.9%; + --ring: 0 0% 83.1%; + --chart-1: 220 70% 50%; + --chart-2: 160 60% 45%; + --chart-3: 30 80% 55%; + --chart-4: 280 65% 60%; + --chart-5: 340 75% 55%; + } +} + +@layer base { + * { + @apply border-border; + } + body { + @apply bg-background text-foreground; + } +} + +/* Custom Scrollbar Styles */ +::-webkit-scrollbar { + width: 10px; +} + +::-webkit-scrollbar-track { + background: transparent; + border-radius: 5px; +} + +::-webkit-scrollbar-thumb { + background: hsl(var(--muted)); + border-radius: 5px; +} + +::-webkit-scrollbar-thumb:hover { + background: hsl(var(--muted-foreground)); +} + +/* For Firefox */ +* { + scrollbar-width: thin; + scrollbar-color: hsl(var(--muted)) transparent; +} diff --git a/demo/src/app/index.module.css b/demo/src/app/index.module.css new file mode 100644 index 0000000000000000000000000000000000000000..408f2d7b530ed840fdfd3a01961a9bcccad4688d --- /dev/null +++ b/demo/src/app/index.module.css @@ -0,0 +1,94 @@ +@media (max-width: 1400px) { + .login { + position: absolute; + left: 0; + top: 0; + width: 100%; + height: 100%; + overflow: hidden; + background: url("../assets/background.jpg") no-repeat center center; + background-size: cover; + box-sizing: border-box; + } + + .starts { + width: 1px; + height: 1px; + background: transparent; + box-shadow: + 145px 234px #fff, + 876px 543px #fff; + animation: animStar 50s linear infinite; + } + + .starts2 { + width: 2px; + height: 2px; + box-shadow: + 445px 234px #fff, + 276px 943px #fff; + animation: animStar 100s linear infinite; + } + + .starts3 { + width: 3px; + height: 3px; + background: transparent; + box-shadow: + 745px 834px #fff, + 176px 243px #fff; + animation: animStar 150s linear infinite; + } +} + +@media (min-width: 1400px) { + .login { + position: absolute; + left: 0; + top: 0; + width: 100%; + height: 100%; + overflow: hidden; + background: url("../assets/background.jpg") no-repeat center center; + background-size: cover; + box-sizing: border-box; + } + + .starts { + width: 1px; + height: 1px; + background: transparent; + box-shadow: + 45vw 34vh #fff, + 76vw 43vh #fff; + animation: animStar 50s linear infinite; + } + + .starts2 { + width: 2px; + height: 2px; + box-shadow: + 145vw 134vh #fff, + 76vw 143vh #fff; + animation: animStar 100s linear infinite; + } + + .starts3 { + width: 3px; + height: 3px; + background: 
transparent; + box-shadow: + 45vw 134vh #fff, + 176vw 43vh #fff; + animation: animStar 150s linear infinite; + } +} + +@keyframes animStar { + from { + transform: translateY(0px); + } + to { + transform: translateY(-2000px); + } +} diff --git a/demo/src/app/layout.tsx b/demo/src/app/layout.tsx new file mode 100644 index 0000000000000000000000000000000000000000..1cf7870b234229049d350934b0abb66ebd8bb39d --- /dev/null +++ b/demo/src/app/layout.tsx @@ -0,0 +1,57 @@ +import { StoreProvider } from "@/store" +import type { Metadata, Viewport } from "next" +import "./global.css" +import { Toaster } from "@/components/ui/sonner" +import { Roboto } from "next/font/google" +import { cn } from "@/lib/utils" + +const roboto = Roboto({ + subsets: ["latin"], + weight: ["400", "700"], + variable: "--font-roboto", + display: "swap", +}) + +export const metadata: Metadata = { + title: "Real Agent", + description: + "", + appleWebApp: { + capable: true, + statusBarStyle: "black", + }, +} + +export const viewport: Viewport = { + width: "device-width", + initialScale: 1, + minimumScale: 1, + maximumScale: 1, + userScalable: false, + viewportFit: "cover", +} + +export default function RootLayout({ + children, +}: Readonly<{ + children: React.ReactNode +}>) { + return ( + + + {/* */} + {children} + {/* */} + + + + ) +} diff --git a/demo/src/app/page.tsx b/demo/src/app/page.tsx new file mode 100644 index 0000000000000000000000000000000000000000..7f109160fe3250658d9fd8bb1da3a9f0754b8e1a --- /dev/null +++ b/demo/src/app/page.tsx @@ -0,0 +1,52 @@ +"use client" + +import { useAppSelector, EMobileActiveTab } from "@/common" +import dynamic from "next/dynamic" + +import Header from "@/components/Layout/Header" +import Action from "@/components/Layout/Action" +// import RTCCard from "@/components/Dynamic/RTCCard" +// import ChatCard from "@/components/Chat/ChatCard" +import AuthInitializer from "@/components/authInitializer" +import { cn } from "@/lib/utils" + +const DynamicRTCCard = dynamic(() => import("@/components/Dynamic/RTCCard"), { + ssr: false, +}) + +const DynamicChatCard = dynamic(() => import("@/components/Chat/ChatCard"), { + ssr: false, +}) + +export default function Home() { + const mobileActiveTab = useAppSelector( + (state) => state.global.mobileActiveTab, + ) + + return ( + +
+
+ +
+ + +
+
+
+ ) +} diff --git a/demo/src/assets/background.jpg b/demo/src/assets/background.jpg new file mode 100644 index 0000000000000000000000000000000000000000..027623438677ced6d9c14ca92075cc8c6486158e Binary files /dev/null and b/demo/src/assets/background.jpg differ diff --git a/demo/src/assets/cam_mute.svg b/demo/src/assets/cam_mute.svg new file mode 100644 index 0000000000000000000000000000000000000000..a4640ae4dbcfc7e0aad4216ea8cb8f2868bdbd94 --- /dev/null +++ b/demo/src/assets/cam_mute.svg @@ -0,0 +1,3 @@ + + + diff --git a/demo/src/assets/cam_unmute.svg b/demo/src/assets/cam_unmute.svg new file mode 100644 index 0000000000000000000000000000000000000000..1eebfaa63f5265ebf27d72e87f1c01a9983e70aa --- /dev/null +++ b/demo/src/assets/cam_unmute.svg @@ -0,0 +1,3 @@ + + + diff --git a/demo/src/assets/color_picker.svg b/demo/src/assets/color_picker.svg new file mode 100644 index 0000000000000000000000000000000000000000..fb9bb33e2acb184fd2fed28cf4a088063356f0df --- /dev/null +++ b/demo/src/assets/color_picker.svg @@ -0,0 +1,17 @@ + + + + + + + + + + diff --git a/demo/src/assets/github.svg b/demo/src/assets/github.svg new file mode 100644 index 0000000000000000000000000000000000000000..e6566c4143d01298a02f32a41b35bc8f8b5a26da --- /dev/null +++ b/demo/src/assets/github.svg @@ -0,0 +1,3 @@ + + + diff --git a/demo/src/assets/info.svg b/demo/src/assets/info.svg new file mode 100644 index 0000000000000000000000000000000000000000..8ca99511460b6f5f6c8c01b4ec6803edc718f3dd --- /dev/null +++ b/demo/src/assets/info.svg @@ -0,0 +1,3 @@ + + + diff --git a/demo/src/assets/logo.svg b/demo/src/assets/logo.svg new file mode 100644 index 0000000000000000000000000000000000000000..af99893ae2c7f8be8cbff64404530a7a224af629 --- /dev/null +++ b/demo/src/assets/logo.svg @@ -0,0 +1,46 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/demo/src/assets/logo_small.svg b/demo/src/assets/logo_small.svg new file mode 100644 index 0000000000000000000000000000000000000000..34e755bdc4abc1d58607bf4c72db5be2d3f837af --- /dev/null +++ b/demo/src/assets/logo_small.svg @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/demo/src/assets/mic_mute.svg b/demo/src/assets/mic_mute.svg new file mode 100644 index 0000000000000000000000000000000000000000..dd4a17ddf291ff805f3fbc4e8582b1243a40c741 --- /dev/null +++ b/demo/src/assets/mic_mute.svg @@ -0,0 +1,3 @@ + + + diff --git a/demo/src/assets/mic_unmute.svg b/demo/src/assets/mic_unmute.svg new file mode 100644 index 0000000000000000000000000000000000000000..18e78236f65442bea2d6b1f05728d706c4ed1b99 --- /dev/null +++ b/demo/src/assets/mic_unmute.svg @@ -0,0 +1,3 @@ + + + diff --git a/demo/src/assets/network/average.svg b/demo/src/assets/network/average.svg new file mode 100644 index 0000000000000000000000000000000000000000..9a27072f8217c8bdf9f585c69c298fdf6cf05f03 --- /dev/null +++ b/demo/src/assets/network/average.svg @@ -0,0 +1,7 @@ + + + + + + + \ No newline at end of file diff --git a/demo/src/assets/network/disconnected.svg b/demo/src/assets/network/disconnected.svg new file mode 100644 index 0000000000000000000000000000000000000000..b7db1d719bd64f93aec676d99a598bc28b4b5659 --- /dev/null +++ b/demo/src/assets/network/disconnected.svg @@ -0,0 +1,9 @@ + + + + + diff --git a/demo/src/assets/network/excellent.svg b/demo/src/assets/network/excellent.svg new file mode 100644 index 0000000000000000000000000000000000000000..55b9fc9e6fdfc6e21a015438fb380fd984ce17f4 --- /dev/null +++ b/demo/src/assets/network/excellent.svg @@ -0,0 +1,6 @@ + + + + diff 
--git a/demo/src/assets/network/good.svg b/demo/src/assets/network/good.svg new file mode 100644 index 0000000000000000000000000000000000000000..8c36a7e792a56b902bf51cb31371f4237832c87d --- /dev/null +++ b/demo/src/assets/network/good.svg @@ -0,0 +1,7 @@ + + + + + + + \ No newline at end of file diff --git a/demo/src/assets/network/poor.svg b/demo/src/assets/network/poor.svg new file mode 100644 index 0000000000000000000000000000000000000000..d9df02385e2c6debba2a5f9e17218ff05b4e5d1f --- /dev/null +++ b/demo/src/assets/network/poor.svg @@ -0,0 +1,7 @@ + + + + + + + \ No newline at end of file diff --git a/demo/src/assets/pdf.svg b/demo/src/assets/pdf.svg new file mode 100644 index 0000000000000000000000000000000000000000..dc67f4d571a370de1fcebecb869db3987b9b1e9e --- /dev/null +++ b/demo/src/assets/pdf.svg @@ -0,0 +1,3 @@ + + + diff --git a/demo/src/assets/transcription.svg b/demo/src/assets/transcription.svg new file mode 100644 index 0000000000000000000000000000000000000000..8b887a6ff0a547ce4e4109100fdad447108e47fe --- /dev/null +++ b/demo/src/assets/transcription.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/demo/src/assets/voice.svg b/demo/src/assets/voice.svg new file mode 100644 index 0000000000000000000000000000000000000000..86a880b05369977d6260c298344f2b8ab1278802 --- /dev/null +++ b/demo/src/assets/voice.svg @@ -0,0 +1,3 @@ + + + diff --git a/demo/src/common/constant.ts b/demo/src/common/constant.ts new file mode 100644 index 0000000000000000000000000000000000000000..c81d8554cc171a9d26237ddeb6ca7f0cf3065fe0 --- /dev/null +++ b/demo/src/common/constant.ts @@ -0,0 +1,194 @@ +import { + IOptions, + ColorItem, + LanguageOptionItem, + VoiceOptionItem, + GraphOptionItem, + ICozeSettings, + IDifySettings, +} from "@/types" +export const GITHUB_URL = "https://github.com/TEN-framework/TEN-Agent" +export const API_GH_GET_REPO_INFO = + "https://api.github.com/repos/TEN-framework/TEN-Agent" +export const OPTIONS_KEY = "__options__" +export const AGENT_SETTINGS_KEY = "__agent_settings__" +export const COZE_SETTINGS_KEY = "__coze_settings__" +export const DIFY_SETTINGS_KEY = "__dify_settings__" +export const DEFAULT_OPTIONS: IOptions = { + channel: "", + userName: "", + userId: 0, + appId: "", + token: "", +} + +export const DEFAULT_AGENT_SETTINGS = { + greeting: "", + prompt: "", +} + +export enum ECozeBaseUrl { + CN = "https://api.coze.cn", + GLOBAL = "https://api.coze.com", +} + +export const DEFAULT_COZE_SETTINGS: ICozeSettings = { + token: "", + bot_id: "", + base_url: ECozeBaseUrl.GLOBAL, +} + +export const DEFAULT_DIFY_SETTINGS: IDifySettings = { + api_key: "", +} + +export const DESCRIPTION = "A Realtime Conversational AI Agent powered by TEN" +export const LANGUAGE_OPTIONS: LanguageOptionItem[] = [ + { + label: "English", + value: "en-US", + }, + { + label: "Chinese", + value: "zh-CN", + }, + { + label: "Korean", + value: "ko-KR", + }, + { + label: "Japanese", + value: "ja-JP", + }, +] +export const GRAPH_OPTIONS: GraphOptionItem[] = [ + { + label: "Voice Agent with QWQ-32B Reasoning", + value: "qwq_32b", + }, + { + label: "Voice Agent with DeepSeek R1 Reasoning", + value: "deepseek_r1", + }, + { + label: "Voice Agent Gemini 2.0 Realtime", + value: "va_gemini_v2v", + }, + { + label: "Voice Agent with Dify", + value: "va_dify_azure", + }, + { + label: "Voice Agent / STT + LLM + TTS", + value: "va_openai_azure", + }, + // { + // label: "Voice Agent with Knowledge - RAG + Qwen LLM + Cosy TTS", + // value: "va_qwen_rag" + // }, + { + label: "Voice Agent OpenAI Realtime", + value: 
"va_openai_v2v", + }, + { + label: "Voice Agent OpenAI Realtime + Custom STT/TTS", + value: "va_openai_v2v_fish", + }, + { + label: "Voice Agent Coze Bot + Azure TTS", + value: "va_coze_azure", + }, + { + label: "Voice Story Teller with Image Generator", + value: "story_teller_stt_integrated", + }, + { + label: "Voice Agent / STT + Nova Multimodal + TTS", + value: "va_nova_multimodal_aws", + }, +] + +export const isRagGraph = (graphName: string) => { + return graphName === "va_qwen_rag" +} + +export const isLanguageSupported = (graphName: string) => { + return !["va_gemini_v2v"].includes(graphName) +} + +export const isVoiceGenderSupported = (graphName: string) => { + return !["va_gemini_v2v"].includes(graphName) +} + + +export enum VideoSourceType { + CAMERA = 'camera', + SCREEN = 'screen', +} + +export const VIDEO_SOURCE_OPTIONS = [{ + label: "Camera", + value: VideoSourceType.CAMERA, +}, { + label: "Screen Share", + value: VideoSourceType.SCREEN, +}] + +export const VOICE_OPTIONS: VoiceOptionItem[] = [ + { + label: "Male", + value: "male", + }, + { + label: "Female", + value: "female", + }, +] +export const COLOR_LIST: ColorItem[] = [ + { + active: "#0888FF", + default: "#143354", + }, + { + active: "#563FD8", + default: "#2C2553", + }, + { + active: "#18A957", + default: "#173526", + }, + { + active: "#FFAB08", + default: "#423115", + }, + { + active: "#FD5C63", + default: "#462629", + }, + { + active: "#E225B2", + default: "#481C3F", + }, +] + +export type VoiceTypeMap = { + [voiceType: string]: string +} + +export type VendorNameMap = { + [vendorName: string]: VoiceTypeMap +} + +export type LanguageMap = { + [language: string]: VendorNameMap +} + +export enum EMobileActiveTab { + AGENT = "agent", + CHAT = "chat", +} + +export const MOBILE_ACTIVE_TAB_MAP = { + [EMobileActiveTab.AGENT]: "Agent", + [EMobileActiveTab.CHAT]: "Chat", +} \ No newline at end of file diff --git a/demo/src/common/hooks.ts b/demo/src/common/hooks.ts new file mode 100644 index 0000000000000000000000000000000000000000..8bb87c5392661df14bff279b4b6aad2a0efc56da --- /dev/null +++ b/demo/src/common/hooks.ts @@ -0,0 +1,126 @@ +"use client" + +import { IMicrophoneAudioTrack } from "agora-rtc-sdk-ng" +import { normalizeFrequencies } from "./utils" +import { useState, useEffect, useMemo, useRef } from "react" +import type { AppDispatch, AppStore, RootState } from "../store" +import { useDispatch, useSelector, useStore } from "react-redux" + +export const useAppDispatch = useDispatch.withTypes() +export const useAppSelector = useSelector.withTypes() +export const useAppStore = useStore.withTypes() + +export const useMultibandTrackVolume = ( + track?: IMicrophoneAudioTrack | MediaStreamTrack, + bands: number = 5, + loPass: number = 100, + hiPass: number = 600, +) => { + const [frequencyBands, setFrequencyBands] = useState([]) + + useEffect(() => { + if (!track) { + return setFrequencyBands(new Array(bands).fill(new Float32Array(0))) + } + + const ctx = new AudioContext() + let finTrack = + track instanceof MediaStreamTrack ? 
track : track.getMediaStreamTrack() + const mediaStream = new MediaStream([finTrack]) + const source = ctx.createMediaStreamSource(mediaStream) + const analyser = ctx.createAnalyser() + analyser.fftSize = 2048 + + source.connect(analyser) + + const bufferLength = analyser.frequencyBinCount + const dataArray = new Float32Array(bufferLength) + + const updateVolume = () => { + analyser.getFloatFrequencyData(dataArray) + let frequencies: Float32Array = new Float32Array(dataArray.length) + for (let i = 0; i < dataArray.length; i++) { + frequencies[i] = dataArray[i] + } + frequencies = frequencies.slice(loPass, hiPass) + + const normalizedFrequencies = normalizeFrequencies(frequencies) + const chunkSize = Math.ceil(normalizedFrequencies.length / bands) + const chunks: Float32Array[] = [] + for (let i = 0; i < bands; i++) { + chunks.push( + normalizedFrequencies.slice(i * chunkSize, (i + 1) * chunkSize), + ) + } + + setFrequencyBands(chunks) + } + + const interval = setInterval(updateVolume, 10) + + return () => { + source.disconnect() + clearInterval(interval) + } + }, [track, loPass, hiPass, bands]) + + return frequencyBands +} + +export const useAutoScroll = (ref: React.RefObject) => { + const callback: MutationCallback = (mutationList, observer) => { + mutationList.forEach((mutation) => { + switch (mutation.type) { + case "childList": + if (!ref.current) { + return + } + ref.current.scrollTop = ref.current.scrollHeight + break + } + }) + } + + useEffect(() => { + if (!ref.current) { + return + } + const observer = new MutationObserver(callback) + observer.observe(ref.current, { + childList: true, + subtree: true, + }) + + return () => { + observer.disconnect() + } + }, [ref]) +} + +// export const useSmallScreen = () => { +// const screens = useBreakpoint(); + +// const xs = useMemo(() => { +// return !screens.sm && screens.xs +// }, [screens]) + +// const sm = useMemo(() => { +// return !screens.md && screens.sm +// }, [screens]) + +// return { +// xs, +// sm, +// isSmallScreen: xs || sm +// } +// } + +export const usePrevious = (value: any) => { + const ref = useRef() + + useEffect(() => { + ref.current = value + }, [value]) + + return ref.current +} diff --git a/demo/src/common/index.ts b/demo/src/common/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..3c2b0300e9c09f82fa36ba6c5d2036b41ec96621 --- /dev/null +++ b/demo/src/common/index.ts @@ -0,0 +1,6 @@ +export * from "./hooks" +export * from "./constant" +export * from "./utils" +export * from "./storage" +export * from "./request" +export * from "./mock" diff --git a/demo/src/common/mock.ts b/demo/src/common/mock.ts new file mode 100644 index 0000000000000000000000000000000000000000..16590cef32b6aef6a2902b0a456b0188df1ef628 --- /dev/null +++ b/demo/src/common/mock.ts @@ -0,0 +1,39 @@ +import { getRandomUserId } from "./utils" +import { IChatItem, EMessageType, EMessageDataType } from "@/types" + +const SENTENCES = [ + "Lorem ipsum dolor sit amet, consectetur adipiscing elit.", + "Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium.", + "Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit.", + "Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit.", + "Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.", + "Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.", + "Excepteur sint occaecat cupidatat 
non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.", +] + +export const genRandomParagraph = (num: number = 0): string => { + let paragraph = "" + for (let i = 0; i < num; i++) { + const randomIndex = Math.floor(Math.random() * SENTENCES.length) + paragraph += SENTENCES[randomIndex] + " " + } + + return paragraph.trim() +} + +export const genRandomChatList = (num: number = 10): IChatItem[] => { + const arr: IChatItem[] = [] + for (let i = 0; i < num; i++) { + const type = Math.random() > 0.5 ? EMessageType.AGENT : EMessageType.USER + arr.push({ + userId: getRandomUserId(), + userName: type == "agent" ? "Agent" : "You", + text: genRandomParagraph(3), + data_type: EMessageDataType.TEXT, + type, + time: Date.now(), + }) + } + + return arr +} diff --git a/demo/src/common/request.ts b/demo/src/common/request.ts new file mode 100644 index 0000000000000000000000000000000000000000..fd47c6eef9c65eb905af2c4a2999f04cbd5ae0fb --- /dev/null +++ b/demo/src/common/request.ts @@ -0,0 +1,128 @@ +import { genUUID } from "./utils" +import { Language } from "@/types" +import axios from "axios" + +export interface StartRequestConfig { + channel: string + userId: number + graphName: string + language: Language + voiceType: "male" | "female" + prompt?: string + greeting?: string + coze_token?: string + coze_bot_id?: string + coze_base_url?: string + dify_api_key?: string +} + +interface GenAgoraDataConfig { + userId: string | number + channel: string +} + +export const apiGenAgoraData = async (config: GenAgoraDataConfig) => { + // the request will be rewritten at next.config.mjs to send to $AGENT_SERVER_URL + const url = `/api/token/generate` + const { userId, channel } = config + const data = { + request_id: genUUID(), + uid: userId, + channel_name: channel, + } + let resp: any = await axios.post(url, data) + resp = resp.data || {} + return resp +} + +export const apiStartService = async ( + config: StartRequestConfig, +): Promise => { + // look at app/api/agents/start/route.ts for the server-side implementation + const url = `/api/agents/start` + const { + channel, + userId, + graphName, + language, + voiceType, + greeting, + prompt, + coze_token, + coze_bot_id, + coze_base_url, + dify_api_key, + } = config + const data = { + request_id: genUUID(), + channel_name: channel, + user_uid: userId, + graph_name: graphName, + language, + voice_type: voiceType, + greeting: greeting ?? undefined, + prompt: prompt ?? undefined, + coze_token: coze_token ?? undefined, + coze_bot_id: coze_bot_id ?? undefined, + coze_base_url: coze_base_url ?? undefined, + dify_api_key: dify_api_key ?? 
undefined + } + let resp: any = await axios.post(url, data) + resp = resp.data || {} + return resp +} + +export const apiStopService = async (channel: string) => { + // the request will be rewritten at middleware.tsx to send to $AGENT_SERVER_URL + const url = `/api/agents/stop` + const data = { + request_id: genUUID(), + channel_name: channel, + } + let resp: any = await axios.post(url, data) + resp = resp.data || {} + return resp +} + +export const apiGetDocumentList = async () => { + // the request will be rewritten at middleware.tsx to send to $AGENT_SERVER_URL + const url = `/api/vector/document/preset/list` + let resp: any = await axios.get(url) + resp = resp.data || {} + if (resp.code !== "0") { + throw new Error(resp.msg) + } + return resp +} + +export const apiUpdateDocument = async (options: { + channel: string + collection: string + fileName: string +}) => { + // the request will be rewritten at middleware.tsx to send to $AGENT_SERVER_URL + const url = `/api/vector/document/update` + const { channel, collection, fileName } = options + const data = { + request_id: genUUID(), + channel_name: channel, + collection: collection, + file_name: fileName, + } + let resp: any = await axios.post(url, data) + resp = resp.data || {} + return resp +} + +// ping/pong +export const apiPing = async (channel: string) => { + // the request will be rewritten at middleware.tsx to send to $AGENT_SERVER_URL + const url = `/api/agents/ping` + const data = { + request_id: genUUID(), + channel_name: channel, + } + let resp: any = await axios.post(url, data) + resp = resp.data || {} + return resp +} diff --git a/demo/src/common/storage.ts b/demo/src/common/storage.ts new file mode 100644 index 0000000000000000000000000000000000000000..845a98729c9c7756538ff6ce90ae84bc3aa165cf --- /dev/null +++ b/demo/src/common/storage.ts @@ -0,0 +1,88 @@ +import { IAgentSettings, IOptions, ICozeSettings, IDifySettings } from "@/types" +import { + OPTIONS_KEY, + DEFAULT_OPTIONS, + AGENT_SETTINGS_KEY, + DEFAULT_AGENT_SETTINGS, + COZE_SETTINGS_KEY, + DEFAULT_COZE_SETTINGS, + DIFY_SETTINGS_KEY, + DEFAULT_DIFY_SETTINGS, +} from "./constant" + +export const getOptionsFromLocal = (): { + options: IOptions + settings: IAgentSettings + cozeSettings: ICozeSettings + difySettings: IDifySettings +} => { + let data = { + options: DEFAULT_OPTIONS, + settings: DEFAULT_AGENT_SETTINGS, + cozeSettings: DEFAULT_COZE_SETTINGS, + difySettings: DEFAULT_DIFY_SETTINGS, + } + if (typeof window !== "undefined") { + const options = localStorage.getItem(OPTIONS_KEY) + if (options) { + data.options = JSON.parse(options) + } + const settings = localStorage.getItem(AGENT_SETTINGS_KEY) + if (settings) { + data.settings = JSON.parse(settings) + } + const cozeSettings = localStorage.getItem(COZE_SETTINGS_KEY) + if (cozeSettings) { + data.cozeSettings = JSON.parse(cozeSettings) + } + const difySettings = localStorage.getItem(DIFY_SETTINGS_KEY) + if (difySettings) { + data.difySettings = JSON.parse(difySettings) + } + } + return data +} + +export const setOptionsToLocal = (options: IOptions) => { + if (typeof window !== "undefined") { + localStorage.setItem(OPTIONS_KEY, JSON.stringify(options)) + } +} + +export const setAgentSettingsToLocal = (settings: IAgentSettings) => { + if (typeof window !== "undefined") { + localStorage.setItem(AGENT_SETTINGS_KEY, JSON.stringify(settings)) + } +} + +export const setCozeSettingsToLocal = (settings: ICozeSettings) => { + if (typeof window !== "undefined") { + localStorage.setItem(COZE_SETTINGS_KEY, JSON.stringify(settings)) + } 
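+  // note: the typeof window guards in this module keep the helpers safe to
+  // import during Next.js server-side rendering, where localStorage does not
+  // exist: reads fall back to the DEFAULT_* values and writes are no-ops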
+} + +export const setDifySettingsToLocal = (settings: IDifySettings) => { + if (typeof window !== "undefined") { + localStorage.setItem(DIFY_SETTINGS_KEY, JSON.stringify(settings)) + } +} + +export const resetSettingsByKeys = (keys: string | string[]) => { + if (typeof window !== "undefined") { + if (Array.isArray(keys)) { + keys.forEach((key) => { + localStorage.removeItem(key) + }) + } else { + localStorage.removeItem(keys) + } + } +} + +export const resetCozeSettings = () => { + resetSettingsByKeys(COZE_SETTINGS_KEY) +} + +export const resetDifySettings = () => { + resetSettingsByKeys(DIFY_SETTINGS_KEY) +} diff --git a/demo/src/common/utils.ts b/demo/src/common/utils.ts new file mode 100644 index 0000000000000000000000000000000000000000..96bf8e4d76ef77663c8e4d4818afbe47a8a1ec6e --- /dev/null +++ b/demo/src/common/utils.ts @@ -0,0 +1,89 @@ +import { LanguageOptionItem } from "@/types" + +export const genRandomString = (length: number = 10) => { + let result = ''; + const characters = 'abcdefghijklmnopqrstuvwxyz0123456789'; + const charactersLength = characters.length; + + for (let i = 0; i < length; i++) { + result += characters.charAt(Math.floor(Math.random() * charactersLength)); + } + + return result; +} + + +export const getRandomUserId = (): number => { + return Math.floor(Math.random() * 99999) + 100000 +} + +export const getRandomChannel = (number = 6) => { + return "agora_" + genRandomString(number) +} + + +export const sleep = (ms: number) => { + return new Promise(resolve => setTimeout(resolve, ms)); +} + + +export const normalizeFrequencies = (frequencies: Float32Array) => { + const normalizeDb = (value: number) => { + const minDb = -100; + const maxDb = -10; + let db = 1 - (Math.max(minDb, Math.min(maxDb, value)) * -1) / 100; + db = Math.sqrt(db); + + return db; + }; + + // Normalize all frequency values + return frequencies.map((value) => { + if (value === -Infinity) { + return 0; + } + return normalizeDb(value); + }); +}; + + +export const genUUID = () => { + return "xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx".replace(/[xy]/g, function (c) { + const r = (Math.random() * 16) | 0 + const v = c === "x" ? 
r : (r & 0x3) | 0x8 + return v.toString(16) + }) +} + + +export const isMobile = () => { + return /Mobile|iPhone|iPad|Android|Windows Phone/i.test(navigator.userAgent) +} + + +export const getBrowserLanguage = (): LanguageOptionItem => { + const lang = navigator.language; + + switch (true) { + case lang.startsWith("zh"): + return { + label: "Chinese", + value: "zh-CN" + }; + case lang.startsWith("ko"): + return { + label: "Korean", + value: "ko-KR" + }; + case lang.startsWith("ja"): + return { + label: "Japanese", + value: "ja-JP" + }; + default: + return { + label: "English", + value: "en-US" + }; + } +}; \ No newline at end of file diff --git a/demo/src/components/Agent/AudioVisualizer.tsx b/demo/src/components/Agent/AudioVisualizer.tsx new file mode 100644 index 0000000000000000000000000000000000000000..b4ca850c7c56d276e50a59cd4f6f316acab25655 --- /dev/null +++ b/demo/src/components/Agent/AudioVisualizer.tsx @@ -0,0 +1,52 @@ +export interface AudioVisualizerProps { + type: "agent" | "user" + frequencies: Float32Array[] + gap: number + barWidth: number + minBarHeight: number + maxBarHeight: number + borderRadius: number +} + +export default function AudioVisualizer(props: AudioVisualizerProps) { + const { + frequencies, + gap, + barWidth, + minBarHeight, + maxBarHeight, + borderRadius, + type, + } = props + + const summedFrequencies = frequencies.map((bandFrequencies) => { + const sum = bandFrequencies.reduce((a, b) => a + b, 0) + if (sum <= 0) { + return 0 + } + return Math.sqrt(sum / bandFrequencies.length) + }) + + return ( +
+ {summedFrequencies.map((frequency, index) => { + const style = { + height: + minBarHeight + frequency * (maxBarHeight - minBarHeight) + "px", + borderRadius: borderRadius + "px", + width: barWidth + "px", + transition: + "background-color 0.35s ease-out, transform 0.25s ease-out", + // transform: transform, + backgroundColor: type === "agent" ? "#0888FF" : "#EAECF0", + boxShadow: type === "agent" ? "0 0 10px #EAECF0" : "none", + } + + return + })} +
+ ) +} diff --git a/demo/src/components/Agent/Camera.tsx b/demo/src/components/Agent/Camera.tsx new file mode 100644 index 0000000000000000000000000000000000000000..fb331b2513503445446ff8c79a81329970dd0c73 --- /dev/null +++ b/demo/src/components/Agent/Camera.tsx @@ -0,0 +1,166 @@ +"use client" + +import * as React from "react" +// import CamSelect from "./camSelect" +import { CamIconByStatus } from "@/components/Icon" +import AgoraRTC, { ICameraVideoTrack, ILocalVideoTrack } from "agora-rtc-sdk-ng" +// import { LocalStreamPlayer } from "../streamPlayer" +// import { useSmallScreen } from "@/common" +import { + DeviceSelect, +} from "@/components/Agent/Microphone" +import { LocalStreamPlayer } from "@/components/Agent/StreamPlayer" +import { VIDEO_SOURCE_OPTIONS, VideoSourceType } from "@/common/constant" +import { MonitorIcon, MonitorXIcon } from "lucide-react" +import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "../ui/select" +import { Button } from "../ui/button" + + +export const ScreenIconByStatus = ( + props: React.SVGProps & { active?: boolean; color?: string }, +) => { + const { active, color, ...rest } = props + if (active) { + return + } + return +} + +export function VideoDeviceWrapper(props: { + children: React.ReactNode + title: string + Icon: ( + props: React.SVGProps & { active?: boolean }, + ) => React.ReactNode + onIconClick: () => void + videoSourceType: VideoSourceType + onVideoSourceChange: (value: VideoSourceType) => void + isActive: boolean + select?: React.ReactNode +}) { + const { Icon, onIconClick, isActive, select, children, onVideoSourceChange, videoSourceType } = props + + return ( +
+
+
+
{props.title}
+
+ +
+
+
+ + {select} +
+
+ {children} +
+ ) +} + +export default function VideoBlock(props: { + videoSourceType:VideoSourceType, + onVideoSourceChange:(value: VideoSourceType) => void, + cameraTrack?: ICameraVideoTrack, + screenTrack?: ILocalVideoTrack +}) { + const { videoSourceType, cameraTrack, screenTrack, onVideoSourceChange } = props + const [videoMute, setVideoMute] = React.useState(false) + + React.useEffect(() => { + cameraTrack?.setMuted(videoMute) + screenTrack?.setMuted(videoMute) + }, [cameraTrack, screenTrack, videoMute]) + + const onClickMute = () => { + setVideoMute(!videoMute) + } + + return ( + :
} + > +
+ +
+ + ) +} + +interface SelectItem { + label: string + value: string + deviceId: string +} + +const DEFAULT_ITEM: SelectItem = { + label: "Default", + value: "default", + deviceId: "", +} + +const CamSelect = (props: { videoTrack?: ICameraVideoTrack }) => { + const { videoTrack } = props + const [items, setItems] = React.useState([DEFAULT_ITEM]) + const [value, setValue] = React.useState("default") + + React.useEffect(() => { + if (videoTrack) { + const label = videoTrack?.getTrackLabel() + setValue(label) + AgoraRTC.getCameras().then((arr) => { + setItems( + arr.map((item) => ({ + label: item.label, + value: item.label, + deviceId: item.deviceId, + })), + ) + }) + } + }, [videoTrack]) + + const onChange = async (value: string) => { + const target = items.find((item) => item.value === value) + if (target) { + setValue(target.value) + if (videoTrack) { + await videoTrack.setDevice(target.deviceId) + } + } + } + + return ( + + ) +} diff --git a/demo/src/components/Agent/Microphone.tsx b/demo/src/components/Agent/Microphone.tsx new file mode 100644 index 0000000000000000000000000000000000000000..d162a4b55082ce380dc8d060eb06472d28c810f2 --- /dev/null +++ b/demo/src/components/Agent/Microphone.tsx @@ -0,0 +1,179 @@ +"use client" + +import * as React from "react" +import { useMultibandTrackVolume } from "@/common" +import AudioVisualizer from "@/components/Agent/AudioVisualizer" +import AgoraRTC, { IMicrophoneAudioTrack } from "agora-rtc-sdk-ng" +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select" +import { Button } from "@/components/ui/button" +import { MicIconByStatus } from "@/components/Icon" + +export default function MicrophoneBlock(props: { + audioTrack?: IMicrophoneAudioTrack +}) { + const { audioTrack } = props + const [audioMute, setAudioMute] = React.useState(false) + const [mediaStreamTrack, setMediaStreamTrack] = + React.useState() + + React.useEffect(() => { + audioTrack?.on("track-updated", onAudioTrackupdated) + if (audioTrack) { + setMediaStreamTrack(audioTrack.getMediaStreamTrack()) + } + + return () => { + audioTrack?.off("track-updated", onAudioTrackupdated) + } + }, [audioTrack]) + + React.useEffect(() => { + audioTrack?.setMuted(audioMute) + }, [audioTrack, audioMute]) + + const subscribedVolumes = useMultibandTrackVolume(mediaStreamTrack, 20) + + const onAudioTrackupdated = (track: MediaStreamTrack) => { + console.log("[test] audio track updated", track) + setMediaStreamTrack(track) + } + + const onClickMute = () => { + setAudioMute(!audioMute) + } + + return ( + } + > +
+ +
+
+ ) +} + +export function CommonDeviceWrapper(props: { + children: React.ReactNode + title: string + Icon: ( + props: React.SVGProps & { active?: boolean }, + ) => React.ReactNode + onIconClick: () => void + isActive: boolean + select?: React.ReactNode +}) { + const { title, Icon, onIconClick, isActive, select, children } = props + + return ( +
+
+
{title}
+
+ + {select} +
+
+ {children} +
+ ) +} + +export type TDeviceSelectItem = { + label: string + value: string + deviceId: string +} + +export const DEFAULT_DEVICE_ITEM: TDeviceSelectItem = { + label: "Default", + value: "default", + deviceId: "", +} + +export const DeviceSelect = (props: { + items: TDeviceSelectItem[] + value: string + onChange: (value: string) => void + placeholder?: string +}) => { + const { items, value, onChange, placeholder } = props + + return ( + + ) +} + +export const MicrophoneSelect = (props: { + audioTrack?: IMicrophoneAudioTrack +}) => { + const { audioTrack } = props + const [items, setItems] = React.useState([ + DEFAULT_DEVICE_ITEM, + ]) + const [value, setValue] = React.useState("default") + + React.useEffect(() => { + if (audioTrack) { + const label = audioTrack?.getTrackLabel() + setValue(label) + AgoraRTC.getMicrophones().then((arr) => { + setItems( + arr.map((item) => ({ + label: item.label, + value: item.label, + deviceId: item.deviceId, + })), + ) + }) + } + }, [audioTrack]) + + const onChange = async (value: string) => { + const target = items.find((item) => item.value === value) + if (target) { + setValue(target.value) + if (audioTrack) { + await audioTrack.setDevice(target.deviceId) + } + } + } + + return +} diff --git a/demo/src/components/Agent/StreamPlayer.tsx b/demo/src/components/Agent/StreamPlayer.tsx new file mode 100644 index 0000000000000000000000000000000000000000..cc8a6d05d9c24da57afc06519295b27c29aabf9d --- /dev/null +++ b/demo/src/components/Agent/StreamPlayer.tsx @@ -0,0 +1,59 @@ +"use client" + +import * as React from "react" +import { + ICameraVideoTrack, + ILocalVideoTrack, + IMicrophoneAudioTrack, + VideoPlayerConfig, +} from "agora-rtc-sdk-ng" + +export interface StreamPlayerProps { + videoTrack?: ICameraVideoTrack | ILocalVideoTrack + audioTrack?: IMicrophoneAudioTrack + style?: React.CSSProperties + fit?: "cover" | "contain" | "fill" + onClick?: () => void + mute?: boolean +} + +export const LocalStreamPlayer = React.forwardRef( + (props: StreamPlayerProps, ref) => { + const { + videoTrack, + audioTrack, + mute = false, + style = {}, + fit = "cover", + onClick = () => {}, + } = props + const vidDiv = React.useRef(null) + + React.useLayoutEffect(() => { + const config = { fit } as VideoPlayerConfig + if (mute) { + videoTrack?.stop() + } else { + if (!videoTrack?.isPlaying) { + videoTrack?.play(vidDiv.current!, config) + } + } + + return () => { + videoTrack?.stop() + } + }, [videoTrack, fit, mute]) + + // local audio track need not to be played + // useLayoutEffect(() => {}, [audioTrack, localAudioMute]) + + return ( +
+ ) + }, +) diff --git a/demo/src/components/Agent/View.tsx b/demo/src/components/Agent/View.tsx new file mode 100644 index 0000000000000000000000000000000000000000..e33caa8bbc0039b0daef1084a92d8007549956c6 --- /dev/null +++ b/demo/src/components/Agent/View.tsx @@ -0,0 +1,39 @@ +"use client" + +import { useMultibandTrackVolume } from "@/common" +import { cn } from "@/lib/utils" +// import AudioVisualizer from "../audioVisualizer" +import { IMicrophoneAudioTrack } from "agora-rtc-sdk-ng" +import AudioVisualizer from "@/components/Agent/AudioVisualizer" + +export interface AgentViewProps { + audioTrack?: IMicrophoneAudioTrack +} + +export default function AgentView(props: AgentViewProps) { + const { audioTrack } = props + + const subscribedVolumes = useMultibandTrackVolume(audioTrack, 12) + + return ( +
+
Agent
+
+ +
+
+ ) +} diff --git a/demo/src/components/Agent/VoicePresetSelect.tsx b/demo/src/components/Agent/VoicePresetSelect.tsx new file mode 100644 index 0000000000000000000000000000000000000000..9cc90f8ab2095afb26c2f63d9bd3b63cb51e2bca --- /dev/null +++ b/demo/src/components/Agent/VoicePresetSelect.tsx @@ -0,0 +1,47 @@ +"use client" + +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select" +import { useAppSelector, useAppDispatch, VOICE_OPTIONS } from "@/common" +import { setVoiceType } from "@/store/reducers/global" +import type { VoiceType } from "@/types" +import { VoiceIcon } from "@/components/Icon" + +export default function AgentVoicePresetSelect() { + const dispatch = useAppDispatch() + const options = useAppSelector((state) => state.global.options) + const voiceType = useAppSelector((state) => state.global.voiceType) + + const onVoiceChange = (value: string) => { + dispatch(setVoiceType(value as VoiceType)) + } + + return ( + + ) +} diff --git a/demo/src/components/Button/LoadingButton.tsx b/demo/src/components/Button/LoadingButton.tsx new file mode 100644 index 0000000000000000000000000000000000000000..533734fa2eeda3bbf1b1bb9b91a632987ab42c3b --- /dev/null +++ b/demo/src/components/Button/LoadingButton.tsx @@ -0,0 +1,17 @@ +import { Button, ButtonProps } from "@/components/ui/button" +import { AnimatedSpinnerIcon } from "@/components/Icon" + +export interface LoadingButtonProps extends Omit { + loading?: boolean + svgProps?: React.SVGProps +} + +export function LoadingButton(props: LoadingButtonProps) { + const { loading, disabled, children, svgProps, ...rest } = props + return ( + + ) +} diff --git a/demo/src/components/Card/Login.tsx b/demo/src/components/Card/Login.tsx new file mode 100644 index 0000000000000000000000000000000000000000..60bba7f2aada6235f0047fd792f8232730de319e --- /dev/null +++ b/demo/src/components/Card/Login.tsx @@ -0,0 +1,126 @@ +"use client" + +import type React from "react" +import { useRouter } from "next/navigation" +import { useState, useEffect } from "react" +import { + GITHUB_URL, + getRandomUserId, + useAppDispatch, + getRandomChannel, +} from "@/common" +import { setOptions } from "@/store/reducers/global" +import packageData from "../../../package.json" + +import { + Card, + CardContent, + CardDescription, + CardFooter, + CardHeader, + CardTitle, +} from "@/components/ui/card" +import { Button } from "@/components/ui/button" +import { Input } from "@/components/ui/input" +import NextLink from "next/link" +import { GitHubIcon } from "@/components/Icon" +import { toast } from "sonner" +import { LoadingButton } from "@/components/Button/LoadingButton" + +const { version } = packageData + +export default function LoginCard() { + const dispatch = useAppDispatch() + const router = useRouter() + const [userName, setUserName] = useState("") + const [isLoadingSuccess, setIsLoadingSuccess] = useState(false) + + const onUserNameChange = (e: React.ChangeEvent) => { + let value = e.target.value + value = value.replace(/\s/g, "") + setUserName(value) + } + + useEffect(() => { + const onPageLoad = () => { + setIsLoadingSuccess(true) + } + + if (document.readyState === "complete") { + onPageLoad() + } else { + window.addEventListener("load", onPageLoad, false) + return () => window.removeEventListener("load", onPageLoad) + } + }, []) + + const onClickJoin = () => { + if (!userName) { + toast.error("please enter a user name") + return + } + const userId = getRandomUserId() + dispatch( + setOptions({ + userName, + 
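+        // a fresh random channel and uid per join keeps each demo session in
+        // its own room; getRandomChannel prefixes "agora_" (see common/utils)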
channel: getRandomChannel(), + userId, + }), + ) + router.push("/home") + } + + return ( + <> +
+ + + + + TEN Agent + + + A Realtime Conversational AI Agent powered by TEN + + + +
{ + e.preventDefault() + onClickJoin() + }} + className="flex flex-col gap-6" + > + + + {isLoadingSuccess ? "Join" : "Loading"} + +
+
+ +

Version {version}

+
+
+
+
+  )
+}
diff --git a/demo/src/components/Chat/ChatCard.tsx b/demo/src/components/Chat/ChatCard.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..76c3abff650f1804587124be0d85ed71def6a689
--- /dev/null
+++ b/demo/src/components/Chat/ChatCard.tsx
@@ -0,0 +1,187 @@
+"use client"
+
+import * as React from "react"
+import { cn } from "@/lib/utils"
+import { LanguageSelect, GraphSelect } from "@/components/Chat/ChatCfgSelect"
+import PdfSelect from "@/components/Chat/PdfSelect"
+import { useAppDispatch, useAppSelector, isRagGraph, isLanguageSupported } from "@/common"
+import { setRtmConnected, addChatItem } from "@/store/reducers/global"
+import MessageList from "@/components/Chat/MessageList"
+import { Button } from "@/components/ui/button"
+import { Send } from "lucide-react"
+import { rtmManager } from "@/manager/rtm"
+import { type IRTMTextItem, EMessageDataType, EMessageType, ERTMTextType } from "@/types"
+
+// Module-level guard so RTM is initialized once, even across component remounts.
+let hasInit: boolean = false
+
+export default function ChatCard(props: { className?: string }) {
+  const { className } = props
+  const [inputValue, setInputValue] = React.useState("")
+
+  const graphName = useAppSelector((state) => state.global.graphName)
+  const rtmConnected = useAppSelector((state) => state.global.rtmConnected)
+  const options = useAppSelector((state) => state.global.options)
+  const agentConnected = useAppSelector((state) => state.global.agentConnected)
+  const dispatch = useAppDispatch()
+
+  // Disable the input until the channel is fully configured and both RTM and
+  // the agent are connected.
+  const disableInputMemo = React.useMemo(() => {
+    return (
+      !options.channel ||
+      !options.userId ||
+      !options.appId ||
+      !options.token ||
+      !rtmConnected ||
+      !agentConnected
+    )
+  }, [
+    options.channel,
+    options.userId,
+    options.appId,
+    options.token,
+    rtmConnected,
+    agentConnected,
+  ])
+
+  React.useEffect(() => {
+    if (
+      !options.channel ||
+      !options.userId ||
+      !options.appId ||
+      !options.token
+    ) {
+      return
+    }
+    if (hasInit) {
+      return
+    }
+
+    init()
+
+    return () => {
+      // if (hasInit) {
+      //   destroy()
+      // }
+    }
+  }, [options.channel, options.userId, options.appId, options.token])
+
+  const init = async () => {
+    console.log("[rtm] init")
+    await rtmManager.init({
+      channel: options.channel,
+      userId: options.userId,
+      appId: options.appId,
+      token: options.token,
+    })
+    dispatch(setRtmConnected(true))
+    rtmManager.on("rtmMessage", onTextChanged)
+    hasInit = true
+  }
+  const destroy = async () => {
+    console.log("[rtm] destroy")
+    rtmManager.off("rtmMessage", onTextChanged)
+    await rtmManager.destroy()
+    dispatch(setRtmConnected(false))
+    hasInit = false
+  }
+
+  const onTextChanged = (text: IRTMTextItem) => {
+    console.log("[rtm] onTextChanged", text)
+    if (text.type === ERTMTextType.TRANSCRIBE) {
+      // const isAgent = Number(text.uid) != Number(options.userId)
+      dispatch(
+        addChatItem({
+          userId: options.userId,
+          text: text.text,
+          // Stream 0 carries the agent transcript; any other stream is the user.
+          type: `${text.stream_id}` === "0" ? EMessageType.AGENT : EMessageType.USER,
+          data_type: EMessageDataType.TEXT,
+          isFinal: text.is_final,
+          time: text.ts,
+        }),
+      )
+    }
+    if (text.type === ERTMTextType.INPUT_TEXT) {
+      dispatch(
+        addChatItem({
+          userId: options.userId,
+          text: text.text,
+          type: EMessageType.USER,
+          data_type: EMessageDataType.TEXT,
+          isFinal: true,
+          time: text.ts,
+        }),
+      )
+    }
+  }
+
+  const handleInputChange = (e: React.ChangeEvent<HTMLInputElement>) => {
+    setInputValue(e.target.value)
+  }
+
+  const handleInputSubmit = (e: React.FormEvent<HTMLFormElement>) => {
+    e.preventDefault()
+    if (!inputValue || disableInputMemo) {
+      return
+    }
+    rtmManager.sendText(inputValue)
+    setInputValue("")
+  }
+
+  return (
+    <>
+      {/* Chat Card */}
+      {/* Markup below is a minimal sketch built around the surviving comments
+          and conditionals in this hunk. */}
+      <div className={cn("flex h-full flex-col", className)}>
+        {/* Action Bar */}
+        <div className="flex items-center gap-2 p-2">
+          <GraphSelect />
+          {isLanguageSupported(graphName) ? <LanguageSelect /> : null}
+          {isRagGraph(graphName) && <PdfSelect />}
+        </div>
+        {/* Chat messages would go here */}
+        <MessageList className="flex-1" />
+        <form onSubmit={handleInputSubmit} className="flex gap-2 p-2">
+          <input
+            className="flex-1"
+            placeholder="Type a message..."
+            value={inputValue}
+            onChange={handleInputChange}
+            disabled={disableInputMemo}
+          />
+          <Button type="submit" size="icon" disabled={disableInputMemo || !inputValue}>
+            <Send />
+          </Button>
+        </form>
+      </div>
+    </>
+  )
+}
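// Aside: ChatCard above drives rtmManager from "@/manager/rtm", which is not part
// of this patch. A minimal sketch of the surface the component assumes; the method
// names and the "rtmMessage" event are inferred purely from the call sites, and
// the option types are assumptions.
import type { IRTMTextItem } from "@/types"

interface IRtmInitOptions {
  channel: string
  userId: string | number // exact type unknown; ChatCard passes options.userId through
  appId: string
  token: string
}

interface IRtmManagerLike {
  init(options: IRtmInitOptions): Promise<void>
  destroy(): Promise<void>
  sendText(text: string): void
  on(event: "rtmMessage", handler: (text: IRTMTextItem) => void): void
  off(event: "rtmMessage", handler: (text: IRTMTextItem) => void): void
}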
diff --git a/demo/src/components/Chat/ChatCfgSelect.tsx b/demo/src/components/Chat/ChatCfgSelect.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..8c656a092c27f8c05de31eb0f358571fe16c6fc1
--- /dev/null
+++ b/demo/src/components/Chat/ChatCfgSelect.tsx
@@ -0,0 +1,84 @@
+"use client"
+
+import {
+  Select,
+  SelectContent,
+  SelectGroup,
+  SelectItem,
+  SelectLabel,
+  SelectTrigger,
+  SelectValue,
+} from "@/components/ui/select"
+import {
+  useAppDispatch,
+  LANGUAGE_OPTIONS,
+  useAppSelector,
+  GRAPH_OPTIONS,
+} from "@/common"
+import type { Language } from "@/types"
+import { setGraphName, setLanguage } from "@/store/reducers/global"
+
+export function GraphSelect() {
+  const dispatch = useAppDispatch()
+  const graphName = useAppSelector((state) => state.global.graphName)
+  const agentConnected = useAppSelector((state) => state.global.agentConnected)
+  const onGraphNameChange = (val: string) => {
+    dispatch(setGraphName(val))
+  }
+
+  return (
+    <>
+      {/* Select markup is a minimal sketch; switching is locked while an agent
+          session is live. */}
+      <Select
+        value={graphName}
+        onValueChange={onGraphNameChange}
+        disabled={agentConnected}
+      >
+        <SelectTrigger>
+          <SelectValue placeholder="Graph" />
+        </SelectTrigger>
+        <SelectContent>
+          <SelectGroup>
+            <SelectLabel>Graph</SelectLabel>
+            {GRAPH_OPTIONS.map((item) => (
+              <SelectItem key={item.value} value={item.value}>
+                {item.label}
+              </SelectItem>
+            ))}
+          </SelectGroup>
+        </SelectContent>
+      </Select>
+    </>
+  )
+}
+
+export function LanguageSelect() {
+  const dispatch = useAppDispatch()
+  const language = useAppSelector((state) => state.global.language)
+  const agentConnected = useAppSelector((state) => state.global.agentConnected)
+
+  const onLanguageChange = (val: Language) => {
+    dispatch(setLanguage(val))
+  }
+
+  return (
+    <>
+      <Select
+        value={language}
+        onValueChange={onLanguageChange}
+        disabled={agentConnected}
+      >
+        <SelectTrigger>
+          <SelectValue placeholder="Language" />
+        </SelectTrigger>
+        <SelectContent>
+          <SelectGroup>
+            <SelectLabel>Language</SelectLabel>
+            {LANGUAGE_OPTIONS.map((item) => (
+              <SelectItem key={item.value} value={item.value}>
+                {item.label}
+              </SelectItem>
+            ))}
+          </SelectGroup>
+        </SelectContent>
+      </Select>
+    </>
+  )
+}
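// Aside: GRAPH_OPTIONS and LANGUAGE_OPTIONS come from "@/common" and are not in
// this patch. Both selectors above only need label/value pairs, matching the
// OptionType shape used by PdfSelect below; the concrete entries here are
// illustrative placeholders, not real configuration.
import type { Language } from "@/types"

interface SelectOption<T extends string = string> {
  label: string
  value: T
}

const GRAPH_OPTIONS_EXAMPLE: SelectOption[] = [
  { label: "Voice Agent (example)", value: "va_example" },
]

const LANGUAGE_OPTIONS_EXAMPLE: SelectOption<Language>[] = [
  { label: "English (example)", value: "en-US" as Language },
]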
diff --git a/demo/src/components/Chat/MessageList.tsx b/demo/src/components/Chat/MessageList.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..bb8ba6b5fba1ded71c673cf0af75aaf78a6df24f
--- /dev/null
+++ b/demo/src/components/Chat/MessageList.tsx
@@ -0,0 +1,72 @@
+import * as React from "react"
+import {
+  useAppDispatch,
+  useAutoScroll,
+  LANGUAGE_OPTIONS,
+  useAppSelector,
+  GRAPH_OPTIONS,
+  isRagGraph,
+} from "@/common"
+import { Bot, Brain, MessageCircleQuestion } from "lucide-react"
+import { EMessageDataType, EMessageType, type IChatItem } from "@/types"
+import { Avatar, AvatarFallback } from "@/components/ui/avatar"
+import { cn } from "@/lib/utils"
+
+export default function MessageList(props: { className?: string }) {
+  const { className } = props
+
+  const chatItems = useAppSelector((state) => state.global.chatItems)
+
+  const containerRef = React.useRef<HTMLDivElement>(null)
+
+  // Keep the newest message in view as items stream in.
+  useAutoScroll(containerRef)
+
+  return (
+    <div ref={containerRef} className={cn("overflow-y-auto", className)}>
+      {chatItems.map((item, index) => {
+        return <MessageItem data={item} key={index} />
+      })}
+    </div>
+  )
+}
+
+export function MessageItem(props: { data: IChatItem }) {
+  const { data } = props
+
+  return (
+    <>
+      {/* Bubble markup is a minimal sketch: the agent gets a Brain avatar for
+          reasoning output and a Bot avatar otherwise; user messages get none. */}
+      <div className="flex items-start gap-2 p-2">
+        {data.type === EMessageType.AGENT ? (
+          data.data_type === EMessageDataType.REASON ? (
+            <Avatar>
+              <AvatarFallback>
+                <Brain />
+              </AvatarFallback>
+            </Avatar>
+          ) : (
+            <Avatar>
+              <AvatarFallback>
+                <Bot />
+              </AvatarFallback>
+            </Avatar>
+          )
+        ) : null}
+        <div>
+          {data.data_type === EMessageDataType.IMAGE ? (
+            <img src={data.text} alt="chat" />
+          ) : (
+            <p>{data.text}</p>
+          )}
+        </div>
+      </div>
+    </>
+  )
+}
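// Aside: MessageList calls useAutoScroll(containerRef) to keep the latest message
// visible. The real hook lives in "@/common" outside this patch; a minimal sketch
// of such a hook, assuming it pins the scroll position on every re-render:
import * as React from "react"

export function useAutoScroll(ref: React.RefObject<HTMLElement>) {
  React.useEffect(() => {
    const el = ref.current
    if (el) {
      // Pin the container to the bottom whenever rendered content changes.
      el.scrollTop = el.scrollHeight
    }
  })
}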
diff --git a/demo/src/components/Chat/PdfSelect.tsx b/demo/src/components/Chat/PdfSelect.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..c9959351212199be1d96301bfcbab122919eac0d
--- /dev/null
+++ b/demo/src/components/Chat/PdfSelect.tsx
@@ -0,0 +1,191 @@
+"use client"
+
+import * as React from "react"
+import { Button } from "@/components/ui/button"
+import {
+  Dialog,
+  DialogContent,
+  DialogDescription,
+  DialogFooter,
+  DialogHeader,
+  DialogTitle,
+  DialogTrigger,
+} from "@/components/ui/dialog"
+import { Input } from "@/components/ui/input"
+import { Label } from "@/components/ui/label"
+import {
+  Select,
+  SelectContent,
+  SelectItem,
+  SelectTrigger,
+  SelectValue,
+} from "@/components/ui/select"
+import { FileTextIcon } from "lucide-react"
+
+import { OptionType, IPdfData } from "@/types"
+import {
+  apiGetDocumentList,
+  apiUpdateDocument,
+  useAppSelector,
+  genUUID,
+} from "@/common"
+import { toast } from "sonner"
+
+export default function PdfSelect() {
+  const options = useAppSelector((state) => state.global.options)
+  const { channel, userId } = options
+  const [pdfOptions, setPdfOptions] = React.useState<OptionType[]>([])
+  const [selectedPdf, setSelectedPdf] = React.useState("")
+  const agentConnected = useAppSelector((state) => state.global.agentConnected)
+
+  React.useEffect(() => {
+    if (agentConnected) {
+      getPDFOptions()
+    }
+  }, [agentConnected])
+
+  const getPDFOptions = async () => {
+    const res = await apiGetDocumentList()
+    setPdfOptions(
+      res.data.map((item: any) => {
+        return {
+          value: item.collection,
+          label: item.file_name,
+        }
+      }),
+    )
+    setSelectedPdf("")
+  }
+
+  const onUploadSuccess = (data: IPdfData) => {
+    setPdfOptions([
+      ...pdfOptions,
+      {
+        value: data.collection,
+        label: data.fileName,
+      },
+    ])
+    setSelectedPdf(data.collection)
+  }
+
+  const onSelectPdf = async (val: string) => {
+    const item = pdfOptions.find((item) => item.value === val)
+    if (!item) {
+      // return message.error("Please select a PDF file")
+      return
+    }
+    setSelectedPdf(val)
+    await apiUpdateDocument({
+      collection: val,
+      fileName: item.label,
+      channel,
+    })
+  }
+
+  return (
+    <>
+      {/* Markup is a minimal sketch: a selector for already-uploaded PDFs plus a
+          dialog (FileText button) for uploading a new one. */}
+      <Select value={selectedPdf} onValueChange={onSelectPdf}>
+        <SelectTrigger>
+          <SelectValue placeholder="Select a PDF" />
+        </SelectTrigger>
+        <SelectContent>
+          {pdfOptions.map((item) => (
+            <SelectItem key={item.value} value={item.value}>
+              {item.label}
+            </SelectItem>
+          ))}
+        </SelectContent>
+      </Select>
+      <Dialog>
+        <DialogTrigger asChild>
+          <Button variant="outline" size="icon">
+            <FileTextIcon />
+          </Button>
+        </DialogTrigger>
+        <DialogContent>
+          <DialogHeader>
+            <DialogTitle>Upload & Select PDF</DialogTitle>
+          </DialogHeader>
+          <UploadPdf onSuccess={onUploadSuccess} />
+        </DialogContent>
+      </Dialog>
+    </>
+  )
+}
+
+export function UploadPdf({
+  onSuccess,
+}: {
+  onSuccess?: (data: IPdfData) => void
+}) {
+  const agentConnected = useAppSelector((state) => state.global.agentConnected)
+  const options = useAppSelector((state) => state.global.options)
+  const { channel, userId } = options
+  const [uploading, setUploading] = React.useState(false)
+
+  const handleUpload = async (e: React.ChangeEvent<HTMLInputElement>) => {
+    if (!agentConnected) {
+      toast.error("Please connect to agent first")
+      return
+    }
+
+    const file = e.target.files?.[0]
+    if (!file) return
+
+    setUploading(true)
+
+    const formData = new FormData()
+    formData.append("file", file)
+    formData.append("channel_name", channel)
+    formData.append("uid", String(userId))
+    formData.append("request_id", genUUID())
+
+    try {
+      const response = await fetch("/api/vector/document/upload", {
+        method: "POST",
+        body: formData,
+      })
+      const data = await response.json()
+
+      if (data.code === "0") {
+        toast.success(`Upload ${file.name} success`)
+        const { collection, file_name } = data.data
+        onSuccess?.({
+          fileName: file_name,
+          collection,
+        })
+      } else {
+        toast.info(data.msg)
+      }
+    } catch (err) {
+      toast.error(`Upload ${file.name} failed`)
+    } finally {
+      setUploading(false)
+    }
+  }
+
+  return (
+    <div>
+      {/* A plain file input wired to handleUpload; markup is a sketch. */}
+      <Label htmlFor="pdf-upload">
+        {uploading ? "Uploading..." : "Upload PDF"}
+      </Label>
+      <Input
+        id="pdf-upload"
+        type="file"
+        accept=".pdf"
+        disabled={uploading}
+        onChange={handleUpload}
+      />
+    </div>
+  )
+}
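// Aside: the upload endpoint contract implied by handleUpload above. The form
// fields and the response envelope are taken directly from the call site; the
// server side of /api/vector/document/upload is not part of this patch.
interface IUploadDocumentFormFields {
  file: File
  channel_name: string
  uid: string
  request_id: string
}

interface IUploadDocumentResponse {
  code: string // "0" indicates success
  msg: string
  data: {
    collection: string
    file_name: string
  }
}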
diff --git a/demo/src/components/Dialog/Settings.tsx b/demo/src/components/Dialog/Settings.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..773bb6b1f0166435972795376c052d20d3e4aca3
--- /dev/null
+++ b/demo/src/components/Dialog/Settings.tsx
@@ -0,0 +1,494 @@
+"use client"
+
+import * as React from "react"
+import { Button } from "@/components/ui/button"
+import {
+  Dialog,
+  DialogContent,
+  DialogDescription,
+  DialogFooter,
+  DialogHeader,
+  DialogTitle,
+  DialogTrigger,
+} from "@/components/ui/dialog"
+import { Input } from "@/components/ui/input"
+import { Label } from "@/components/ui/label"
+import {
+  Form,
+  FormControl,
+  FormDescription,
+  FormField,
+  FormItem,
+  FormLabel,
+  FormMessage,
+} from "@/components/ui/form"
+import { Textarea } from "@/components/ui/textarea"
+import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs"
+import {
+  Select,
+  SelectContent,
+  SelectGroup,
+  SelectItem,
+  SelectLabel,
+  SelectTrigger,
+  SelectValue,
+} from "@/components/ui/select"
+import { SettingsIcon, EraserIcon, ShieldCheckIcon } from "lucide-react"
+import { zodResolver } from "@hookform/resolvers/zod"
+import { useForm } from "react-hook-form"
+import { z } from "zod"
+import { toast } from "sonner"
+import { useAppDispatch, useAppSelector } from "@/common/hooks"
+// isCozeGraph/isDifyGraph are used below but were missing from the imports;
+// they are assumed to live in "@/common" alongside the other graph helpers.
+import { isCozeGraph, isDifyGraph } from "@/common"
+import { ECozeBaseUrl } from "@/common/constant"
+import {
+  setAgentSettings,
+  setCozeSettings,
+  resetCozeSettings,
+  resetDifySettings,
+  setGlobalSettingsDialog,
+  setDifySettings,
+} from "@/store/reducers/global"
+
+const TABS_OPTIONS = [
+  {
+    label: "Agent",
+    value: "agent",
+  },
+  {
+    label: "Coze",
+    value: "coze",
+  },
+  {
+    label: "Dify",
+    value: "dify",
+  },
+]
+
+export const useSettingsTabs = () => {
+  const [tabs, setTabs] = React.useState(TABS_OPTIONS)
+
+  const graphName = useAppSelector((state) => state.global.graphName)
+
+  const enableCozeSettingsMemo = React.useMemo(() => {
+    return isCozeGraph(graphName)
+  }, [graphName])
+
+  const enableDifySettingsMemo = React.useMemo(() => {
+    return isDifyGraph(graphName)
+  }, [graphName])
+
+  // Per-graph capabilities: the Gemini V2V graph has no greeting, while the
+  // Dify and story-teller graphs have no editable prompt.
+  const enableGreetingsOrPromptMemo: { greeting: boolean; prompt: boolean } =
+    React.useMemo(() => {
+      if (graphName === "va_gemini_v2v") {
+        return {
+          greeting: false,
+          prompt: true,
+        }
+      } else if (
+        graphName === "va_dify_azure" ||
+        graphName === "story_teller_stt_integrated"
+      ) {
+        return {
+          greeting: true,
+          prompt: false,
+        }
+      }
+
+      return {
+        greeting: true,
+        prompt: true,
+      }
+    }, [graphName])
+
+  React.useEffect(() => {
+    if (enableCozeSettingsMemo) {
+      setTabs([
+        { label: "Agent", value: "agent" },
+        { label: "Coze", value: "coze" },
+      ])
+    } else if (enableDifySettingsMemo) {
+      setTabs([
+        { label: "Agent", value: "agent" },
+        { label: "Dify", value: "dify" },
+      ])
+    } else {
+      setTabs((prev) =>
+        prev.filter((tab) => tab.value !== "coze" && tab.value !== "dify"),
+      )
+    }
+  }, [enableCozeSettingsMemo, enableDifySettingsMemo])
+
+  return {
+    tabs,
+    enableGreetingsOrPromptMemo,
+  }
+}
+
+export default function SettingsDialog() {
+  const dispatch = useAppDispatch()
+  const globalSettingsDialog = useAppSelector(
+    (state) => state.global.globalSettingsDialog,
+  )
+
+  const { tabs, enableGreetingsOrPromptMemo } = useSettingsTabs()
+
+  const handleClose = () => {
+    dispatch(setGlobalSettingsDialog({ open: false, tab: undefined }))
+  }
+
+  return (
+    <Dialog
+      open={globalSettingsDialog.open}
+      onOpenChange={(open) =>
+        dispatch(setGlobalSettingsDialog({ open, tab: undefined }))
+      }
+    >
+      {/* Dialog body below is a minimal sketch: one trigger button, a tab strip
+          when more than one tab applies, and the common agent settings form. */}
+      <DialogTrigger asChild>
+        <Button variant="outline" size="icon">
+          <SettingsIcon />
+        </Button>
+      </DialogTrigger>
+      <DialogContent>
+        <DialogHeader>
+          <DialogTitle>Settings</DialogTitle>
+        </DialogHeader>
+        <Tabs defaultValue={globalSettingsDialog.tab ?? "agent"}>
+          {tabs.length > 1 && (
+            <TabsList>
+              {tabs.map((tab) => (
+                <TabsTrigger key={tab.value} value={tab.value}>
+                  {tab.label}
+                </TabsTrigger>
+              ))}
+            </TabsList>
+          )}
+          <TabsContent value="agent">
+            <CommonAgentSettingsTab
+              enableGreeting={enableGreetingsOrPromptMemo.greeting}
+              enablePrompt={enableGreetingsOrPromptMemo.prompt}
+              handleClose={handleClose}
+            />
+          </TabsContent>
+          {/* The Coze and Dify tab panels mount their own settings forms,
+              presumably defined further down in this file. */}
+        </Tabs>
+      </DialogContent>
+    </Dialog>
+  )
+}
+
+const formSchema = z.object({
+  greeting: z.string().optional(),
+  prompt: z.string().optional(),
+})
+
+export function CommonAgentSettingsTab(props: {
+  enableGreeting?: boolean
+  enablePrompt?: boolean
+  handleClose?: () => void
+  handleSubmit?: (values: z.infer<typeof formSchema>) => void
+}) {
+  const { handleSubmit, enableGreeting, enablePrompt } = props
+
+  const dispatch = useAppDispatch()
+  const agentSettings = useAppSelector((state) => state.global.agentSettings)
+
+  const form = useForm<z.infer<typeof formSchema>>({
+    resolver: zodResolver(formSchema),
+    defaultValues: {
+      greeting: agentSettings.greeting,
+      prompt: agentSettings.prompt,
+    },
+  })
+
+  function onSubmit(values: z.infer<typeof formSchema>) {
+    console.log("Form Values:", values)
+    dispatch(setAgentSettings(values))
+    handleSubmit?.(values)
+  }
+
+  return (
+ + {enableGreeting && ( + + Greeting + +