version: 1.0.0
sections:
main:
- ai:
# post_prompt_url - string
# URL to post the conversation to after it has ended
# Supports https://user:pass@server.com format
# DEFAULT: undef
post_prompt_url: http://user:pass@some.server.com/rest/post.cgi
# params - object
# Params to change behaviors in the ai app
# DEFAULT: undef
# There may be undocumented params, so any name/value pair whose value is a string or boolean is accepted as a valid param.
params:
# direction - string
# Force the direction of the call to the assistant
# "inbound" or "outbound"
# DEFAULT: (natural direction of the call)
direction: inbound
# wait_for_user - boolean
# Do not start talking when the call is setup, wait for
# the other side of the call to say something first.
# DEFAULT: false (start talking right away)
wait_for_user: true
# end_of_speech_timeout - int (number of milliseconds)
# Amount of silence to tolerate at the end of an utterance to detect end of speech.
# Valid range 250 - 10,000
# DEFAULT: 2000 (2sec)
end_of_speech_timeout: 2000
# attention_timeout - int (number of milliseconds)
# The amount of time to wait before prompting the user to respond.
# May be set to 0 to disable. Valid range 10,000 - 600,000
# DEFAULT: 10000 (10sec)
attention_timeout: 10000
# inactivity_timeout - int (number of milliseconds)
# The amount of time to wait before exiting the app due to inactivity.
# Valid range 10,000 - 3,600,000
# DEFAULT: 600000 (10min)
inactivity_timeout: 600000
# outbound_attention_timeout - int (number of milliseconds)
# The amount of time, when making an outbound call, to wait for the user to answer before prompting them with "what did you say?"
# Hitting this timeout will end the call.
# May be set to 0 to disable. Valid range 10,000 - 600,000
# DEFAULT: 120000 (2min)
outbound_attention_timeout: 120000
# background_file - string
# File to play in the background while the app is running
# DEFAULT: undefined
background_file: https://path/to/file.mp3
# background_file_loops - number
# Number of times to play the background file.
# DEFAULT: undefined (implies endless)
background_file_loops: 10
# background_file_volume - number
# Volume to play the background file at.
# range: -50 to 50 with 0 being original volume
# DEFAULT: 0 (the natural volume of the file)
background_file_volume: -25
# local_tz - string
# The current time zone the bot is in.
# DEFAULT: GMT
local_tz: "America/Chicago"
# conscience - boolean or string
# Enable conscience, which enforces prompts more strictly.
# When it is a string, it is the conscience prompt to use; when it is a boolean, false turns it off and true uses the default conscience.
# DEFAULT: true
conscience: true
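# A custom conscience prompt could also be supplied as a string instead of a boolean.
# Illustrative example only (the prompt text below is not from this config):
# conscience: "Stay strictly on the topics defined in the main prompt."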
# ai_volume - number
# Adjust the volume of the AI voice lower or higher.
# Range -50 to 50; less than 0 is quieter, greater than 0 is louder
# DEFAULT: 0 (same as original)
ai_volume: 0
# save_conversation - boolean
# Indicates an implicit summary of the conversation should be sent after the call ends.
# This requires post_url and the conversation_id param to be set, but does not require post_prompt, as the summary will be generated anyway.
# DEFAULT: false
save_conversation: true
# conversation_id - string
# A unique identifier for this conversation to be used to retain info from call to call
# DEFAULT: undefined
conversation_id: "80afbb06-f0f1-11ed-a285-62c3bdb19a89"
# digit_timeout - number
# Number of milliseconds after which dialed digits are considered timed out.
# Once you start dialing digits and then stop for this long, the collected string will be sent as input.
# DEFAULT: 3000 (3 sec)
digit_timeout: 3000
# digit_terminators - string
# A string of DTMF digits considered to be a termination of the input.
# If you dial a digit that is one of the terminators, it will end the input and not be considered as part of the input.
# DEFAULT: undefined
digit_terminators: "#"
# energy_level - decimal number
# Amount of energy necessary for the bot to hear you (in dB)
# DEFAULT: 52 (range: 0 - 100)
energy_level: 52
# swaig_allow_swml - boolean
# Allow SWAIG functions to return SWML to be executed by the call
# DEFAULT: true
swaig_allow_swml: true
# THESE PARAMS ARE ONLY ENABLED IN STAGING
# ai_model - string
# AI Model to use
# DEFAULT: gpt-4o-mini
ai_model: gpt-4o-mini
#
# hints - array of strings
# Hints to the ASR about words you want it to recognize.
# DEFAULT: undef
hints:
- testing
- hints
#
# prompt - object (REQUIRED)
# Main prompt to send to the assistant to define its
# behavior.
# DEFAULT: undef
prompt:
# confidence - percent as decimal
# Confidence percent (0.0 - 1.0) to define the threshold
# where the ASR will fire a speech-detect event even when
# the VAD has not detected silence yet.
# DEFAULT: 0.6 (60%)
confidence: 0.6
# barge_confidence - percent as decimal
# Similar to confidence, but the confidence threshold required to
# barge in and interrupt the AI while it is talking.
# DEFAULT: 0.1 (10%)
barge_confidence: 0.1
# top_p - percent as decimal
# Probability mass (top X percent of probable tokens)
# See https://platform.openai.com/docs/api-reference/chat/create#chat/create-top_p
# DEFAULT: 1.0 (100%)
top_p: 1.0
# temperature - decimal 0.0 - 2.0
# The higher the value, the more random the output; the lower the value, the more deterministic.
# see https://platform.openai.com/docs/api-reference/chat/create#chat/create-temperature
# DEFAULT: 0.3
temperature: 0.3
# frequency_penalty - decimal -2.0 - 2.0
# Positive values penalize new tokens based on their
# existing frequency in the text so far, decreasing the
# model's likelihood to repeat the same line verbatim.
# See https://platform.openai.com/docs/api-reference/chat/create#chat/create-frequency_penalty
# DEFAULT: 0.1
frequency_penalty: 0.1
# presence_penalty - decimal -2.0 - 2.0
# Positive values penalize new tokens based on whether
# they appear in the text so far, increasing the model's
# likelihood to talk about new topics.
# See https://platform.openai.com/docs/api-reference/chat/create#chat/create-presence_penalty
# DEFAULT: 0.1
presence_penalty: 0.1
# text - string (REQUIRED)
# The text of the prompt. This is the main instruction
# prompt to prep the AI assistant with its personality and behaviors.
# DEFAULT: undef
text: >-
Your name is Franklin and you are an expert on Star Wars.
Introduce yourself and see if I have any questions.
#
# post_prompt - object
# At the end of the conversation, if post_prompt and post_prompt_url
# are set, the entire transcript is sent to the AI one last time,
# and this prompt lets you extract data, summarize the conversation,
# or do anything else you want. The whole conversation log, along with
# the result of this post_prompt, will be posted to post_prompt_url.
# DEFAULT: undef
post_prompt:
# confidence - percent as decimal
# Confidence percent (0.0 - 1.0) to define the threshold
# where the ASR will fire a speech-detect event even when
# the VAD has not detected silence yet.
# DEFAULT: 0.6 (60%)
confidence: 0.6
# barge_confidence - percent as decimal
# Similar to confidence, but the confidence threshold required to
# barge in and interrupt the AI while it is talking.
# DEFAULT: 0.1 (10%)
barge_confidence: 0.1
# top_p - percent as decimal
# Probability mass (top X percent of probable tokens)
# See https://platform.openai.com/docs/api-reference/chat/create#chat/create-top_p
# DEFAULT: 1.0 (100%)
top_p: 1.0
# temperature - decimal 0.0 - 2.0
# The higher the value, the more random the output; the lower the value, the more deterministic.
# see https://platform.openai.com/docs/api-reference/chat/create#chat/create-temperature
# DEFAULT: 0.3
temperature: 0.3
# frequency_penalty - decimal -2.0 - 2.0
# Positive values penalize new tokens based on their
# existing frequency in the text so far, decreasing the
# model's likelihood to repeat the same line verbatim.
# See https://platform.openai.com/docs/api-reference/chat/create#chat/create-frequency_penalty
# DEFAULT: 0.1
frequency_penalty: 0.0
# presence_penalty - decimal -2.0 - 2.0
# Positive values penalize new tokens based on whether
# they appear in the text so far, increasing the model's
# likelihood to talk about new topics.
# See https://platform.openai.com/docs/api-reference/chat/create#chat/create-presence_penalty
# DEFAULT: 0.0
presence_penalty: 0.0
# text - string (REQUIRED)
# The text of the prompt. This instruction is sent to the
# AI to POST-PROCESS the conversation and draw one final conclusion.
# DEFAULT: undef
text: >-
Please summarize the conversation
#
# languages - array of objects
# A set of languages supported and what voice to use for
# each one.
# DEFAULT: undef
languages:
# Defining more than one language automatically enables
# language support unless it's explicitly disabled by the
# languages_enabled param.
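# Illustrative only (not part of this example; the placement of languages_enabled
# under params is an assumption):
#   languages_enabled: false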
# Each object in the languages array has the following params:
# name - string | Proper name of the language.
# code - string | Lang code for the ASR (currently gcloud only)
# voice - string | The voice to use (currently gcloud voices)
# All 3 params are required for a valid entry.
- name: English
code: en-US
voice: en-US-Neural2-F
- name: French
code: fr-FR
voice: fr-FR-Neural2-E
#
# pronounce - array of objects
# A set of expressions you may want to spell phonetically to make them sound better in TTS
# DEFAULT: undef
pronounce:
# Each object in the pronounce array has the following params:
# replace - string | The expression to replace.
# with - string | the phonetic way to pronounce the expression.
# ignore_case - boolean | Should the replace ignore case.
# replace and with are required for a valid entry.
- replace: voip
with: voyp
ignore_case: true
- replace: SIP
with: sip
ignore_case: true
#
# SWAIG - array of objects
# SignalWire AI Gateway
# A set of functions that allow the AI to use your tools to
# get real-time info from the real world.
# The SWAIG section contains an object of defaults and an array of functions (plus optional native_functions and includes, shown below).
# Each object in the functions array has the following params:
# function - a unique function name
# argument - a JSON schema describing the input to the function
# purpose - a string describing the context and purpose so the model knows when to use the function.
# web_hook_url - the url to post the data to when executing (may contain user:pass@)
# web_hook_auth_user - the username in the basic auth. (ignored when user:pass is in url)
# web_hook_auth_pass - the password for the basic auth (ignored when user:pass is in url)
# meta_data_token - scoping token for meta_data; if not supplied, it will be scoped to the function's web_hook_url
# meta_data - any data you want to push through to the webhook.
#
# The defaults object provides defaults for web_hook_url, web_hook_auth_user, web_hook_auth_pass or meta_data.
#
SWAIG:
defaults:
web_hook_url: https://user:pass@somewhere.com/weather.cgi
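# Basic-auth credentials may also be given here instead of embedding user:pass in the URL.
# Illustrative values only (they are ignored when the URL already contains user:pass):
# web_hook_auth_user: myuser
# web_hook_auth_pass: mysecret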
# meta_data_token is a unique scoping token for function-specific user data
meta_data_token: 5dcec27c-107b-11ee-a281-62c3bdb19a89
# meta_data is a data store shared by all functions that use the same meta_data_token.
# Each individual function can also have meta_data and meta_data_token
meta_data:
my_key: some value
# Native functions to be enabled that are built into the engine.
# The only valid values are check_time and wait_seconds
native_functions:
# allow the ai to check the time in any timezone
- check_time
# allow the ai to sleep for an amount of time
- wait_seconds
# Remote function signatures.
includes:
# if url is not supplied it will use web_hook_url above.
- url: https://me:secret@somewhere.com/functions.cgi
# functions is an array of function names you want to load from the remote server.
functions:
- function1
- function2
# functions is an array of function objects
functions:
# This function will call the web_hook_url; if one is not provided, it will get it from defaults.
# A webhook must be found either in the function object or in defaults, or a data_map section must exist; otherwise the function is invalid.
- function: get_weather
meta_data_token: 5dcec27c-107b-11ee-a281-62c3bdb19a89
# meta_data is a data store shared by all functions that use the same meta_data_token.
meta_data:
my_key: some value
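# A per-function web_hook_url could also be set here to override defaults.
# Illustrative value only (this example relies on defaults.web_hook_url instead):
# web_hook_url: https://user:pass@somewhere.com/weather.cgi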
# purpose is the description of the function so the model knows what it's for.
purpose: To determine what the current weather is in a provided location.
# argument is a JSON schema describing the arguments the function should receive.
argument:
# indicates to pass an object {}
type: object
# the keys of the object
properties:
# location is the name of one of the keys in the object; the type and description define what the param is.
location:
type: string
description: the location to check the weather in
# This function will look up an address from a remote API; it uses a data_map to map a public webhook to the format the SW AI expects.
- function: address_lookup
purpose: To check if an address is valid. When using this function, make sure you collect the street and zip code.
argument:
type: "object"
properties:
street:
type: string
description: the street addr e.g. 1234 N Main st.
zipcode:
type: string
description: the 5 digit zipcode
# data_map tells the function engine how to translate webhooks
data_map:
# webhooks is an array of webhook objects that are processed until the first one that returns an output
webhooks:
# This will look up the address from RapidAPI
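# Note the variable expansion in the url below: ${enc:args.street} inserts the street
# argument (the enc: prefix appears to URL-encode the value) and ${args.zipcode}
# inserts the zipcode argument as-is.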
- url: "https://global-address.p.rapidapi.com/V3/WEB/GlobalAddress/doGlobalAddress?ctry=US&format=json&a1=${enc:args.street}&DeliveryLines=Off&postal=${args.zipcode}"
headers:
# these headers will be sent with the webhook
X-RapidAPI-Key: example_key_string
X-RapidAPI-Host: global-address.p.rapidapi.com
# expressions are an array of objects that can expand variables from the template and match against a regex.
# if the pattern is matched the output section from the expression object will be returned to the function processing engine.
# the special key input.args contains the function arguments as specified in the argument section. e.g. ${input.args.street}
# would be the value passed for street based on above.
expressions:
# string is the value you want to match; you usually use variable expansion here to build the string to match
- string: "${Records[0].FormattedAddress}"
# pattern is the regular expression to match string
pattern: /\w+/i
# output is arranged in the format the function processing engine expects.
output:
# response is a prompt to send back to the AI as a result of its query. Variables can be expanded here too
response: The Address is valid ${Records[0].FormattedAddress}
# action is a series of commands that the function can perform to manipulate the system.
action:
set_meta_data:
addr: "${Records[0].FormattedAddress}"
LL: "${Records[0].Latitude},${Records[0].Longitude}"
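# (The LL value stored here is what get_restaurant_list later reads back as
# ${enc:meta_data.LL} in its webhook url.)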
# If none of the expressions match, and there is an output at this level, that output object is returned to the function processing engine.
output:
response: There was an error verifying the address.
# This function gets a list of restaurants based on a genre
- function: get_restaurant_list
purpose: To look for restaurants given a certain genre
argument:
type: object
properties:
genre:
type: string
description: the genre of food
data_map:
webhooks:
# require_args says that if the arg is not supplied, move on to the next webhook.
- require_args: genre
# error_keys defines keys that, if present in the webhook response, cause processing to move on to the next webhook.
error_keys: error
# the meta_data object here is data the previous function set using the set_meta_data action.
url: "https://api.content.tripadvisor.com/api/v1/location/search?key=MYCUSTOMKEY&searchQuery=${lc:enc:args.genre}&latLong=${enc:meta_data.LL}&language=en"
# foreach is like a programming foreach loop. This is used when the webhook response contains an array
foreach:
# input_key is the name of the key in the response; in this example, data means the response contains {"data": [...]}
input_key: data
# output_key is the name of a new key to create with the results of the loop.
output_key: summary
# max is the max rows to process, if not set all rows will be processed.
max: 3
# append does the work of the loop; the variables are expanded here to create a result string that is appended to the defined output_key
append: "location_id: ${this.location_id} name: ${this.name} distance: ${this.distance} miles\n"
# output is the same as in other places, you can supply a response (with optional actions)
# The key defined in output_key is a variable here e.g. ${summary}
output:
response: "Here are the top 3 matches. ${summary}\n(Never mention the location_id)"
# this function does a restaurant lookup given a location_id
- function: restaurant_lookup
purpose: To look for restaurants given a certain location_id
argument:
type: object
properties:
location_id:
type: string
description: the location_id of the specific location to look up
# this data_map expects a single object with data keys that can be expanded as variables.
data_map:
webhooks:
- require_args: location_id
error_keys: error
url: "https://api.content.tripadvisor.com/api/v1/location/${args.location_id}/details?key=MY_API_KEY&language=en¤cy=USD"
# a single output is just returned with variable expansion.
output:
response: "Summarize this info for ${name}. ${description} Address: (omit plus 4) ${address_obj.address_string}, Phone Number: (repeat as is) @{fmt_ph national:sep ${phone}}, Rating: ${rating}, Ranking: ${ranking_data.ranking_string}"
#
# Developer MODE (STAGING ONLY)
# In Developer MODE, the following extra behaviors apply:
# There is an extra param called developer_prompt, which is an extra hidden system prompt used to test global prompt adjustments.
# If the prompt: param starts with "Generate: ", the AI will just output the prompt verbatim.
# If the prompt: param starts with "RAW: ", what follows is interpreted as the entire prompt and no other info will be passed to the AI.
#
# END