videobridge {
entity-expiration {
# If an entity has had no activity for this amount of time, it is expired
timeout=1 minute
# The interval at which the videobridge will check for expired entities
check-interval=${videobridge.entity-expiration.timeout}
}
health {
# The interval between health checks
interval=10 seconds
# The timeout for a health check
timeout=30 seconds
# If performing a health check takes longer than this, it is considered unsuccessful.
max-check-duration=3 seconds
# Whether or not health check failures should be 'sticky'
# (i.e. once the bridge becomes unhealthy, it will never
# go back to a healthy state)
sticky-failures=false
}
ep-connection-status {
# How long we'll wait for an endpoint to *start* sending
# data before we consider it 'inactive'
first-transfer-timeout=15 seconds
# How long an endpoint can be 'inactive' before it will
# be considered disconnected
max-inactivity-limit=3 seconds
# How often we check an endpoint's connectivity status
check-interval=500 milliseconds
}
cc {
bwe-change-threshold=0.15
thumbnail-max-height-px=180
onstage-ideal-height-px=1080
onstage-preferred-height-px=360
onstage-preferred-framerate=30
enable-onstage-video-suspend=false
trust-bwe=true
# How often we check whether to send probing data
padding-period=15ms
# How often we'll force recalculations of forwarded
# streams
max-time-between-calculations = 15 seconds
# A JVB-wide last-n value, observed by all endpoints. Endpoints
# will take the minimum of their setting and this one (-1 implies
# no last-n limit)
jvb-last-n = -1
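# For example (illustrative values): with jvb-last-n = 25, an endpoint requesting
# last-n = 50 would effectively get min(50, 25) = 25.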
}
# The APIs by which the JVB can be controlled
apis {
xmpp-client {
# The interval at which presence is published in the configured MUCs.
presence-interval = ${videobridge.stats.interval}
configs {
# example-connection-id {
# For the properties which should be
# filled out here, see MucClientConfiguration
# }
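# An illustrative, commented-out example (property names follow common
# MucClientConfiguration usage; all values are placeholders):
# shard {
#     hostname="localhost"
#     domain="auth.example.com"
#     username="jvb"
#     password="changeme"
#     muc_jids="JvbBrewery@internal.auth.example.com"
#     muc_nickname="unique-jvb-instance-id"
# }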
}
}
# The COLIBRI REST API
rest {
enabled = false
}
jvb-api {
enabled = false
}
}
# Configuration of the different REST APIs.
# Note that the COLIBRI REST API is configured under videobridge.apis.rest instead.
rest {
debug {
enabled = true
}
health {
enabled = true
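# When enabled, the health status is typically served at /about/health on the
# private HTTP server.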
}
shutdown {
# Note that the shutdown API requires the COLIBRI API to also be enabled.
enabled = false
}
version {
enabled = true
}
}
http-servers {
# The HTTP server which hosts services intended for 'public' use
# (e.g. websockets for the bridge channel connection)
public {
# See JettyBundleActivatorConfig in Jicoco for values
port = -1
tls-port = -1
}
# The HTTP server which hosts services intended for 'private' use
# (e.g. health or debug stats)
private {
# See JettyBundleActivatorConfig in Jicoco for values
host = 127.0.0.1
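# For example, to pin the private server to a specific port (illustrative;
# the actual default comes from JettyBundleActivatorConfig):
# port = 8080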
}
}
octo {
# Whether or not Octo is enabled
enabled=false
# A string denoting the 'region' of this JVB. Jicofo compares it to a
# client's region when selecting a bridge for that client.
# Must be set when 'enabled' is true
#region="us-west-1"
# The address on which the Octo relay should bind
# Must be set when 'enabled' is true
#bind-address=198.51.100.1
# The port to which the Octo relay should bind
bind-port=4096
# The address used as the public address in the Octo relayId
#public-address=198.51.100.1
# The size of the incoming octo queue. This queue is per-remote-endpoint,
# so it matches what we use for local endpoints
recv-queue-size=1024
# The size of the outgoing octo queue. This is a per-originating-endpoint
# queue, so assuming all packets are routed (as they currently are for Octo)
# it should be the same size as the transceiver recv queue in
# jitsi-media-transform. Repeating the description from there:
# Assuming 300pps for high-definition, 200pps for standard-definition,
# 100pps for low-definition and 50pps for audio, this queue is fed
# 650pps, so its size in terms of millis is 1024/650*1000 ~= 1575ms.
send-queue-size=1024
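# For reference, a minimal working Octo setup (illustrative; the region and
# address are placeholders) enables the feature and sets both required fields:
# enabled=true
# region="us-west-1"
# bind-address=198.51.100.1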
}
load-management {
# Whether the load reducer is enabled, i.e. whether the bridge will take actions to mitigate load
reducer-enabled = false
load-measurements {
packet-rate {
# The packet rate at which we'll consider the bridge overloaded
load-threshold = 50000
# The packet rate at which we'll consider the bridge 'underloaded' enough
# to start recovery
recovery-threshold = 40000
}
}
load-reducers {
last-n {
# The factor by which we'll reduce the current last-n when trying to reduce load
reduction-scale = .75
# The factor by which we'll increase the current last-n when trying to recover
recover-scale = 1.25
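# As a rough illustration: with reduction-scale = .75, a current last-n of 20
# would be lowered to 20 * .75 = 15 in one reduction step; recover-scale works
# analogously in the other direction when recovering.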
# The minimum time between successive runs of the last-n reducer, whether
# reducing or recovering
impact-time = 1 minute
# The lowest value we'll set for last-n
minimum-last-n-value = 0
# The highest last-n value we'll enforce. Once the enforced last-n exceeds this value
# we'll remove the limit entirely
maximum-enforced-last-n-value = 40
}
}
}
sctp {
# Whether SCTP data channels are enabled.
enabled=true
}
stats {
# Whether periodic collection of statistics is enabled or not. When enabled they are accessible through the REST
# API (at `/colibri/stats`), and are available to other modules (e.g. to be pushed to callstats or in a MUC).
enabled = false
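# For example, once enabled the stats can be fetched via the REST API
# (illustrative; assumes the private HTTP server listens on its default port, 8080):
#   curl http://127.0.0.1:8080/colibri/stats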
# The interval at which stats are gathered.
interval = 5 seconds
# Configuration related to pushing statistics to callstats.io.
callstats {
# An integer application ID (use 0 to disable pushing stats to callstats).
app-id = 0
# The shared secret used to authenticate with callstats.io.
//app-secret = "s3cret"
# The ID of the key that was used to generate the token.
//key-id = "abcd"
# The path to the private key file.
//key-path = "/etc/jitsi/videobridge/ecpriv.jwk"
# The ID of the server instance to be used when reporting to callstats.
bridge-id = "jitsi"
# TODO: document
//conference-id-prefix = "abcd"
# The interval at which statistics will be published to callstats. This affects both per-conference and global
# statistics.
# Note that this value will be overridden if a "callstatsio" transport is defined in the parent "stats" section.
interval = ${videobridge.stats.interval}
}
}
websockets {
enabled=false
server-id="default-id"
# Optional, even when 'enabled' is set to true
# tls=true
# Must be set when enabled = true
#domain="some-domain"
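# For example, a typical enabled configuration (illustrative; the domain is a
# placeholder):
# enabled=true
# domain="jvb.example.com:443"
# tls=true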
}
ice {
tcp {
# Whether ICE/TCP is enabled.
enabled = true
# The port to bind to for ICE/TCP.
port = 8080
# An optional additional port to advertise.
# mapped-port = 8443
# Whether to use "ssltcp" or plain "tcp".
ssltcp = true
}
udp {
# The port for ICE/UDP.
port = 10000
}
# An optional prefix to include in STUN username fragments generated by the bridge.
#ufrag-prefix = "jvb-123:"
# Which candidate pairs to keep alive. The accepted values are defined in ice4j's KeepAliveStrategy:
# "selected_and_tcp", "selected_only", or "all_succeeded".
keep-alive-strategy = "selected_and_tcp"
# Whether to use the "component socket" feature of ice4j.
use-component-socket = true
# Whether to attempt DNS resolution for remote candidates that contain a non-literal address. When set to 'false'
# such candidates will be ignored.
resolve-remote-candidates = false
# The nomination strategy to use for ICE. The accepted values are defined in ice4j's NominationStrategy:
# "NominateFirstValid", "NominateHighestPriority", "NominateFirstHostOrReflexiveValid", or "NominateBestRTT".
nomination-strategy = "NominateFirstValid"
}
transport {
send {
# The size of the dtls-transport outgoing queue. This is a per-participant
# queue. Packets from the egress end up in this queue right before
# transmission by the outgoing srtp pipeline (which mainly consists of the
# packet sender).
#
# Its size needs to be of the same order of magnitude as the rtp sender
# queue. In a 100 participant call, assuming 300pps for the on-stage and
# 100pps for low-definition, last-n 20 and 2 participants talking, so
# 2*50pps for audio, this queue is fed 300+19*100+2*50 = 2300pps, so its
# size in terms of millis is 1024/2300*1000 ~= 445ms.
queue-size=1024
}
}
version {
# Whether to announce the jitsi-videobridge version to clients in the ServerHello message.
announce = false
}
}