Googling did not help, so I made my own bicycle.
Quick one-liner to get a variable's length in FreeSWITCH.
Here $1 is assumed to hold our variable.
dest_len=${lua(~stream:write(tostring("$1"):len()))}
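For example, inside a dialplan extension where $1 is the regex capture, usage might look roughly like this (a sketch; the extension name, pattern and log line are illustrative, not from the original):
<extension name="check_dest_len">
  <condition field="destination_number" expression="^(\d+)$">
    <!-- $1 is the captured number; its length ends up in ${dest_len} -->
    <action application="set" data="dest_len=${lua(~stream:write(tostring('$1'):len()))}"/>
    <action application="log" data="INFO destination is ${dest_len} digits long"/>
  </condition>
</extension>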
Thursday, October 12, 2017
Thursday, September 28, 2017
OpenSIPS proxy with auth on external trunk
A new task has arrived.
Make OpenSIPS (actually, it could just as well be Kamailio here) handle registration to an external service, but also provide the capability to dial out from other trunks that are not aware of any credentials of that external trunk and know only about OpenSIPS as a trunk. So, in some sense, it's a B2B.
1. A -> INVITE -> OpenSIPS B
2. A OpenSIPS -> INVITE -> B
3. A OpenSIPS <- 401(7) <- B
4. A OpenSIPS -> INVITE (auth) -> B
5. A OpenSIPS <- 200 <- B
6. A <- 200 <- OpenSIPS
This is a very simplified scheme. The main issue: on the INVITE with auth headers you have to increase the CSeq number by 1, effectively creating a new dialog with side B. But you still have to keep the old dialog with side A, so you have to dynamically change the CSeq number on requests.
To the question "is there any OpenSIPS-native way of doing that", I got the answer:
"Right now OpenSIPs does not support increasing the cseq during UAC authentication. At the end this is a limitation of the a proxy versus a B2B :)"
So, nothing is left but to change it by hand.
Parts of the script would look like this:
...
#------------ uac related parts
loadmodule "uac_auth.so"
modparam("uac_auth","auth_realm_avp","$avp(uac_realm)")
modparam("uac_auth","auth_username_avp","$avp(uac_username)")
modparam("uac_auth","auth_password_avp","$avp(uac_password)")
loadmodule "uac.so"
loadmodule "uac_registrant.so"
modparam("uac_registrant", "db_url", "mysql://astercc:astercc@localhost/opensips")
modparam("uac_registrant", "timer_interval", 120)
modparam("uac_registrant", "hash_size", 2)
# In DB we're storing info on external services where to register
modparam("uac_registrant", "db_url", DBURL)
...
route {
...
# Handle sequential requests part
if (has_totag()) {
if (loose_route()) {
...
} else {
...
if (is_method("ACK") && isflagset(AUTH_DONE)) {
# Process ACK's
if ($cs == $avp(original_cseq)) {
route(INCREASE_CSEQ);
}
}
...
}
}
...
# CANCEL processing
if (is_method("CANCEL")) {
# Additionally process the CSeq in case of an authenticated call
if (isflagset(AUTH_DONE)) {
# Process CANCEL's to both sides
if ($cs == $avp(original_cseq)) {
route(INCREASE_CSEQ);
} else {
route(RESTORE_CSEQ);
}
}
...
}
...
# INVITE processing
if (is_method("INVITE")) {
...
# Here we detect a call going to the external auth trunk
if (...) {
$avp(original_cseq) = $cs;
setflag(IS_OUTBOUND_CALL);
# Also note: since we already know this trunk's username and password here, the provider may require specific From and To fields
#if (...) {
# uac_replace_to("sip:$tU@$rd");
# uac_replace_from("sip:$avp(uac_username)@$rd");
#}
}
...
}
onreply_route[1] {
...
# On reply just restore original CSeq.
route(RESTORE_CSEQ);
...
}
failure_route[1] {
...
# Authentication reply on outbound call received?
if (t_check_status("40[17]") && isflagset(IS_OUTBOUND_CALL)) {
# Have we already tried to authenticate?
if (isflagset(AUTH_DONE)) {
t_reply("503","Authentication failed");
exit;
}
# Set flag that auth was performed
setflag(AUTH_DONE);
# Get realm from the response
# Get Proxy-Auth header
if ($(<reply>hdr(Proxy-Authenticate))) {
$var(raw_auth) = $(<reply>hdr(Proxy-Authenticate));
}
# Prefer WWW-Authenticate to Proxy-Authenticate
if ($(<reply>hdr(WWW-Authenticate))) {
$var(raw_auth) = $(<reply>hdr(WWW-Authenticate));
}
$var(reg_start) = "/(.*?)realm=\"//g";
$var(reg_end) = "/\"(.*)//g";
$var(raw_auth) = $(var(raw_auth){re.subst,$var(reg_start)});
$avp(uac_realm) = $(var(raw_auth){re.subst,$var(reg_end)});
# --- We assume that avp(uac_username) and avp(uac_password) are set elsewhere (see the sketch after these routes)
if (uac_auth()) {
route(INCREASE_CSEQ);
} else {
exit;
}
route(RELAY);
}
...
}
route[RESTORE_CSEQ] {
if (isflagset(AUTH_DONE) && is_avp_set("$avp(original_cseq)")) {
remove_hf("CSeq:");
append_hf("CSeq: $avp(original_cseq) $rm\r\n", "Call-ID");
}
}
route[INCREASE_CSEQ] {
if (isflagset(AUTH_DONE) && is_avp_set("$avp(original_cseq)")) {
$var(inc_cseq) = $(avp(original_cseq){s.int}) + 1;
remove_hf("CSeq:");
append_hf("CSeq: $var(inc_cseq) $rm\r\n", "Call-ID");
}
}
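For completeness: uac_auth() above needs avp(uac_realm), avp(uac_username) and avp(uac_password). The realm is extracted from the reply; the username and password are assumed to be set elsewhere, for example in the INVITE branch where the outbound trunk is detected. A minimal sketch (the values are placeholders, not from the original config):
# hypothetical spot to set the trunk credentials (placeholder values)
if (isflagset(IS_OUTBOUND_CALL)) {
    $avp(uac_username) = "trunk_user";
    $avp(uac_password) = "trunk_secret";
}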
Wednesday, August 9, 2017
Freeswitch HA mode based on Keepalived.
The idea is to have 2 FreeSWITCH instances in Master-Slave mode: if one node dies, the second automatically takes over the calls with a 2-3 second lag. Yep, media is restored as well.
The main scheme looks like this:
We need 3 IPs: 2 for the nodes and 1 floating.
The solution is based on this, but with a bit of additional config.
Debian 8 x64 is used as the host OS.
Assume that FreeSWITCH is already installed on both nodes.
Install keepalived on both nodes.
apt-get install keepalived
Create files on both nodes
/etc/keepalived/keepalived.conf
global_defs {
router_id FREESW
}
vrrp_script chk_fs {
script "/etc/keepalived/scripts/ka-status.pl"
interval 1
}
vrrp_instance VI_FREESW {
# change state to BACKUP on the backup node
state MASTER
interface <YOUR_INTERFACE_HERE>
#interface eth0
virtual_router_id <YOUR_ROUTER_ID>
# virtual_router_id 15
# higher is preferred for master
# disable to have failover be sticky
priority 1
advert_int 1
unicast_src_ip <CURRENT_NODE_IP>
#unicast_src_ip 192.168.10.10
unicast_peer {
<SLAVE_NODE_IP>
#192.168.10.11
}
authentication {
auth_type PASS
auth_pass YourPassHere
}
notify "/etc/keepalived/scripts/ka-notify.pl"
virtual_ipaddress {
<FLOATING_IP/CIDR> dev <YOUR_INTERFACE_HERE>
# 192.168.0.15/24 dev eth0 - Example
}
track_script {
chk_fs
}
}
/etc/keepalived/scripts/ka-notify.pl
#!/usr/bin/perl
# INSTANCE|VI_FREESW|BACKUP|50
my ($what,$id,$state,$prio) = @ARGV;
open(STDOUT, "|/usr/bin/logger -t ka-notify");
print "what($what) id($id) state($state) prio($prio)\n";
if ( $state eq "MASTER" )
{
print "Instance went to master, issuing sofia recover.\n";
system("/usr/bin/fs_cli", "-x", "sofia recover");
}
/etc/keepalived/scripts/ka-status.pl
#!/usr/bin/perl
use Sys::Syslog qw(:standard :macros);
openlog "ka-status", "ndelay,pid", "local0";
my @required = ("internal", "external");
my %saw = ();
open(my $in, "-|") || exec("/usr/bin/fs_cli", "-x", "sofia xmlstatus");
while ( defined(my $line = <$in>) )
{
if ( $line =~ m|<name>(.*)</name>|o )
{
$saw{$1} = 1;
}
}
close($in);
foreach my $profile ( @required )
{
if ( ! $saw{$profile} )
{
syslog(LOG_INFO, "sip profile $profile not found, marking failure");
exit(1);
}
}
exit(0);
chmod +x /etc/keepalived/scripts/*.pl
echo "net.ipv4.ip_nonlocal_bind = 1" >> /etc/sysctl.conf
Make sure that both FreeSWITCH instances use the same runtime database and the same hostname in switch.conf.xml. Also, the profiles you control need to listen on the FLOATING_IP address.
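For reference, the relevant switch.conf.xml parameters would look roughly like this (a sketch; the DSN and switch name are placeholders):
<!-- both nodes point to the same runtime DB and report the same switch name -->
<param name="core-db-dsn" value="pgsql://hostaddr=127.0.0.1 dbname=freeswitch user=fs password=fs"/>
<param name="switchname" value="freeswitch-ha"/>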
It is better to reboot at this point.
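If you prefer not to reboot just for the sysctl change, it can also be applied on the fly:
sysctl -w net.ipv4.ip_nonlocal_bind=1
# or re-read /etc/sysctl.conf
sysctl -p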
service keepalived start
To check the state, the best way is to use ip addr show. Whichever node holds the FLOATING_IP address is the master :)
You can play with the priority parameter in the keepalived.conf file to make one node the Master in all cases.
Note, there is an issue: once calls have been recovered on one node, you can't get the same calls back again with sofia recover.
FusionPBX/FreeSWITCH save CLID on transfer
The idea: when a call is received and then transferred to the next destination, the transfer should carry the caller ID as it was originally received, not the one updated at the moment of transfer. Sometimes this is needed for correct CRM integration, or people are just used to it, since it's the default with blind transfer.
Actually, these are the same ideas as described here, but adapted for FusionPBX.
1. Create context save_transfer with order ~85
condition - <empty>
action - export - nolocal:execute_on_answer_1=lua number_save_on_transfer_store.lua
action - export - api_hangup_hook=lua number_save_on_transfer_db_cleanup.lua
2. Create context restore_transfer with a lower order (so it runs earlier) than save_transfer, ~80
condition - ${db(exists/number_transfer_store/${sip_from_user})} - ^true$
action - set - restored_number_on_transfer=${db(select/number_transfer_store/${sip_from_user})} - inline
action - set - effective_caller_id_number=${restored_number_on_transfer}
action - set - effective_caller_id_name=${restored_number_on_transfer}
action - db - delete/number_transfer_store/${sip_from_user}
condition - ${db(exists/number_transfer_store/${sip_from_user}_name)} - ^true$
action - set - restored_number_on_transfer_name=${db(select/number_transfer_store/${sip_from_user}_name)} - inline
action - set - effective_caller_id_name=${restored_number_on_transfer_name}
action - db - delete/number_transfer_store/${sip_from_user}_name
Lua files:
/usr/share/freeswitch/scripts/number_save_on_transfer_store.lua
-- Save number_answered / original caller_id to database
--api = freeswitch.API()
if (session:ready()) then
answered_extension = session:getVariable("dialed_user")
caller_id = session:getVariable("restored_number_on_transfer")
caller_name = session:getVariable("restored_number_on_transfer_name")
if (caller_id == nil) then
caller_id = session:getVariable("sip_from_user")
end
if (caller_name == nil) then
caller_name = session:getVariable("sip_from_display")
end
if (answered_extension ~= nil and caller_id ~= nil) then
freeswitch.consoleLog("INFO", "[NUMBER_ON_TRANSFER_SAVE] Got answered call from "..caller_id.." to "..answered_extension.."\n")
session:execute('db', 'insert/number_transfer_store/'..answered_extension..'/'..caller_id)
if (caller_name ~= nil) then
session:execute('db', 'insert/number_transfer_store/'..answered_extension..'_name/'..caller_name)
end
end
end
/usr/share/freeswitch/scripts/number_save_on_transfer_db_cleanup.lua
-- Cleanup database
api = freeswitch.API()
sip_to_user = env:getHeader("variable_last_sent_callee_id_number")
if (sip_to_user ~= nil) then
--serialized = env:serialize()
--freeswitch.consoleLog("INFO","[hangup]\n" .. serialized .. "\n")
freeswitch.consoleLog("INFO", "[DB_CLEANUP] Cleaning " .. sip_to_user .. "\n")
api:executeString('db delete/number_transfer_store/'..sip_to_user)
api:executeString('db delete/number_transfer_store/'..sip_to_user..'_name')
end
P.S.: In the end, db can easily be replaced with hash, as we don't need persistent storage here.
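As a sketch of that replacement, the same calls with mod_hash would look like this (same realm and keys, just no persistence):
-- in number_save_on_transfer_store.lua
session:execute('hash', 'insert/number_transfer_store/'..answered_extension..'/'..caller_id)
-- in number_save_on_transfer_db_cleanup.lua
api:executeString('hash delete/number_transfer_store/'..sip_to_user)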
Tuesday, June 27, 2017
Freeswitch/Fusion stop/resume record on the fly
The most common solution to pause/resume recording in FreeSWITCH is to use stop_record_session and then record_session again, along with RECORD_APPEND=true.
What is bad about this solution:
1. Only WAV is supported
2. Seems it's broken for now (1.6.18)
The other option is
uuid_record mask/unmask
For FusionPBX it will look like this:
action - bind_digit_action - local,*1,api:uuid_record,${uuid} mask ${recordings_dir}/${domain_name}/archive/${strftime(%Y)}/${strftime(%b)}/${strftime(%d)}/${uuid}.${record_ext},${bind_target}
action - bind_digit_action - local,*2,api:uuid_record,${uuid} unmask ${recordings_dir}/${domain_name}/archive/${strftime(%Y)}/${strftime(%b)}/${strftime(%d)}/${uuid}.${record_ext},${bind_target}
So, you may press *1 to stop recording and *2 to resume it. The downside of this solution: the recording will contain silence for the periods while it is masked.
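The same can be issued manually from fs_cli for testing (the uuid and path below are placeholders):
uuid_record <uuid> mask /var/lib/freeswitch/recordings/example.wav
uuid_record <uuid> unmask /var/lib/freeswitch/recordings/example.wav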
Sunday, June 11, 2017
Freeswitch Postgres database scheme
FreeSWITCH has a great option to move its internal database from SQLite to Postgres. But in case the Postgres DB is BDR (as I've described here), some additional work is needed on this database.
So, I've created an SQL file to use, based on the DigiDaz file.
You can find it here. Don't forget to set
<param name="auto-create-schemas" value="false"/>
<param name="auto-clear-sql" value="false"/>
in your switch.conf.xml file.
Also a few things about BDR: you can't add a primary key to an existing table in one command, but you can use something like:
ALTER TABLE tbl ADD COLUMN tbl_uuid uuid;
ALTER TABLE tbl ALTER COLUMN tbl_uuid SET DEFAULT gen_random_uuid();
ALTER TABLE tbl ADD PRIMARY KEY (tbl_uuid);
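Note that gen_random_uuid() comes from the pgcrypto extension, so make sure it exists in the database first (it is also created in the BDR article below):
CREATE EXTENSION IF NOT EXISTS pgcrypto;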
Monday, May 29, 2017
Fax in FreeSwitch
Just a couple of links, so as not to forget where to find the variables related to faxing in FreeSWITCH:
Examples in source
And explanations on Youtube
Tuesday, February 14, 2017
Kamailio limit CPS/CPM/Concurrent calls per gateway.
A small task: make some generic limiters for calls per gateway, which in my case means per destination.
Limiting concurrent calls is based on the dialog module, limiting CPS/CPM on the htable module; the actual solution is taken from here.
In modparam I'm pointing only to the settings needed for this solution, so if you want to adapt it to your case, use your own settings along with the ones provided.
loadmodule "htable.so"
...
modparam("htable", "htable", "rhs=>size=32;initval=0;autoexpire=300;")
modparam("htable", "htable", "rhm=>size=32;initval=0;autoexpire=1800;")
loadmodule "dialog.so"
...
modparam("dialog", "profiles_with_value", "concurrent_calls")
...
request_route {
...
if (is_method("INVITE")) {
route(LIMIT_CALLS);
}
...
}
route[LIMIT_CALLS] {
# Limit concurrent calls per gateway
if (!dlg_isflagset("1")) {
if (get_profile_size("concurrent_calls", "$td", "$avp(calls)")) {
$avp(concCallLimit) = <ACTUAL_CONCURRENT_CALLS_LIMIT_HERE>;
if ($avp(calls) >= $avp(concCallLimit)) {
xlog("L_INFO", "Concurrent calls to $td limit reached");
send_reply("503", "Calls limit reached");
exit;
} else {
dlg_manage();
dlg_setflag("1");
set_dlg_profile("concurrent_calls","$td");
}
}
}
$avp(rateHashSec) = "$td:sec:"+$timef(%Y/%m/%d_%H_%M_%S);
$avp(rateHashMin) = "$td:min:"+$timef(%Y/%m/%d_%H_%M_00);
$avp(ratePerSec) = $shtinc(rhs=>$avp(rateHashSec));
$avp(ratePerMin) = $shtinc(rhm=>$avp(rateHashMin));
$avp(limitPerSec) = <ACTUAL_LIMIT_PER_SECOND_HERE>;
$avp(limitPerMin) = <ACTUAL_LIMIT_PER_MINUTE_HERE>;
if ($avp(ratePerSec) > $avp(limitPerSec) || $avp(ratePerMin) > $avp(limitPerMin)) {
xlog("L_INFO", "CPS/CPM Limit on $td");
send_reply("503", "CPS/CPM Limit on $td");
exit;
}
}
The main idea for concurrent calls: use dialog profiles and the ability to count them. Note, this can be used only in stateful mode, so dialogs are tracked and deleted when they end. Dialog flag "1" is used so that the same dialog is not counted several times.
The idea of the CPS/CPM limiter: use an htable with keys that are only reachable during a specific time window thanks to the key naming rules.
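As an illustration, a call towards gateway gw.example.com at 12:30:05 on 2017-02-14 would increment counters under keys like these (names follow the code above; the gateway and time are made up):
rhs => gw.example.com:sec:2017/02/14_12_30_05   # per-second counter, expires after 300s
rhm => gw.example.com:min:2017/02/14_12_30_00   # per-minute counter, expires after 1800s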
And $td is used to limit per the current destination, which was set up previously (that part is not touched here).
This one is not the best for copy-paste, so adapt it to your needs :)
Friday, February 10, 2017
Postgres Master-Master replication with BDR
This article describes the steps to get master-master replication for PostgreSQL on Debian Jessie.
Made mostly for glorious Copy-Paste with minor descriptions.
So, we will use the BDR (Bi-Directional Replication) extension from 2ndQuadrant.
Installation instructions can be found on the page, but I'll also describe them here for Debian Jessie as of February 2017 (currently in the repos we have Postgres 9.6).
So, the installation steps are as follows:
1. Back up the existing data with pg_dump. Of course, use your own databases instead of database1 (on the first node).
pg_dump database1 -f backup_database1.sql
2. Get info about users and rights.
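One way to capture the roles and grants (a suggestion, not from the original) is to dump just the global objects:
pg_dumpall --globals-only -f backup_globals.sql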
3. A very debatable step here: remove the entire existing cluster (I had problems with the versions of bdr_init_copy and didn't have much time to play around; for me it was easier to recreate all the databases).
pg_dropcluster 9.6 main --stop
apt-get remove postgresql-9.6
4. Install Postgres with BDR extension. (Both nodes)
echo 'deb http://packages.2ndquadrant.com/bdr/apt/ jessie-2ndquadrant main' >> /etc/apt/sources.list
wget --quiet -O - http://packages.2ndquadrant.com/bdr/apt/AA7A6805.asc | sudo apt-key add -
apt-get update
apt-get install -y postgresql-bdr-9.4 postgresql-bdr-9.4-bdr-plugin php5-pgsql postgresql-bdr-contrib-9.4
5. Modify files in /etc/postgresql/9.4/main/ (Both nodes)
postgresql.conf (Add this to end)
listen_addresses = '*'
shared_preload_libraries = 'bdr'
wal_level = 'logical'
track_commit_timestamp = on
max_connections = 100
max_wal_senders = 10
max_replication_slots = 10
Note: don't forget to protect yourself with iptables, since Postgres now listens on all interfaces!
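A minimal iptables sketch for that (x.x.x.x and z.z.z.z are the node addresses from pg_hba.conf below):
iptables -A INPUT -p tcp --dport 5432 -s x.x.x.x -j ACCEPT
iptables -A INPUT -p tcp --dport 5432 -s z.z.z.z -j ACCEPT
iptables -A INPUT -p tcp --dport 5432 -j DROP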
pg_hba.conf
hostssl all all x.x.x.x/32 trust # Own IP address
hostssl all all z.z.z.z/32 trust # Second node IP address
hostssl replication postgres x.x.x.x/32 trust # Own IP address
hostssl replication postgres z.z.z.z/32 trust # Second node IP address
6. Restart Postgres (both nodes)
service postgresql restart
7. Create database and users for it (both nodes)
CREATE DATABASE database1;
CREATE ROLE database1_user WITH SUPERUSER LOGIN PASSWORD 'SuperPass';
GRANT ALL PRIVILEGES ON DATABASE database1 TO database1_user;
8. Create BDR extension on this database (Both nodes)
\c database1
create extension pgcrypto;
create extension btree_gist;
create extension bdr;
Check it with
SELECT bdr.bdr_variant();
SELECT bdr.bdr_version();
Should be like this
database1=# SELECT bdr.bdr_variant();
bdr_variant
-------------
BDR
(1 row)
database1=# SELECT bdr.bdr_version();
bdr_version
-------------------
1.0.2-2016-11-11-
(1 row)
9. Create first master node (Only first node!)
SELECT bdr.bdr_group_create(local_node_name := 'node1', node_external_dsn := 'host=<OWN EXTERNAL IP> port=5432 dbname=database1');
Check it with
SELECT bdr.bdr_node_join_wait_for_ready();
Should be like
database1=# SELECT bdr.bdr_node_join_wait_for_ready();
bdr_node_join_wait_for_ready
------------------------------
(1 row)
10. Create second master node (Only second node!)
SELECT bdr.bdr_group_join(local_node_name := 'node2', node_external_dsn := 'host=<OWN EXTERNAL IP> port=5432 dbname=database1', join_using_dsn := 'host=<NODE1 EXTERNAL IP> port=5432 dbname=database1');
Check with
SELECT bdr.bdr_node_join_wait_for_ready();
Should be the same as in step 9.
11. Restore the database data (on any node, but only on one of them)
psql database1 < backup_database1.sql
Yes, maybe not the best solution, but it works for me. Also, if you can't drop your data or for some other reason can't delete the database, you can use bdr_init_copy on the 2nd node, but I'll leave that task to you :)
Tuesday, February 7, 2017
Vultr HA. Some additions to existing article.
So, I've now switched completely from DigitalOcean to Vultr. To be honest, mostly because of price, since it's a bit cheaper. And it accepts debit cards.
I also had a chance to set up HA with this cloud provider, following this article.
But here are some additions to it for a complete algorithm.
1. Order BGP via a support ticket, using the Vultr-provided AS.
2. Purchase a Reserved IP address, but do not assign it to any of the instances.
3. Order 2 instances in the same location.
4. Set up the network on both instances according to the article.
4.1 Use the Reserved IP address as the dummy1 interface IP (see the interfaces sketch below).
4.2 Use a /32 subnet mask regardless of what you see in the Reserved IP settings.
4.3 Use the Bird settings from the server instance's BGP page (the BGP configuration link) so as not to mix up the AS numbers, addresses and passwords.
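A rough /etc/network/interfaces sketch for the dummy1 interface from 4.1 (the reserved IP is a placeholder; the referenced article has the authoritative version, and the dummy kernel module must be available):
auto dummy1
iface dummy1 inet static
    address <RESERVED_IP>
    netmask 255.255.255.255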
Saturday, January 21, 2017
Asterisk (localnet/exteraddr) -> Freeswitch
Just to remember one option.
By default, when Asterisk works behind NAT and parameters like localnet and externaddr are set properly, it automatically uses the internal address in SIP/SDP messages for internal connections and the external one for external connections.
FreeSWITCH can do the same, but it needs a few more options.
Profile settings
<param name="ext-sip-ip" value="autonat:$${external_sip_ip}"/>
<param name="ext-rtp-ip" value="autonat:$${external_rtp_ip}"/>
<param name="local-network-acl" value="localnet.auto"/>
Note the autonat: prefix. Yes, I know it's described in the wiki, but nevertheless: without it, FreeSWITCH will use the ext-rtp-ip address in all messages, regardless of whether they are internal or external.
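For context, $${external_sip_ip} and $${external_rtp_ip} are normally defined in vars.xml; a minimal sketch (the address is a placeholder, stock FreeSWITCH defaults these via STUN):
<X-PRE-PROCESS cmd="set" data="external_sip_ip=203.0.113.10"/>
<X-PRE-PROCESS cmd="set" data="external_rtp_ip=203.0.113.10"/>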
Tuesday, January 17, 2017
FusionPBX parking based on mod_fifo
Got an interesting task recently: make parking slots in FusionPBX behave like Asterisk parking, with BLF support, but without dialing "park+<slot>" the way mod_valet_parking wants.
Actually, googling turns up this solution.
I've just modified it a bit to fit the Fusion logic and make BLF more stable.
Here I use *771 as a single-slot parking extension.
<extension name="ParkExtension771">
<condition field="destination_number" expression="^\*771$" break="on-false">
<action inline="true" application="set" data="presence_id=${destination_number}@${domain_name}"/>
<action inline="true" application="set" data="slot_count=${fifo(count ${destination_number}@${domain_name})}"/>
<action inline="true" application="set" data="slot_count=${slot_count:-9:2}"/>
</condition>
<condition field="${slot_count}" expression="^\:0|no$" break="always">
<action application="unset" data="fifo_chime_list"/>
<action application="set" data="fifo_chime_freq=0"/>
<action application="fifo" data="${destination_number}@${domain_name} in undef ${hold_music}"/>
<anti-action application="fifo" data="${destination_number}@${domain_name} out nowait"/>
</condition>
</extension>
Actually, it's almost the same as the Confluence solution, but you need to add the presence_id variable set and make it inline (without inline, BLF does not work on transferred calls, only on direct calls).
Also, an additional condition on slot_count is added; it's there to initialize the fifo slot the first time.
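To check the slot state by hand from fs_cli, the same fifo API that the condition above relies on can be called directly (the domain is a placeholder):
fifo count *771@example.com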
So, just set your BLF key to *771 (in this case) and that is your parking slot number. When it's red, you have a call on it.
"Your wife is on line 2", like in old US police TV series :)