#include "../../atomic_ops.h" /* membar_depends() */

extern int tm_failure_exec_mode;
extern int tm_dns_reuse_rcv_socket;

static int goto_on_branch = 0, branch_route = 0;

void t_on_branch( unsigned int go_to )
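/* Illustrative sketch (not part of the patch), assuming the usual pattern in
 * this file: the script-visible setter above only records the requested
 * branch-route index in the static goto_on_branch, and the forwarding code
 * reads it back when branches are created. The getter name below is
 * hypothetical. */
static unsigned int get_on_branch_sketch(void)
{
	return goto_on_branch;	/* 0 means "no branch route armed" */
}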
parsed_uri_bak=i_req->parsed_uri;
parsed_uri_ok_bak=i_req->parsed_uri_ok;
path_bak=i_req->path_vec;
instance_bak=i_req->instance;
ruid_bak=i_req->ruid;
ua_bak=i_req->location_ua;

if (unlikely(branch_route || has_tran_tmcbs(t, TMCB_REQUEST_FWDED))){
	/* dup uris, path and so on if we have a branch route or callback */
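/* Illustrative sketch (not part of the patch): the assignments above take
 * shallow backups of the message's str handles (pointer + length) before a
 * branch route or TMCB_REQUEST_FWDED callback may rewrite them; the matching
 * restore near the end of the function puts the originals back so the next
 * branch starts from an unmodified request. Only the project's str type is
 * assumed; the helper below is hypothetical. */
static void run_with_temporary_value(str *field, str tmp_value,
		void (*per_branch_cb)(void))
{
	str bak = *field;	/* shallow backup: the underlying buffer is not copied */

	*field = tmp_value;	/* the callback sees the per-branch value */
	per_branch_cb();
	*field = bak;		/* restore so later branches see the original */
}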
/* update instance */
/* if instance points to msg instance, it needs to be "fixed" so that we
   can change/update msg->instance */
if (instance==&i_req->instance)
	instance=&instance_bak;
/* zero it first so that set_instance will work */
i_req->instance.len=0;
if (unlikely(instance)){
	if (unlikely(set_instance(i_req, instance)<0)){

/* if ruid points to msg ruid, it needs to be "fixed" so that we
   can change/update msg->ruid */
if (ruid==&i_req->ruid)
/* zero it first so that set_ruid will work */
if (unlikely(set_ruid(i_req, ruid)<0)){

/* update location_ua */
/* if location_ua points to msg location_ua, it needs to be "fixed" so that we
   can change/update msg->location_ua */
if (location_ua==&i_req->location_ua)
/* zero it first so that set_ua will work */
i_req->location_ua.s=0;
i_req->location_ua.len=0;
if (unlikely(location_ua)){
	if (unlikely(set_ua(i_req, location_ua)<0)){
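/* Illustrative sketch (not part of the patch): the point of the "fixed"
 * comments above. If the caller passed a pointer into the message itself
 * (src == dst), the value must be read from the saved backup, because the
 * destination is zeroed before set_instance()/set_ruid()/set_ua() rebuilds
 * it. str comes from the project; the function below and its shallow
 * assignment are hypothetical (the real helpers duplicate the bytes). */
static void set_msg_field(str *dst, const str *src, const str *bak)
{
	if (src == dst)
		src = bak;	/* never read dst after it is cleared below */
	dst->s = 0;
	dst->len = 0;	/* cleared so the setter starts from an empty field */
	if (src && src->s) {
		dst->s = src->s;
		dst->len = src->len;
	}
}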
/* backup dst uri & zero it */
dst_uri_bak=i_req->dst_uri;

i_req->path_vec.s=0;
i_req->path_vec.len=0;

if (unlikely(instance && (i_req->instance.s!=instance->s ||
				i_req->instance.len!=instance->len))){
	i_req->instance=*instance;
}else if (unlikely(instance==0 && i_req->instance.len!=0)){
	i_req->instance.len=0;

if (unlikely(ruid && (i_req->ruid.s!=ruid->s ||
				i_req->ruid.len!=ruid->len))){
}else if (unlikely(ruid==0 && i_req->ruid.len!=0)){

if (unlikely(location_ua && (i_req->location_ua.s!=location_ua->s ||
				i_req->location_ua.len!=location_ua->len))){
	i_req->location_ua=*location_ua;
}else if (unlikely(location_ua==0 && i_req->location_ua.len!=0)){
	i_req->location_ua.s=0;
	i_req->location_ua.len=0;
if (likely(next_hop!=0 || (flags & UAC_DNS_FAILOVER_F))){

/* Set on_reply and on_negative handlers for this branch to the handlers in the transaction */
t->uac[branch].on_reply = t->on_reply;
t->uac[branch].on_failure = t->on_failure;
t->uac[branch].on_branch_failure = t->on_branch_failure;

/* check if send_sock is ok */
if (t->uac[branch].request.dst.send_sock==0) {

t->uac[branch].path.s[i_req->path_vec.len]=0;
memcpy( t->uac[branch].path.s, i_req->path_vec.s, i_req->path_vec.len);
if (unlikely(i_req->instance.s && i_req->instance.len)){
	t->uac[branch].instance.s=shm_malloc(i_req->instance.len+1);
	if (unlikely(t->uac[branch].instance.s==0)) {
		t->uac[branch].request.buffer=0;
		t->uac[branch].request.buffer_len=0;
		t->uac[branch].uri.s=0;
		t->uac[branch].uri.len=0;

	t->uac[branch].instance.len=i_req->instance.len;
	t->uac[branch].instance.s[i_req->instance.len]=0;
	memcpy( t->uac[branch].instance.s, i_req->instance.s, i_req->instance.len);

if (unlikely(i_req->ruid.s && i_req->ruid.len)){
	t->uac[branch].ruid.s=shm_malloc(i_req->ruid.len+1);
	if (unlikely(t->uac[branch].ruid.s==0)) {
		t->uac[branch].request.buffer=0;
		t->uac[branch].request.buffer_len=0;
		t->uac[branch].uri.s=0;
		t->uac[branch].uri.len=0;

	t->uac[branch].ruid.len=i_req->ruid.len;
	t->uac[branch].ruid.s[i_req->ruid.len]=0;
	memcpy( t->uac[branch].ruid.s, i_req->ruid.s, i_req->ruid.len);

if (unlikely(i_req->location_ua.s && i_req->location_ua.len)){
	t->uac[branch].location_ua.s=shm_malloc(i_req->location_ua.len+1);
	if (unlikely(t->uac[branch].location_ua.s==0)) {
		t->uac[branch].request.buffer=0;
		t->uac[branch].request.buffer_len=0;
		t->uac[branch].uri.s=0;
		t->uac[branch].uri.len=0;

	t->uac[branch].location_ua.len=i_req->location_ua.len;
	t->uac[branch].location_ua.s[i_req->location_ua.len]=0;
	memcpy( t->uac[branch].location_ua.s, i_req->location_ua.s, i_req->location_ua.len);
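/* Illustrative sketch (not part of the patch): the three blocks above repeat
 * the same "shm_malloc(len+1), NUL-terminate, memcpy" idiom to give the new
 * branch its own shared-memory copy of instance/ruid/location_ua. Factored
 * out it would look roughly like this; shm_malloc() and str come from the
 * project, the helper name is hypothetical. */
static int shm_str_dup_z(str *dst, const str *src)
{
	dst->s = shm_malloc(src->len + 1);
	if (dst->s == 0)
		return -1;		/* caller resets the branch and reports E_OUT_OF_MEM */
	dst->len = src->len;
	dst->s[src->len] = 0;	/* extra trailing NUL, as in the code above */
	memcpy(dst->s, src->s, src->len);
	return 0;
}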
if (unlikely(free_path)){
	reset_path_vector(i_req);

if (unlikely(free_instance)){
	reset_instance(i_req);

if (unlikely(free_ruid)){

if (unlikely(free_ua)){

if (dst_uri_backed_up){
	reset_dst_uri(i_req); /* free dst_uri */
	i_req->dst_uri=dst_uri_bak;

i_req->parsed_uri=parsed_uri_bak;
i_req->parsed_uri_ok=parsed_uri_ok_bak;
i_req->path_vec=path_bak;
i_req->instance=instance_bak;
i_req->ruid=ruid_bak;
i_req->location_ua=ua_bak;

/* Delete the duplicated lump lists, this will also delete
 * all lumps created here, such as lumps created in per-branch ... */

membar_write(); /* to allow lockless prepare_to_cancel() we want to be sure
				   all the writes finished before updating branch number */
t->nr_of_outgoings=(branch+1);
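/* Illustrative sketch (not part of the patch): the publication order used
 * above. The writer finishes all per-branch writes, issues membar_write() and
 * only then advances nr_of_outgoings, so a lockless reader such as
 * prepare_to_cancel() that loads the counter first (paired with the matching
 * read barrier, e.g. membar_depends() from ../../atomic_ops.h) never sees a
 * half-initialized branch. The struct and function below are hypothetical. */
struct slot_sketch { int ready; };

static void publish_slot(struct slot_sketch *slots, volatile int *nr_out,
		int branch)
{
	slots[branch].ready = 1;	/* stands in for all the per-branch writes */
	membar_write();			/* writes become visible before the counter */
	*nr_out = branch + 1;		/* publish: readers may now use this slot */
}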
t->async_backup.blind_uac = branch; /* whenever we create a blind UAC, let's
				* save the current branch; this is used in async tm processing,
				* specifically to be able to route replies that were possibly in
				* response to a request forwarded on this blind UAC; we still want
				* replies to be processed as if it were a normal UAC */

/* start FR timer -- protocol set by default to PROTO_NONE,
 * which means retransmission timer will not be started ... */
static int add_uac( struct cell *t, struct sip_msg *request, str *uri,
		str* next_hop, str* path, struct proxy_l *proxy,
		struct socket_info* fsocket, snd_flags_t snd_flags,
		int proto, int flags, str *instance, str *ruid,

/* now message printing starts ... */
if (unlikely( (ret=prepare_new_uac(t, request, branch, uri, path,
		next_hop, fsocket, snd_flags,
		proto, flags, instance, ruid,
		struct socket_info* fsocket,
		snd_flags_t send_flags,
		char *buf, short buf_len,
		str *instance, str *ruid,
t->uac[branch].path.s[path->len]=0;
memcpy( t->uac[branch].path.s, path->s, path->len);

/* copy the instance */
if (unlikely(instance && instance->s)){
	t->uac[branch].instance.s=shm_malloc(instance->len+1);
	if (unlikely(t->uac[branch].instance.s==0)) {
		t->uac[branch].request.buffer=0;
		t->uac[branch].request.buffer_len=0;
		t->uac[branch].uri.s=0;
		t->uac[branch].uri.len=0;
		ret=ser_error=E_OUT_OF_MEM;

	t->uac[branch].instance.len=instance->len;
	t->uac[branch].instance.s[instance->len]=0;
	memcpy( t->uac[branch].instance.s, instance->s, instance->len);

if (unlikely(ruid && ruid->s)){
	t->uac[branch].ruid.s=shm_malloc(ruid->len+1);
	if (unlikely(t->uac[branch].ruid.s==0)) {
		t->uac[branch].request.buffer=0;
		t->uac[branch].request.buffer_len=0;
		t->uac[branch].uri.s=0;
		t->uac[branch].uri.len=0;
		ret=ser_error=E_OUT_OF_MEM;

	t->uac[branch].ruid.len=ruid->len;
	t->uac[branch].ruid.s[ruid->len]=0;
	memcpy( t->uac[branch].ruid.s, ruid->s, ruid->len);

/* copy the location_ua */
if (unlikely(location_ua && location_ua->s)){
	t->uac[branch].location_ua.s=shm_malloc(location_ua->len+1);
	if (unlikely(t->uac[branch].location_ua.s==0)) {
		t->uac[branch].request.buffer=0;
		t->uac[branch].request.buffer_len=0;
		t->uac[branch].uri.s=0;
		t->uac[branch].uri.len=0;
		ret=ser_error=E_OUT_OF_MEM;

	t->uac[branch].location_ua.len=location_ua->len;
	t->uac[branch].location_ua.s[location_ua->len]=0;
	memcpy( t->uac[branch].location_ua.s, location_ua->s, location_ua->len);

t->uac[branch].on_reply = t->on_reply;
t->uac[branch].on_failure = t->on_failure;
t->uac[branch].on_branch_failure = t->on_branch_failure;

membar_write(); /* to allow lockless ops (e.g. prepare_to_cancel()) we want
				   to be sure everything above is fully written before
				   updating branches no. */
		(old_uac->request.dst.send_flags.f &
			SND_F_FORCE_SOCKET)?
			old_uac->request.dst.send_sock:
			((tm_dns_reuse_rcv_socket)
				?msg->rcv.bind_address:0),
		old_uac->request.dst.send_flags,
		old_uac->request.dst.proto,
		old_uac->request.buffer,
		old_uac->request.buffer_len,
		&old_uac->instance, &old_uac->ruid,
		&old_uac->location_ua);
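/* Illustrative sketch (not part of the patch): the send-socket argument built
 * inline in the call above and in the add_uac() call below. The old branch's
 * socket is kept only when it was explicitly forced; otherwise, if
 * tm_dns_reuse_rcv_socket is set, the socket the request arrived on is
 * reused, else no socket is forced and the failover path picks one.
 * Hypothetical helper over project types. */
static struct socket_info *failover_send_sock(int socket_was_forced,
		struct socket_info *old_sock, struct socket_info *rcv_sock)
{
	if (socket_was_forced)
		return old_sock;	/* SND_F_FORCE_SOCKET was set on the old branch */
	return tm_dns_reuse_rcv_socket ? rcv_sock : 0;
}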
/* add_uac will use dns_h => next_hop will be ignored.
 * Unfortunately we can't reuse the old buffer, the branch id
 * must be changed and the send_socket might be different => ... */
ret=add_uac(t, msg, &old_uac->uri, 0, &old_uac->path, 0,
		(old_uac->request.dst.send_flags.f &
			SND_F_FORCE_SOCKET)?
			old_uac->request.dst.send_sock:
			((tm_dns_reuse_rcv_socket)
				?msg->rcv.bind_address:0),
		old_uac->request.dst.send_flags,
		old_uac->request.dst.proto, UAC_DNS_FAILOVER_F,
		&old_uac->instance, &old_uac->ruid,
		&old_uac->location_ua);
/* failed, delete the copied dns_h */

if (unlikely((ret=prepare_new_uac( t_cancel, cancel_msg, branch,
		&t_invite->uac[branch].uri,
		&t_invite->uac[branch].path,
		0, 0, snd_flags, PROTO_NONE, 0,
		NULL, NULL, NULL)) <0)){
uac->icode = 908; /* internal code set to delivery failure */
LOG(L_ERR, "ERROR: t_send_branch: sending request on branch %d "
		"failed\n", branch);
if (proxy) { proxy->errors++; proxy->ok=0; }
if (tm_failure_exec_mode==1) {
	LM_DBG("putting branch %d on hold\n", branch);
	/* put on retransmission timer,
	 * but set proto to NONE, so actually it is not trying to resend */
	uac->request.dst.proto = PROTO_NONE;
	/* reset last_received, 408 reply is faked by timer */
	uac->last_received=0;
	/* add to retransmission timer */
	if (start_retr( &uac->request )!=0){
		LM_CRIT("retransmission already started for %p\n",
if (unlikely(has_tran_tmcbs(t, TMCB_REQUEST_SENT)))

branch_ret=add_uac( t, p_msg, GET_RURI(p_msg), GET_NEXT_HOP(p_msg),
		&p_msg->path_vec, proxy, p_msg->force_send_socket,
		p_msg->fwd_send_flags, proto,
		(p_msg->dst_uri.len)?0:UAC_SKIP_BR_DST_F, &p_msg->instance,
		&p_msg->ruid, &p_msg->location_ua);
if (branch_ret>=0)
	added_branches |= 1<<branch_ret;
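/* Illustrative sketch (not part of the patch): add_uac() returns the index of
 * the branch it created (or a negative error), and the caller records each
 * success as one bit in added_branches; an all-zero mask after the loop means
 * no branch could be added. Self-contained toy version, hypothetical names. */
static unsigned int collect_added_branches(const int *branch_rets, int n)
{
	unsigned int added = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (branch_rets[i] >= 0)
			added |= 1u << branch_rets[i];	/* branch index -> bit */
	}
	return added;	/* 0 => every add_uac() attempt failed */
}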
init_branch_iterator();
while((current_uri.s=next_branch( &current_uri.len, &q, &dst_uri, &path,
			&bflags, &si, &ruid, &instance, &location_ua))) {

	setbflagsval(0, bflags);

	branch_ret=add_uac( t, p_msg, &current_uri,
			(dst_uri.len) ? (&dst_uri) : &current_uri,
			&path, proxy, si, p_msg->fwd_send_flags,
			proto, (dst_uri.len)?0:UAC_SKIP_BR_DST_F, &instance,
			&ruid, &location_ua);
	/* pick some of the errors in case things go wrong;
	 * note that picking lowest error is just as good as
	 * any other algorithm which picks any other negative