~ubuntu-branches/ubuntu/wily/trafficserver/wily

« back to all changes in this revision

Viewing changes to proxy/http/HttpClientSession.cc

  • Committer: Package Import Robot
  • Author(s): Adam Conrad
  • Date: 2012-12-17 22:28:16 UTC
  • mfrom: (5.1.8 raring-proposed)
  • Revision ID: package-import@ubuntu.com-20121217222816-7xwjsx5k76zkb63d
Tags: 3.2.0-1ubuntu1
* Revert FreeBSD strerror_r() fixes that give errors with glibc 2.16.
* Apply patch from Konstantinos Margaritis to define barriers on ARM.

Show diffs side-by-side

added added

removed removed

Lines of Context:
37
37
#include "HttpDebugNames.h"
38
38
#include "HttpServerSession.h"
39
39
 
 
40
#define DebugSsn(tag, ...) DebugSpecific(debug_on, tag, __VA_ARGS__)
40
41
#define STATE_ENTER(state_name, event, vio) { \
41
42
    /*ink_debug_assert (magic == HTTP_SM_MAGIC_ALIVE);  REMEMBER (event, NULL, reentrancy_count); */ \
42
 
        Debug("http_cs", "[%" PRId64 "] [%s, %s]", con_id, \
 
43
        DebugSsn("http_cs", "[%" PRId64 "] [%s, %s]", con_id, \
43
44
        #state_name, HttpDebugNames::get_event_name(event)); }
44
45
 
45
46
enum
58
59
 
59
60
HttpClientSession::HttpClientSession()
60
61
  : VConnection(NULL), con_id(0), client_vc(NULL), magic(HTTP_CS_MAGIC_DEAD),
 
62
    tcp_init_cwnd_set(false),
61
63
    transact_count(0), half_close(false), conn_decrease(false), bound_ss(NULL),
62
64
    read_buffer(NULL), current_reader(NULL), read_state(HCS_INIT),
63
65
    ka_vio(NULL), slave_ka_vio(NULL),
64
66
    cur_hook_id(TS_HTTP_LAST_HOOK), cur_hook(NULL),
65
 
    cur_hooks(0), backdoor_connect(false), hooks_set(0),
66
 
    m_active(false)
 
67
    cur_hooks(0), proxy_allocated(false), backdoor_connect(false), hooks_set(0),
 
68
    m_active(false), debug_on(false)
67
69
{
68
70
  memset(user_args, 0, sizeof(user_args));
69
71
}
71
73
void
72
74
HttpClientSession::cleanup()
73
75
{
74
 
  Debug("http_cs", "[%" PRId64 "] session destroy", con_id);
 
76
  DebugSsn("http_cs", "[%" PRId64 "] session destroy", con_id);
75
77
 
76
78
  ink_release_assert(client_vc == NULL);
77
79
  ink_release_assert(bound_ss == NULL);
90
92
  ink_assert(client_vc == 0);
91
93
  api_hooks.clear();
92
94
  mutex.clear();
 
95
  debug_on = false;
93
96
 
94
97
  if (conn_decrease) {
95
98
    HTTP_DECREMENT_DYN_STAT(http_current_client_connections_stat);
101
104
HttpClientSession::destroy()
102
105
{
103
106
  this->cleanup();
104
 
  httpClientSessionAllocator.free(this);
 
107
  if (proxy_allocated)
 
108
    THREAD_FREE(this, httpClientSessionAllocator, this_ethread());
 
109
  else
 
110
    httpClientSessionAllocator.free(this);
105
111
}
106
112
 
107
113
HttpClientSession *
146
152
  /////////////////////////
147
153
  // set up timeouts     //
148
154
  /////////////////////////
149
 
  Debug("http_cs", "[%" PRId64 "] using accept inactivity timeout [%d seconds]",
 
155
  DebugSsn("http_cs", "[%" PRId64 "] using accept inactivity timeout [%"PRId64" seconds]",
150
156
        con_id, HttpConfig::m_master.accept_no_activity_timeout);
151
157
  client_vc->set_inactivity_timeout(HRTIME_SECONDS(HttpConfig::m_master.accept_no_activity_timeout));
152
158
 
153
159
  client_vc->set_active_timeout(HRTIME_SECONDS(HttpConfig::m_master.transaction_active_timeout_in));
154
160
 
155
161
  transact_count++;
156
 
  Debug("http_cs", "[%" PRId64 "] Starting transaction %d using sm [%" PRId64 "]", con_id, transact_count, current_reader->sm_id);
 
162
  DebugSsn("http_cs", "[%" PRId64 "] Starting transaction %d using sm [%" PRId64 "]", con_id, transact_count, current_reader->sm_id);
157
163
 
158
164
  current_reader->attach_client_session(this, sm_reader);
159
165
}
201
207
  // check what type of socket address we just accepted
202
208
  // by looking at the address family value of sockaddr_storage
203
209
  // and logging to stat system
204
 
  switch(new_vc->get_remote_addr()->ss_family) {
 
210
  switch(new_vc->get_remote_addr()->sa_family) {
205
211
    case AF_INET:
206
212
      HTTP_INCREMENT_DYN_STAT(http_total_client_connections_ipv4_stat);
207
213
    break;
224
230
  ink_mutex_release(&debug_cs_list_mutex);
225
231
#endif
226
232
 
227
 
  Debug("http_cs", "[%" PRId64 "] session born, netvc %p", con_id, new_vc);
 
233
  DebugSsn("http_cs", "[%" PRId64 "] session born, netvc %p", con_id, new_vc);
228
234
 
229
235
  read_buffer = new_MIOBuffer(HTTP_HEADER_BUFFER_SIZE_INDEX);
230
236
  sm_reader = read_buffer->alloc_reader();
249
255
VIO *
250
256
HttpClientSession::do_io_write(Continuation * c, int64_t nbytes, IOBufferReader * buf, bool owner)
251
257
{
 
258
  /* conditionally set the tcp initial congestion window
 
259
     before our first write. */
 
260
  DebugSsn("http_cs", "tcp_init_cwnd_set %d\n", (int)tcp_init_cwnd_set);
 
261
  if(!tcp_init_cwnd_set) {
 
262
    tcp_init_cwnd_set = true;
 
263
    set_tcp_init_cwnd();
 
264
  }
252
265
  return client_vc->do_io_write(c, nbytes, buf, owner);
253
266
}
254
267
 
255
268
void
 
269
HttpClientSession::set_tcp_init_cwnd()
 
270
{
 
271
  int desired_tcp_init_cwnd = current_reader->t_state.txn_conf->server_tcp_init_cwnd;
 
272
  DebugSsn("http_cs", "desired TCP congestion window is %d\n", desired_tcp_init_cwnd);
 
273
  if(desired_tcp_init_cwnd == 0) return;
 
274
  if(get_netvc()->set_tcp_init_cwnd(desired_tcp_init_cwnd) != 0)
 
275
    DebugSsn("http_cs", "set_tcp_init_cwnd(%d) failed", desired_tcp_init_cwnd);
 
276
}
 
277
 
 
278
void
256
279
HttpClientSession::do_io_shutdown(ShutdownHowTo_t howto)
257
280
{
258
281
  client_vc->do_io_shutdown(howto);
283
306
  if (half_close) {
284
307
    read_state = HCS_HALF_CLOSED;
285
308
    SET_HANDLER(&HttpClientSession::state_wait_for_close);
286
 
    Debug("http_cs", "[%" PRId64 "] session half close", con_id);
 
309
    DebugSsn("http_cs", "[%" PRId64 "] session half close", con_id);
287
310
 
288
311
    // We want the client to know that we're finished
289
312
    //  writing.  The write shutdown accomplishes this.  Unfortunately,
304
327
    // Set the active timeout to the same as the inactive time so
305
328
    //   that this connection does not hang around forever if
306
329
    //   the ua hasn't closed
307
 
    client_vc->set_active_timeout(HRTIME_SECONDS(HttpConfig::m_master.keep_alive_no_activity_timeout_out));
 
330
    client_vc->set_active_timeout(HRTIME_SECONDS(current_reader->t_state.txn_conf->keep_alive_no_activity_timeout_out));
308
331
  } else {
309
332
    read_state = HCS_CLOSED;
310
333
    client_vc->do_io_close(alerrno);
311
 
    Debug("http_cs", "[%" PRId64 "] session closed", con_id);
 
334
    DebugSsn("http_cs", "[%" PRId64 "] session closed", con_id);
312
335
    client_vc = NULL;
313
336
    HTTP_SUM_DYN_STAT(http_transactions_per_client_con, transact_count);
314
337
    HTTP_DECREMENT_DYN_STAT(http_current_client_connections_stat);
532
555
    ink_assert(bound_ss == NULL);
533
556
    ssession->state = HSS_KA_CLIENT_SLAVE;
534
557
    bound_ss = ssession;
535
 
    Debug("http_cs", "[%" PRId64 "] attaching server session [%" PRId64 "] as slave", con_id, ssession->con_id);
 
558
    DebugSsn("http_cs", "[%" PRId64 "] attaching server session [%" PRId64 "] as slave", con_id, ssession->con_id);
536
559
    ink_assert(ssession->get_reader()->read_avail() == 0);
537
560
    ink_assert(ssession->get_netvc() != client_vc);
538
561
 
553
576
 
554
577
    if (transaction_done) {
555
578
      ssession->get_netvc()->
556
 
        set_inactivity_timeout(HRTIME_SECONDS(HttpConfig::m_master.keep_alive_no_activity_timeout_out));
 
579
        set_inactivity_timeout(HRTIME_SECONDS(current_reader->t_state.txn_conf->keep_alive_no_activity_timeout_out));
557
580
      ssession->get_netvc()->
558
 
        set_active_timeout(HRTIME_SECONDS(HttpConfig::m_master.keep_alive_no_activity_timeout_out));
 
581
        set_active_timeout(HRTIME_SECONDS(current_reader->t_state.txn_conf->keep_alive_no_activity_timeout_out));
559
582
    } else {
560
583
      // we are serving from the cache - this could take a while.
561
584
      ssession->get_netvc()->cancel_inactivity_timeout();
575
598
  ink_assert(current_reader != NULL);
576
599
  MgmtInt ka_in = current_reader->t_state.txn_conf->keep_alive_no_activity_timeout_in;
577
600
 
578
 
  Debug("http_cs", "[%" PRId64 "] session released by sm [%" PRId64 "]", con_id, current_reader->sm_id);
 
601
  DebugSsn("http_cs", "[%" PRId64 "] session released by sm [%" PRId64 "]", con_id, current_reader->sm_id);
579
602
  current_reader = NULL;
580
603
 
581
604
  // handling potential keep-alive here
598
621
  //  machine to process it.  Otherwise, issue an
599
622
  //  IO to wait for new data
600
623
  if (sm_reader->read_avail() > 0) {
601
 
    Debug("http_cs", "[%" PRId64 "] data already in buffer, starting new transaction", con_id);
 
624
    DebugSsn("http_cs", "[%" PRId64 "] data already in buffer, starting new transaction", con_id);
602
625
    new_transaction();
603
626
  } else {
604
 
    Debug("http_cs", "[%" PRId64 "] initiating io for next header", con_id);
 
627
    DebugSsn("http_cs", "[%" PRId64 "] initiating io for next header", con_id);
605
628
    read_state = HCS_KEEP_ALIVE;
606
629
    SET_HANDLER(&HttpClientSession::state_keep_alive);
607
630
    ka_vio = this->do_io_read(this, INT64_MAX, read_buffer);