X-Git-Url: https://git.m6w6.name/?a=blobdiff_plain;f=libmemcached%2Fquit.cc;h=5d17b3133e76375e92d8f88498ee73d52784e967;hb=bdb615a66ac1df020917d000adf2f73e49fd3a25;hp=4bf72f64adf94b985e4a2eb9506414de171122a5;hpb=ca663a567bc8d3facb22b035bcad19349e42a9b1;p=m6w6%2Flibmemcached
diff --git a/libmemcached/quit.cc b/libmemcached/quit.cc
index 4bf72f64..5d17b313 100644
--- a/libmemcached/quit.cc
+++ b/libmemcached/quit.cc
@@ -37,6 +37,66 @@
 #include 
 
+namespace {
+  memcached_return_t send_quit_message(memcached_instance_st* instance)
+  {
+    memcached_return_t rc;
+    if (instance->root->flags.binary_protocol)
+    {
+      protocol_binary_request_quit request= {}; // = {.bytes= {0}};
+
+      initialize_binary_request(instance, request.message.header);
+
+      request.message.header.request.opcode = PROTOCOL_BINARY_CMD_QUIT;
+      request.message.header.request.datatype = PROTOCOL_BINARY_RAW_BYTES;
+
+      libmemcached_io_vector_st vector[]=
+      {
+        { request.bytes, sizeof(request.bytes) }
+      };
+
+      rc= memcached_vdo(instance, vector, 1, true);
+    }
+    else
+    {
+      libmemcached_io_vector_st vector[]=
+      {
+        { memcached_literal_param("quit\r\n") }
+      };
+
+      rc= memcached_vdo(instance, vector, 1, true);
+    }
+
+    return rc;
+  }
+
+  void drain_instance(memcached_instance_st* instance)
+  {
+    /* read until the socket is closed, or there is an error;
+     * closing the socket before all data is read
+     * results in the server throwing away all data which is
+     * not read.
+     *
+     * In 0.40 we began to only do this if we had been doing buffered
+     * requests or had replication enabled.
+     */
+    if (instance->root->flags.buffer_requests or instance->root->number_of_replicas)
+    {
+      memcached_io_slurp(instance);
+    }
+
+    /*
+     * memcached_io_read may call memcached_quit_server with io_death if
+     * it encounters problems, but we don't care about those occurrences.
+     * The intention of the slurp above is to drain the data sent from the
+     * server, to ensure that the server processed all of the data we
+     * sent to it.
+     */
+    instance->server_failure_counter= 0;
+    instance->server_timeout_counter= 0;
+  }
+}
+
 /*
   This closes all connections (forces flush of input as well).
 
@@ -46,112 +106,52 @@ will force data to be completed.
 */
 
-void memcached_quit_server(memcached_instance_st *ptr, bool io_death)
+void memcached_quit_server(memcached_instance_st* instance, bool io_death)
 {
-  if (ptr->fd != INVALID_SOCKET)
+  if (instance->valid())
   {
-    if (io_death == false and memcached_is_udp(ptr->root) == false and ptr->options.is_shutting_down == false)
+    if (io_death == false and memcached_is_udp(instance->root) == false and instance->is_shutting_down() == false)
     {
-      ptr->options.is_shutting_down= true;
-
-      memcached_return_t rc;
-      if (ptr->root->flags.binary_protocol)
-      {
-        protocol_binary_request_quit request= {}; // = {.bytes= {0}};
-
-        initialize_binary_request(ptr, request.message.header);
+      send_quit_message(instance);
 
-        request.message.header.request.opcode = PROTOCOL_BINARY_CMD_QUIT;
-        request.message.header.request.datatype = PROTOCOL_BINARY_RAW_BYTES;
-
-        libmemcached_io_vector_st vector[]=
-        {
-          { request.bytes, sizeof(request.bytes) }
-        };
-
-        rc= memcached_vdo(ptr, vector, 1, true);
-      }
-      else
-      {
-        libmemcached_io_vector_st vector[]=
-        {
-          { memcached_literal_param("quit\r\n") }
-        };
-
-        rc= memcached_vdo(ptr, vector, 1, true);
-      }
-
-      /* read until socket is closed, or there is an error
-       * closing the socket before all data is read
-       * results in server throwing away all data which is
-       * not read
-       *
-       * In .40 we began to only do this if we had been doing buffered
-       * requests of had replication enabled.
-       */
-      if (memcached_success(rc) and (ptr->root->flags.buffer_requests or ptr->root->number_of_replicas))
-      {
-        if (0)
-        {
-          memcached_return_t rc_slurp;
-          while (memcached_continue(rc_slurp= memcached_io_slurp(ptr))) {} ;
-          WATCHPOINT_ASSERT(rc_slurp == MEMCACHED_CONNECTION_FAILURE);
-        }
-        else
-        {
-          memcached_io_slurp(ptr);
-        }
-      }
-
-      /*
-       * memcached_io_read may call memcached_quit_server with io_death if
-       * it encounters problems, but we don't care about those occurences.
-       * The intention of that loop is to drain the data sent from the
-       * server to ensure that the server processed all of the data we
-       * sent to the server.
-       */
-      ptr->server_failure_counter= 0;
+      instance->start_close_socket();
+      drain_instance(instance);
     }
-    memcached_io_close(ptr);
   }
 
-  ptr->state= MEMCACHED_SERVER_STATE_NEW;
-  ptr->cursor_active= 0;
-  ptr->io_bytes_sent= 0;
-  ptr->write_buffer_offset= size_t(ptr->root and memcached_is_udp(ptr->root) ? UDP_DATAGRAM_HEADER_LENGTH : 0);
-  ptr->read_buffer_length= 0;
-  ptr->read_ptr= ptr->read_buffer;
-  ptr->options.is_shutting_down= false;
-  memcached_server_response_reset(ptr);
+  instance->close_socket();
 
-  // We reset the version so that if we end up talking to a different server
-  // we don't have stale server version information.
-  ptr->major_version= ptr->minor_version= ptr->micro_version= UINT8_MAX;
-
-  if (io_death)
+  if (io_death and memcached_is_udp(instance->root))
   {
-    memcached_mark_server_for_timeout(ptr);
+    /*
+      If using UDP, we should stop using the server briefly on every IO
+      failure. If using TCP, it may be that the connection went down a
+      short while ago (e.g. the server failed) and we've only just
+      noticed, so we should only set the retry timeout on a connect
+      failure (which doesn't call this method).
+ */ + memcached_mark_server_for_timeout(instance); } } -void send_quit(memcached_st *ptr) +void send_quit(Memcached *memc) { - for (uint32_t x= 0; x < memcached_server_count(ptr); x++) + for (uint32_t x= 0; x < memcached_server_count(memc); x++) { - memcached_server_write_instance_st instance= - memcached_server_instance_fetch(ptr, x); + memcached_instance_st* instance= memcached_instance_fetch(memc, x); memcached_quit_server(instance, false); } } -void memcached_quit(memcached_st *ptr) +void memcached_quit(memcached_st *shell) { + Memcached* memc= memcached2Memcached(shell); memcached_return_t rc; - if (memcached_failed(rc= initialize_query(ptr, true))) + if (memcached_failed(rc= initialize_query(memc, true))) { return; } - send_quit(ptr); + send_quit(memc); }
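For context on where this patch sits in the public API: memcached_quit() walks every configured server through send_quit(), and memcached_quit_server() sends a plain-text "quit\r\n" (or a binary QUIT request), drains any buffered replies, and closes the socket. The sketch below is not part of the patch; it is a minimal C example of calling that API, assuming a memcached server reachable at localhost:11211 (host, port, key, and value are placeholder assumptions). Only stock libmemcached calls are used (memcached_create, memcached_server_add, memcached_behavior_set, memcached_set, memcached_quit, memcached_free).

    #include <libmemcached/memcached.h>
    #include <stdio.h>

    int main(void)
    {
      memcached_st *memc= memcached_create(NULL);   /* heap-allocated client handle */
      if (memc == NULL)
      {
        return 1;
      }

      /* Placeholder server address; any reachable memcached instance will do. */
      memcached_server_add(memc, "localhost", 11211);

      /* With buffered requests enabled, memcached_quit_server() will also slurp
         pending replies (see drain_instance() in the patch) before closing. */
      memcached_behavior_set(memc, MEMCACHED_BEHAVIOR_BUFFER_REQUESTS, 1);

      memcached_return_t rc= memcached_set(memc, "key", 3, "value", 5,
                                           (time_t)0, (uint32_t)0);
      if (memcached_failed(rc))
      {
        fprintf(stderr, "set failed: %s\n", memcached_strerror(memc, rc));
      }

      /* Send quit to every server and close the sockets; the handle remains
         valid and reconnects lazily on the next operation. */
      memcached_quit(memc);

      memcached_free(memc);
      return 0;
    }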