[m6w6/libmemcached] / src / libmemcached / io.cc
/*
    +--------------------------------------------------------------------+
    | libmemcached - C/C++ Client Library for memcached                   |
    +--------------------------------------------------------------------+
    | Redistribution and use in source and binary forms, with or without  |
    | modification, are permitted under the terms of the BSD license.     |
    | You should have received a copy of the license in a bundled file    |
    | named LICENSE; in case you did not receive a copy you can review    |
    | the terms online at: https://opensource.org/licenses/BSD-3-Clause   |
    +--------------------------------------------------------------------+
    | Copyright (c) 2006-2014 Brian Aker https://datadifferential.com/    |
    | Copyright (c) 2020 Michael Wallner <mike@php.net>                   |
    +--------------------------------------------------------------------+
*/

#include "libmemcached/common.h"
#include "p9y/poll.hpp"

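/**
 * Prepare the common header fields for a binary protocol request: bump the
 * per-server request counter, mark the packet as a request
 * (PROTOCOL_BINARY_REQ) and store the request id in the opaque field, which
 * the server echoes back so the response can be matched to this request.
 */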
void initialize_binary_request(memcached_instance_st *server,
                               protocol_binary_request_header &header) {
  server->request_id++;
  header.request.magic = PROTOCOL_BINARY_REQ;
  header.request.opaque = htons(server->request_id);
}

enum memc_read_or_write { MEM_READ, MEM_WRITE };

/**
 * Try to fill the input buffer for a server with as much
 * data as possible.
 *
 * @param instance the server to pack
 */
static bool repack_input_buffer(memcached_instance_st *instance) {
  if (instance->read_ptr != instance->read_buffer) {
    /* Move all of the data to the beginning of the buffer so
    ** that we can fit more data into the buffer...
    */
    memmove(instance->read_buffer, instance->read_ptr, instance->read_buffer_length);
    instance->read_ptr = instance->read_buffer;
  }

  /* There is room in the buffer, try to fill it! */
  if (instance->read_buffer_length != MEMCACHED_MAX_BUFFER) {
    do {
      /* Just try a single read to grab what's available */
      ssize_t nr;
      if ((nr = ::recv(instance->fd, instance->read_ptr + instance->read_buffer_length,
                       MEMCACHED_MAX_BUFFER - instance->read_buffer_length, MSG_NOSIGNAL))
          <= 0)
      {
        if (nr == 0) {
          memcached_set_error(*instance, MEMCACHED_CONNECTION_FAILURE, MEMCACHED_AT);
        } else {
          switch (get_socket_errno()) {
            case EINTR:
              continue;

#if EWOULDBLOCK != EAGAIN
            case EWOULDBLOCK:
#endif
            case EAGAIN:
#ifdef __linux
            case ERESTART:
#endif
              break; // No IO is fine, we can just move on

            default:
              memcached_set_errno(*instance, get_socket_errno(), MEMCACHED_AT);
          }
        }

        break;
      } else // We read data, append to our read buffer
      {
        instance->read_buffer_length += size_t(nr);

        return true;
      }
    } while (false);
  }

  return false;
}

/**
 * If we have callbacks connected to this server structure
 * we may start processing the input queue and fire the callbacks
 * for the incoming messages. This function is _only_ called
 * when the input buffer is full, so we _know_ that we have
 * at least _one_ message to process.
 *
 * @param instance the server to start processing input messages for
 * @return true if we processed anything, false otherwise
 */
static bool process_input_buffer(memcached_instance_st *instance) {
  /*
  ** We might be able to process some of the response messages if we
  ** have a callback set up
  */
  if (instance->root->callbacks) {
    /*
     * We might have responses... try to read them out and fire
     * callbacks
     */
    memcached_callback_st cb = *instance->root->callbacks;

    memcached_set_processing_input((Memcached *) instance->root, true);

    char buffer[MEMCACHED_DEFAULT_COMMAND_SIZE];
    Memcached *root = (Memcached *) instance->root;
    memcached_return_t error = memcached_response(instance, buffer, sizeof(buffer), &root->result);

    memcached_set_processing_input(root, false);

    if (error == MEMCACHED_SUCCESS) {
      for (unsigned int x = 0; x < cb.number_of_callback; x++) {
        error = (*cb.callback[x])(instance->root, &root->result, cb.context);
        if (error != MEMCACHED_SUCCESS) {
          break;
        }
      }

      /* @todo what should I do with the error message??? */
    }
    /* @todo what should I do with other error messages?? */
    return true;
  }

  return false;
}

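/**
 * Wait (poll) until the instance's socket becomes readable or writable.
 *
 * @param instance the server instance whose socket we are waiting on
 * @param events POLLIN and/or POLLOUT, depending on the direction we need
 * @return MEMCACHED_SUCCESS once the requested event is available, or an
 *         error such as MEMCACHED_TIMEOUT / MEMCACHED_CONNECTION_FAILURE
 */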
static memcached_return_t io_wait(memcached_instance_st *instance, const short events) {
  /*
  ** We are going to block on write, but at least on Solaris we might block
  ** on write if we haven't read anything from our input buffer..
  ** Try to purge the input buffer if we don't do any flow control in the
  ** application layer (just sending a lot of data etc)
  ** The test is moved down into the purge function to avoid duplicating it.
  */
  if (events & POLLOUT) {
    if (memcached_purge(instance) == false) {
      return MEMCACHED_FAILURE;
    }
  }

  struct pollfd fds;
  fds.fd = instance->fd;
  fds.events = events;
  fds.revents = 0;

  if (fds.events & POLLOUT) /* write */ {
    instance->io_wait_count.write++;
  } else {
    instance->io_wait_count.read++;
  }

  if (instance->root->poll_timeout
      == 0) // Mimic the case where a timeout of 0 times out immediately (not all platforms do this)
  {
    return memcached_set_error(*instance, MEMCACHED_TIMEOUT, MEMCACHED_AT,
                               memcached_literal_param("poll_timeout() was set to zero"));
  }

  size_t loop_max = 5;
  while (--loop_max) // While loop is for ERESTART or EINTR
  {
    int active_fd = poll(&fds, 1, instance->root->poll_timeout);

    if (active_fd >= 1) {
      assert_msg(active_fd == 1, "poll() returned an unexpected number of active file descriptors");
      if (fds.revents & POLLIN or fds.revents & POLLOUT) {
        return MEMCACHED_SUCCESS;
      }

      if (fds.revents & POLLHUP) {
        return memcached_set_error(*instance, MEMCACHED_CONNECTION_FAILURE, MEMCACHED_AT,
                                   memcached_literal_param("poll() detected hang up"));
      }

      if (fds.revents & POLLERR) {
        int local_errno = EINVAL;
        int err;
        socklen_t len = sizeof(err);
        if (getsockopt(instance->fd, SOL_SOCKET, SO_ERROR, (char *) &err, &len) == 0) {
          if (err == 0) // treat this as EINTR
          {
            continue;
          }
          local_errno = err;
        }
        memcached_quit_server(instance, true);
        return memcached_set_errno(*instance, local_errno, MEMCACHED_AT,
                                   memcached_literal_param("poll() returned POLLERR"));
      }

      return memcached_set_error(
          *instance, MEMCACHED_FAILURE, MEMCACHED_AT,
          memcached_literal_param("poll() returned a value that was not dealt with"));
    }

    if (active_fd == 0) {
      return memcached_set_error(*instance, MEMCACHED_TIMEOUT, MEMCACHED_AT,
                                 memcached_literal_param("No active_fd were found"));
    }

    // Only an error should result in this code being called.
    int local_errno = get_socket_errno(); // We cache in case memcached_quit_server() modifies errno
    assert_msg(active_fd == -1, "poll() returned an unexpected value");
    switch (local_errno) {
#ifdef __linux
      case ERESTART:
#endif
      case EINTR:
        continue;

      case EFAULT:
      case ENOMEM:
        memcached_set_error(*instance, MEMCACHED_MEMORY_ALLOCATION_FAILURE, MEMCACHED_AT);
        break;

      case EINVAL:
        memcached_set_error(*instance, MEMCACHED_MEMORY_ALLOCATION_FAILURE, MEMCACHED_AT,
                            memcached_literal_param(
                                "RLIMIT_NOFILE exceeded, or, on OSX, the timeout value was invalid"));
        break;

      default:
        memcached_set_errno(*instance, local_errno, MEMCACHED_AT, memcached_literal_param("poll"));
    }

    break;
  }

  memcached_quit_server(instance, true);

  if (memcached_has_error(instance)) {
    return memcached_instance_error_return(instance);
  }

  return memcached_set_error(
      *instance, MEMCACHED_CONNECTION_FAILURE, MEMCACHED_AT,
      memcached_literal_param("number of attempts to call io_wait() failed"));
}

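/**
 * Flush the instance's write buffer to the socket, retrying on EAGAIN by
 * first draining the input buffer and otherwise waiting for the socket to
 * become writable.
 *
 * @param instance the server instance to flush
 * @param with_flush when false, MSG_MORE is used to hint that more data follows
 * @param error set to the failure reason when a send error occurs
 * @return true if the whole buffer was sent, false otherwise
 */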
static bool io_flush(memcached_instance_st *instance, const bool with_flush,
                     memcached_return_t &error) {
  /*
  ** We might want to purge the input buffer if we haven't consumed
  ** any output yet... The test for the limits is inlined in the purge
  ** function to avoid duplicating the logic..
  */
  {
    WATCHPOINT_ASSERT(instance->fd != INVALID_SOCKET);

    if (memcached_purge(instance) == false) {
      return false;
    }
  }
  char *local_write_ptr = instance->write_buffer;
  size_t write_length = instance->write_buffer_offset;

  error = MEMCACHED_SUCCESS;

  WATCHPOINT_ASSERT(instance->fd != INVALID_SOCKET);

  /* Looking for memory overflows */
#if defined(DEBUG)
  if (write_length == MEMCACHED_MAX_BUFFER)
    WATCHPOINT_ASSERT(instance->write_buffer == local_write_ptr);
  WATCHPOINT_ASSERT((instance->write_buffer + MEMCACHED_MAX_BUFFER)
                    >= (local_write_ptr + write_length));
#endif

  while (write_length) {
    WATCHPOINT_ASSERT(instance->fd != INVALID_SOCKET);
    WATCHPOINT_ASSERT(write_length > 0);

    int flags;
    if (with_flush) {
      flags = MSG_NOSIGNAL;
    } else {
      flags = MSG_NOSIGNAL | MSG_MORE;
    }

    ssize_t sent_length = ::send(instance->fd, local_write_ptr, write_length, flags);
    int local_errno = get_socket_errno(); // We cache in case memcached_quit_server() modifies errno

    if (sent_length == SOCKET_ERROR) {
#if 0 // @todo I should look at why we hit this bit of code hard frequently
      WATCHPOINT_ERRNO(get_socket_errno());
      WATCHPOINT_NUMBER(get_socket_errno());
#endif
      switch (get_socket_errno()) {
        case ENOBUFS:
          continue;

#if EWOULDBLOCK != EAGAIN
        case EWOULDBLOCK:
#endif
        case EAGAIN: {
          /*
           * We may be blocked on write because the input buffer
           * is full. Let's check if we have room in our input
           * buffer for more data and retry the write before
           * waiting..
           */
          if (repack_input_buffer(instance) or process_input_buffer(instance)) {
            continue;
          }

          memcached_return_t rc = io_wait(instance, POLLOUT);
          if (memcached_success(rc)) {
            continue;
          } else if (rc == MEMCACHED_TIMEOUT) {
            return false;
          }

          memcached_quit_server(instance, true);
          error = memcached_set_errno(*instance, local_errno, MEMCACHED_AT);
          return false;
        }
        case ENOTCONN:
        case EPIPE:
        default:
          memcached_quit_server(instance, true);
          error = memcached_set_errno(*instance, local_errno, MEMCACHED_AT);
          WATCHPOINT_ASSERT(instance->fd == INVALID_SOCKET);
          return false;
      }
    }

    instance->io_bytes_sent += uint32_t(sent_length);

    local_write_ptr += sent_length;
    write_length -= uint32_t(sent_length);
  }

  WATCHPOINT_ASSERT(write_length == 0);
  instance->write_buffer_offset = 0;

  return true;
}

memcached_return_t memcached_io_wait_for_write(memcached_instance_st *instance) {
  return io_wait(instance, POLLOUT);
}

memcached_return_t memcached_io_wait_for_read(memcached_instance_st *instance) {
  return io_wait(instance, POLLIN);
}

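/**
 * Fill the instance's read buffer with whatever the server has sent,
 * blocking (via io_wait) until at least one byte has been received or a
 * fatal error / disconnect is detected.
 */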
static memcached_return_t _io_fill(memcached_instance_st *instance) {
  ssize_t data_read;
  do {
    data_read = ::recv(instance->fd, instance->read_buffer, MEMCACHED_MAX_BUFFER, MSG_NOSIGNAL);
    int local_errno = get_socket_errno(); // We cache in case memcached_quit_server() modifies errno

    if (data_read == SOCKET_ERROR) {
      switch (get_socket_errno()) {
        case EINTR: // We just retry
          continue;

        case ETIMEDOUT: // OSX
#if EWOULDBLOCK != EAGAIN
        case EWOULDBLOCK:
#endif
        case EAGAIN:
#ifdef __linux
        case ERESTART:
#endif
        {
          memcached_return_t io_wait_ret;
          if (memcached_success(io_wait_ret = io_wait(instance, POLLIN))) {
            continue;
          }

          return io_wait_ret;
        }

        /* fall through */

        case ENOTCONN: // Programmer Error
          WATCHPOINT_ASSERT(0);
          // fall through
        case ENOTSOCK:
          WATCHPOINT_ASSERT(0);
          // fall through
        case EBADF:
          assert_msg(instance->fd != INVALID_SOCKET, "Programmer error, invalid socket");
          /* fall through */
        case EINVAL:
        case EFAULT:
        case ECONNREFUSED:
        default:
          memcached_quit_server(instance, true);
          memcached_set_errno(*instance, local_errno, MEMCACHED_AT);
          break;
      }

      return memcached_instance_error_return(instance);
    } else if (data_read == 0) {
      /*
        EOF. Any data received so far is incomplete, so discard it. In the
        TCP case data is consumed a byte at a time and protocol enforcement
        happens in memcached_response() looking for '\n'; we do not care
        about UDP, which requests 8 bytes at once. Generally this means the
        connection went away: for blocking I/O recv() does not return 0
        otherwise, and in the non-blocking case it returns EAGAIN when no
        data is immediately available.
      */
      memcached_quit_server(instance, true);
      return memcached_set_error(
          *instance, MEMCACHED_CONNECTION_FAILURE, MEMCACHED_AT,
          memcached_literal_param("::recv() returned zero, server has disconnected"));
    }
    instance->io_wait_count._bytes_read += data_read;
  } while (data_read <= 0);

  instance->io_bytes_sent = 0;
  instance->read_buffer_length = (size_t) data_read;
  instance->read_ptr = instance->read_buffer;

  return MEMCACHED_SUCCESS;
}

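/**
 * Read length bytes from the server into the supplied buffer, refilling the
 * instance's read buffer from the socket as needed.
 *
 * @param instance the server to read from
 * @param buffer destination buffer
 * @param length number of bytes to read
 * @param nread set to the number of bytes copied, or -1 on a fatal error
 */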
memcached_return_t memcached_io_read(memcached_instance_st *instance, void *buffer, size_t length,
                                     ssize_t &nread) {
  assert(memcached_is_udp(instance->root) == false);
  assert_msg(
      instance,
      "Programmer error, memcached_io_read() received an invalid Instance"); // Programmer error
  char *buffer_ptr = static_cast<char *>(buffer);

  if (instance->fd == INVALID_SOCKET) {
#if 0
    assert_msg(int(instance->state) <= int(MEMCACHED_SERVER_STATE_ADDRINFO), "Programmer error, invalid socket state");
#endif
    return MEMCACHED_CONNECTION_FAILURE;
  }

  while (length) {
    if (instance->read_buffer_length == 0) {
      memcached_return_t io_fill_ret;
      if (memcached_fatal(io_fill_ret = _io_fill(instance))) {
        nread = -1;
        return io_fill_ret;
      }
    }

    if (length > 1) {
      size_t difference =
          (length > instance->read_buffer_length) ? instance->read_buffer_length : length;

      memcpy(buffer_ptr, instance->read_ptr, difference);
      length -= difference;
      instance->read_ptr += difference;
      instance->read_buffer_length -= difference;
      buffer_ptr += difference;
    } else {
      *buffer_ptr = *instance->read_ptr;
      instance->read_ptr++;
      instance->read_buffer_length--;
      buffer_ptr++;
      break;
    }
  }

  nread = ssize_t(buffer_ptr - (char *) buffer);

  return MEMCACHED_SUCCESS;
}

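/**
 * Drain and discard any data still pending on the socket for this server,
 * typically before giving up on the connection.
 *
 * @return MEMCACHED_CONNECTION_FAILURE once the socket is closed or errors
 *         out, or MEMCACHED_IN_PROGRESS if the socket would block
 */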
memcached_return_t memcached_io_slurp(memcached_instance_st *instance) {
  assert_msg(instance, "Programmer error, invalid Instance");
  assert(memcached_is_udp(instance->root) == false);

  if (instance->fd == INVALID_SOCKET) {
    assert_msg(int(instance->state) <= int(MEMCACHED_SERVER_STATE_ADDRINFO),
               "Invalid socket state");
    return MEMCACHED_CONNECTION_FAILURE;
  }

  ssize_t data_read;
  char buffer[MEMCACHED_MAX_BUFFER];
  do {
    data_read = ::recv(instance->fd, instance->read_buffer, sizeof(buffer), MSG_NOSIGNAL);
    if (data_read == SOCKET_ERROR) {
      switch (get_socket_errno()) {
        case EINTR: // We just retry
          continue;

        case ETIMEDOUT: // OSX
#if EWOULDBLOCK != EAGAIN
        case EWOULDBLOCK:
#endif
        case EAGAIN:
#ifdef __linux
        case ERESTART:
#endif
          if (memcached_success(io_wait(instance, POLLIN))) {
            continue;
          }
          return MEMCACHED_IN_PROGRESS;

          /* fall through */

        case ENOTCONN: // Programmer Error
        case ENOTSOCK:
          assert(0);
          /* fall through */
        case EBADF:
          assert_msg(instance->fd != INVALID_SOCKET, "Invalid socket state");
          /* fall through */
        case EINVAL:
        case EFAULT:
        case ECONNREFUSED:
        default:
          return MEMCACHED_CONNECTION_FAILURE; // We want this!
      }
    }
  } while (data_read > 0);

  return MEMCACHED_CONNECTION_FAILURE;
}

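/**
 * Copy data into the instance's write buffer, flushing to the socket
 * whenever the buffer fills up (and once more at the end when with_flush
 * is set).
 *
 * @param written set to the number of bytes accepted from the caller
 * @return true on success, false if a flush failed
 */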
static bool _io_write(memcached_instance_st *instance, const void *buffer, size_t length,
                      bool with_flush, size_t &written) {
  assert(instance->fd != INVALID_SOCKET);
  assert(memcached_is_udp(instance->root) == false);

  const char *buffer_ptr = static_cast<const char *>(buffer);

  const size_t original_length = length;

  while (length) {
    char *write_ptr;
    size_t buffer_end = MEMCACHED_MAX_BUFFER;
    size_t should_write = buffer_end - instance->write_buffer_offset;
    should_write = (should_write < length) ? should_write : length;

    write_ptr = instance->write_buffer + instance->write_buffer_offset;
    memcpy(write_ptr, buffer_ptr, should_write);
    instance->write_buffer_offset += should_write;
    buffer_ptr += should_write;
    length -= should_write;

    if (instance->write_buffer_offset == buffer_end) {
      WATCHPOINT_ASSERT(instance->fd != INVALID_SOCKET);

      memcached_return_t rc;
      if (io_flush(instance, with_flush, rc) == false) {
        written = original_length - length;
        return false;
      }
    }
  }

  if (with_flush) {
    memcached_return_t rc;
    WATCHPOINT_ASSERT(instance->fd != INVALID_SOCKET);
    if (io_flush(instance, with_flush, rc) == false) {
      written = original_length - length;
      return false;
    }
  }

  written = original_length - length;

  return true;
}

bool memcached_io_write(memcached_instance_st *instance) {
  size_t written;
  return _io_write(instance, NULL, 0, true, written);
}

ssize_t memcached_io_write(memcached_instance_st *instance, const void *buffer, const size_t length,
                           const bool with_flush) {
  size_t written;

  if (_io_write(instance, buffer, length, with_flush, written) == false) {
    return -1;
  }

  return ssize_t(written);
}

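/**
 * Buffer (and optionally flush) a sequence of iovec-style chunks as one
 * logical write; the return value only reports success if every byte of
 * every chunk was accepted.
 *
 * A rough usage sketch (illustrative only, using the buffer/length members
 * referenced below):
 *
 *   const char key_line[] = "get some_key\r\n";
 *   libmemcached_io_vector_st vector[1];
 *   vector[0].buffer = key_line;
 *   vector[0].length = sizeof(key_line) - 1;
 *   if (memcached_io_writev(instance, vector, 1, true) == false) {
 *     // the write failed or was incomplete
 *   }
 */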
bool memcached_io_writev(memcached_instance_st *instance, libmemcached_io_vector_st vector[],
                         const size_t number_of, const bool with_flush) {
  ssize_t complete_total = 0;
  ssize_t total = 0;

  for (size_t x = 0; x < number_of; x++, vector++) {
    complete_total += vector->length;
    if (vector->length) {
      size_t written;
      if ((_io_write(instance, vector->buffer, vector->length, false, written)) == false) {
        return false;
      }
      total += written;
    }
  }

  if (with_flush) {
    if (memcached_io_write(instance) == false) {
      return false;
    }
  }

  return (complete_total == total);
}

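/*
 * Socket teardown helpers: start_close_socket() performs the write-side
 * shutdown() and marks the instance as shutting down, reset_socket() closes
 * the descriptor, and close_socket() shuts down whatever direction is still
 * open before closing the socket and resetting the per-connection I/O state.
 */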
void memcached_instance_st::start_close_socket() {
  if (fd != INVALID_SOCKET) {
    shutdown(fd, SHUT_WR);
    options.is_shutting_down = true;
  }
}

void memcached_instance_st::reset_socket() {
  if (fd != INVALID_SOCKET) {
    (void) closesocket(fd);
    fd = INVALID_SOCKET;
  }
}

void memcached_instance_st::close_socket() {
  if (fd != INVALID_SOCKET) {
    int shutdown_options = SHUT_RD;
    if (options.is_shutting_down == false) {
      shutdown_options = SHUT_RDWR;
    }

    /* in case of death shutdown to avoid blocking at close() */
    if (shutdown(fd, shutdown_options) == SOCKET_ERROR and get_socket_errno() != ENOTCONN) {
      WATCHPOINT_NUMBER(fd);
      WATCHPOINT_ERRNO(get_socket_errno());
      WATCHPOINT_ASSERT(get_socket_errno());
    }

    reset_socket();
    state = MEMCACHED_SERVER_STATE_NEW;
  }

  state = MEMCACHED_SERVER_STATE_NEW;
  cursor_active_ = 0;
  io_bytes_sent = 0;
  write_buffer_offset = size_t(root and memcached_is_udp(root) ? UDP_DATAGRAM_HEADER_LENGTH : 0);
  read_buffer_length = 0;
  read_ptr = read_buffer;
  options.is_shutting_down = false;
  memcached_server_response_reset(this);

  // We reset the version so that if we end up talking to a different server
  // we don't have stale server version information.
  major_version = minor_version = micro_version = UINT8_MAX;
}

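/**
 * Find a server instance that has data ready to be read: either buffered
 * data already in memory, or (when several servers have outstanding
 * responses) a socket that poll() reports as readable.
 *
 * @return the readable instance, or NULL if nothing is ready
 */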
memcached_instance_st *memcached_io_get_readable_server(Memcached *memc, memcached_return_t &) {
#define MAX_SERVERS_TO_POLL 100
  struct pollfd fds[MAX_SERVERS_TO_POLL];
  nfds_t host_index = 0;

  for (uint32_t x = 0; x < memcached_server_count(memc) and host_index < MAX_SERVERS_TO_POLL; ++x) {
    memcached_instance_st *instance = memcached_instance_fetch(memc, x);

    if (instance->read_buffer_length > 0) /* I have data in the buffer */ {
      return instance;
    }

    if (instance->response_count() > 0) {
      fds[host_index].events = POLLIN;
      fds[host_index].revents = 0;
      fds[host_index].fd = instance->fd;
      ++host_index;
    }
  }

  if (host_index < 2) {
    /* We have 0 or 1 server with pending events.. */
    for (uint32_t x = 0; x < memcached_server_count(memc); ++x) {
      memcached_instance_st *instance = memcached_instance_fetch(memc, x);

      if (instance->response_count() > 0) {
        return instance;
      }
    }

    return NULL;
  }

  int error = poll(fds, host_index, memc->poll_timeout);
  switch (error) {
    case -1:
      memcached_set_errno(*memc, get_socket_errno(), MEMCACHED_AT);
      /* FALLTHROUGH */
    case 0:
      break;

    default:
      for (nfds_t x = 0; x < host_index; ++x) {
        if (fds[x].revents & POLLIN) {
          for (uint32_t y = 0; y < memcached_server_count(memc); ++y) {
            memcached_instance_st *instance = memcached_instance_fetch(memc, y);

            if (instance->fd == fds[x].fd) {
              return instance;
            }
          }
        }
      }
  }

  return NULL;
}

/*
  Eventually we will just kill off the server with the problem.
*/
void memcached_io_reset(memcached_instance_st *instance) {
  memcached_quit_server(instance, true);
}

/**
 * Read a given number of bytes from the server and place it into a specific
 * buffer. Reset the IO channel on this server if an error occurs.
 */
memcached_return_t memcached_safe_read(memcached_instance_st *instance, void *dta,
                                       const size_t size) {
  size_t offset = 0;
  char *data = static_cast<char *>(dta);

  while (offset < size) {
    ssize_t nread;
    memcached_return_t rc;

    while (
        memcached_continue(rc = memcached_io_read(instance, data + offset, size - offset, nread))) {
    };

    if (memcached_failed(rc)) {
      return rc;
    }

    offset += size_t(nread);
  }

  return MEMCACHED_SUCCESS;
}

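/**
 * Read a single '\n'-terminated line from the server into buffer_ptr.
 *
 * @param size size of the destination buffer
 * @param total_nr set to the number of bytes copied, including the newline
 * @return MEMCACHED_SUCCESS on a complete line, MEMCACHED_PROTOCOL_ERROR if
 *         the buffer filled up before a newline was seen, or the underlying
 *         read error
 */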
memcached_return_t memcached_io_readline(memcached_instance_st *instance, char *buffer_ptr,
                                         size_t size, size_t &total_nr) {
  total_nr = 0;
  bool line_complete = false;

  while (line_complete == false) {
    if (instance->read_buffer_length == 0) {
      /*
       * We don't have any data in the buffer, so let's fill the read
       * buffer. Call the standard read function to avoid duplicating
       * the logic.
       */
      ssize_t nread;
      memcached_return_t rc = memcached_io_read(instance, buffer_ptr, 1, nread);
      if (memcached_failed(rc) and rc == MEMCACHED_IN_PROGRESS) {
        memcached_quit_server(instance, true);
        return memcached_set_error(*instance, rc, MEMCACHED_AT);
      } else if (memcached_failed(rc)) {
        return rc;
      }

      if (*buffer_ptr == '\n') {
        line_complete = true;
      }

      ++buffer_ptr;
      ++total_nr;
    }

    /* Now let's look in the buffer and copy as we go! */
    while (instance->read_buffer_length and total_nr < size and line_complete == false) {
      *buffer_ptr = *instance->read_ptr;
      if (*buffer_ptr == '\n') {
        line_complete = true;
      }
      --instance->read_buffer_length;
      ++instance->read_ptr;
      ++total_nr;
      ++buffer_ptr;
    }

    if (total_nr == size) {
      return MEMCACHED_PROTOCOL_ERROR;
    }
  }

  return MEMCACHED_SUCCESS;
}