case EINTR:
continue;
+#if EWOULDBLOCK != EAGAIN
case EWOULDBLOCK:
-#ifdef USE_EAGAIN
- case EAGAIN:
#endif
+ case EAGAIN:
#ifdef TARGET_OS_LINUX
case ERESTART:
#endif
memcached_set_processing_input((memcached_st *)ptr->root, true);
char buffer[MEMCACHED_DEFAULT_COMMAND_SIZE];
- memcached_return_t error;
memcached_st *root= (memcached_st *)ptr->root;
- error= memcached_response(ptr, buffer, sizeof(buffer),
- &root->result);
+ memcached_return_t error= memcached_response(ptr, buffer, sizeof(buffer), &root->result);
memcached_set_processing_input(root, false);
*/
if (read_or_write == MEM_WRITE)
{
- memcached_return_t rc= memcached_purge(ptr);
- if (rc != MEMCACHED_SUCCESS && rc != MEMCACHED_STORED)
+ if (memcached_fatal(memcached_purge(ptr)))
{
return MEMCACHED_FAILURE;
}
** in the purge function to avoid duplicating the logic..
*/
{
- memcached_return_t rc;
WATCHPOINT_ASSERT(ptr->fd != INVALID_SOCKET);
- rc= memcached_purge(ptr);
+ memcached_return_t rc= memcached_purge(ptr);
- if (rc != MEMCACHED_SUCCESS && rc != MEMCACHED_STORED)
+ if (rc != MEMCACHED_SUCCESS and rc != MEMCACHED_STORED)
{
return false;
}
if (sent_length == SOCKET_ERROR)
{
- memcached_set_errno(*ptr, get_socket_errno(), MEMCACHED_AT);
#if 0 // @todo I should look at why we hit this bit of code hard frequently
WATCHPOINT_ERRNO(get_socket_errno());
WATCHPOINT_NUMBER(get_socket_errno());
{
case ENOBUFS:
continue;
+
+#if EWOULDBLOCK != EAGAIN
case EWOULDBLOCK:
-#ifdef USE_EAGAIN
- case EAGAIN:
#endif
+ case EAGAIN:
{
/*
* We may be blocked on write because the input buffer
}
WATCHPOINT_ASSERT(write_length == 0);
- if (memcached_is_udp(ptr->root))
- {
- ptr->write_buffer_offset= UDP_DATAGRAM_HEADER_LENGTH;
- }
- else
- {
- ptr->write_buffer_offset= 0;
- }
+ ptr->write_buffer_offset= 0;
return true;
}
continue;
case ETIMEDOUT: // OSX
+#if EWOULDBLOCK != EAGAIN
case EWOULDBLOCK:
-#ifdef USE_EAGAIN
- case EAGAIN:
#endif
+ case EAGAIN:
#ifdef TARGET_OS_LINUX
case ERESTART:
#endif
continue;
case ETIMEDOUT: // OSX
+#if EWOULDBLOCK != EAGAIN
case EWOULDBLOCK:
-#ifdef USE_EAGAIN
- case EAGAIN:
#endif
+ case EAGAIN:
#ifdef TARGET_OS_LINUX
case ERESTART:
#endif
while (length)
{
char *write_ptr;
- size_t should_write;
- size_t buffer_end;
-
- if (memcached_is_udp(ptr->root))
- {
- //UDP does not support partial writes
- buffer_end= MAX_UDP_DATAGRAM_LENGTH;
- should_write= length;
- if (ptr->write_buffer_offset + should_write > buffer_end)
- {
- return -1;
- }
- }
- else
- {
- buffer_end= MEMCACHED_MAX_BUFFER;
- should_write= buffer_end - ptr->write_buffer_offset;
- should_write= (should_write < length) ? should_write : length;
- }
+ size_t buffer_end= MEMCACHED_MAX_BUFFER;
+ size_t should_write= buffer_end - ptr->write_buffer_offset;
+ should_write= (should_write < length) ? should_write : length;
write_ptr= ptr->write_buffer + ptr->write_buffer_offset;
memcpy(write_ptr, buffer_ptr, should_write);
buffer_ptr+= should_write;
length-= should_write;
- if (ptr->write_buffer_offset == buffer_end and memcached_is_udp(ptr->root) == false)
+ if (ptr->write_buffer_offset == buffer_end)
{
WATCHPOINT_ASSERT(ptr->fd != INVALID_SOCKET);
{
#define MAX_SERVERS_TO_POLL 100
struct pollfd fds[MAX_SERVERS_TO_POLL];
- unsigned int host_index= 0;
+ nfds_t host_index= 0;
- for (uint32_t x= 0; x < memcached_server_count(memc) && host_index < MAX_SERVERS_TO_POLL; ++x)
+ for (uint32_t x= 0; x < memcached_server_count(memc) and host_index < MAX_SERVERS_TO_POLL; ++x)
{
memcached_server_write_instance_st instance= memcached_server_instance_fetch(memc, x);
break;
default:
- for (size_t x= 0; x < host_index; ++x)
+ for (nfds_t x= 0; x < host_index; ++x)
{
if (fds[x].revents & POLLIN)
{
for (uint32_t y= 0; y < memcached_server_count(memc); ++y)
{
- memcached_server_write_instance_st instance=
- memcached_server_instance_fetch(memc, y);
+ memcached_server_write_instance_st instance= memcached_server_instance_fetch(memc, y);
if (instance->fd == fds[x].fd)
+ {
return instance;
+ }
}
}
}