[Gluster-devel] [PATCH BUG:3020 1/1] Fix duplicate quota/marker symbols.

Jeff Darcy jdarcy at redhat.com
Mon Jun 13 19:50:59 UTC 2011



Signed-off-by: Jeff Darcy <jdarcy at redhat.com>
---
 rpc/rpc-transport/socket/src/socket.c |  596 +++++++++++++++++++++++++++++----
 rpc/rpc-transport/socket/src/socket.h |   15 +
 2 files changed, 538 insertions(+), 73 deletions(-)

diff --git a/rpc/rpc-transport/socket/src/socket.c b/rpc/rpc-transport/socket/src/socket.c
index 2948621..b52baaa 100644
--- a/rpc/rpc-transport/socket/src/socket.c
+++ b/rpc/rpc-transport/socket/src/socket.c
@@ -47,6 +47,14 @@
 #define GF_LOG_ERRNO(errno) ((errno == ENOTCONN) ? GF_LOG_DEBUG : GF_LOG_ERROR)
 #define SA(ptr) ((struct sockaddr *)ptr)

+#define SSL_OWN_CERT_OPT    "transport.socket.ssl-own-cert"
+#define SSL_PRIVATE_KEY_OPT "transport.socket.ssl-private-key"
+#define SSL_CA_LIST_OPT     "transport.socket.ssl-ca-list"
+#define OWN_THREAD_OPT      "transport.socket.own-thread"
+
+#define POLL_MASK_INPUT  (POLLIN | POLLPRI)
+#define POLL_MASK_OUTPUT (POLLOUT)
+#define POLL_MASK_ERROR  (POLLERR | POLLHUP | POLLNVAL)

 #define __socket_proto_reset_pending(priv) do {                 \
                 memset (&priv->incoming.frag.vector, 0,         \
@@ -133,9 +141,127 @@
                 __socket_proto_update_priv_after_read (priv, ret, bytes_read); \
         }

-
 int socket_init (rpc_transport_t *this);

+int
+ssl_setup_connection (socket_private_t *priv, int server)
+{
+	X509 *peer;
+	char  peer_CN[256];
+	int   ret;
+
+	priv->ssl_ssl = SSL_new(priv->ssl_ctx);
+	priv->ssl_sbio = BIO_new_socket(priv->sock,BIO_NOCLOSE);
+	SSL_set_bio(priv->ssl_ssl,priv->ssl_sbio,priv->ssl_sbio);
+	if (server) {
+		ret = SSL_accept(priv->ssl_ssl);
+	}
+	else {
+		ret = SSL_connect(priv->ssl_ssl);
+	}
+	if (ret >= 0) {
+		gf_log(__func__,GF_LOG_DEBUG,"verify_result = %lu (%d)",
+		       SSL_get_verify_result(priv->ssl_ssl), X509_V_OK);
+		peer = SSL_get_peer_certificate(priv->ssl_ssl);
+		if (peer) {
+			X509_NAME_get_text_by_NID(X509_get_subject_name(peer),
+				NID_commonName, peer_CN, sizeof(peer_CN)-1);
+			peer_CN[sizeof(peer_CN)-1] = '\0';
+			gf_log(__func__,GF_LOG_DEBUG,"peer CN = %s", peer_CN);
+		}
+	}
+	else {
+		unsigned long errnum;
+		char          errbuf[120];
+
+		gf_log(__func__,GF_LOG_ERROR,"connect error %d",
+		       SSL_get_error(priv->ssl_ssl,ret));
+		while ((errnum = ERR_get_error())) {
+			ERR_error_string(errnum,errbuf);
+			gf_log(__func__,GF_LOG_ERROR,"  %s",errbuf);
+		}
+	}
+	return ret;
+}
+
+int
+ssl_write_one (socket_private_t *priv, void *buf, size_t len)
+{
+	int           r;
+	struct pollfd pfd;
+
+	for (;;) {
+		r = SSL_write(priv->ssl_ssl,buf,len);
+		switch (SSL_get_error(priv->ssl_ssl,r)) {
+		case SSL_ERROR_NONE:
+			return r;
+		case SSL_ERROR_WANT_READ:
+			pfd.fd = priv->sock;
+			pfd.events = POLLIN;
+			if (poll(&pfd,1,-1) < 0) {
+				gf_log(__func__,GF_LOG_ERROR,"poll error %d",
+				       errno);
+				return -1;
+			}
+			break;
+		case SSL_ERROR_WANT_WRITE:
+			pfd.fd = priv->sock;
+			pfd.events = POLLOUT;
+			if (poll(&pfd,1,-1) < 0) {
+				gf_log(__func__,GF_LOG_ERROR,"poll error %d",
+				       errno);
+				return -1;
+			}
+			break;
+		default:
+			gf_log(__func__,GF_LOG_ERROR,"SSL error %lu",
+			       ERR_peek_error());
+			errno = EIO;
+			return -1;
+		}
+	}
+}
+
+int
+ssl_read_one (socket_private_t *priv, void *buf, size_t len)
+{
+	int           r;
+	struct pollfd pfd;
+
+	for (;;) {
+		r = SSL_read(priv->ssl_ssl,buf,len);
+		switch (SSL_get_error(priv->ssl_ssl,r)) {
+		case SSL_ERROR_NONE:
+			return r;
+		case SSL_ERROR_ZERO_RETURN:
+			return 0;
+		case SSL_ERROR_WANT_READ:
+			pfd.fd = priv->sock;
+			pfd.events = POLLIN;
+			if (poll(&pfd,1,-1) < 0) {
+				gf_log(__func__,GF_LOG_ERROR,"poll error %d",
+				       errno);
+				return -1;
+			}
+			break;
+		case SSL_ERROR_WANT_WRITE:
+			pfd.fd = priv->sock;
+			pfd.events = POLLOUT;
+			if (poll(&pfd,1,-1) < 0) {
+				gf_log(__func__,GF_LOG_ERROR,"poll error %d",
+				       errno);
+				return -1;
+			}
+			break;
+		default:
+			gf_log(__func__,GF_LOG_ERROR,"SSL error %lu",
+			       ERR_peek_error());
+			errno = EIO;
+			return -1;
+		}
+	}
+}
+
 /*
  * return value:
  *   0 = success (completed)
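
The two helpers above block in poll() whenever OpenSSL reports WANT_READ or
WANT_WRITE and then retry the same call.  For readers less familiar with that
idiom, here is a condensed standalone sketch of the same SSL_get_error()-driven
retry loop; ssl_read_retry() and its arguments are illustrative names, not part
of the patch.

    #include <errno.h>
    #include <poll.h>
    #include <openssl/err.h>
    #include <openssl/ssl.h>

    /* Retry SSL_read() until it makes progress, waiting in poll() whenever
     * OpenSSL asks for the socket to become readable or writable first. */
    static int
    ssl_read_retry (SSL *ssl, int sock, void *buf, int len)
    {
            struct pollfd pfd = { .fd = sock, .events = 0, .revents = 0 };
            int           rc;

            for (;;) {
                    rc = SSL_read (ssl, buf, len);
                    switch (SSL_get_error (ssl, rc)) {
                    case SSL_ERROR_NONE:
                            return rc;              /* rc > 0 bytes read */
                    case SSL_ERROR_ZERO_RETURN:
                            return 0;               /* peer closed the TLS session */
                    case SSL_ERROR_WANT_READ:
                            pfd.events = POLLIN;    /* need more ciphertext first */
                            break;
                    case SSL_ERROR_WANT_WRITE:
                            pfd.events = POLLOUT;   /* e.g. renegotiation in progress */
                            break;
                    default:
                            errno = EIO;            /* treat anything else as fatal */
                            return -1;
                    }
                    if (poll (&pfd, 1, -1) < 0)
                            return -1;              /* poll() failed, errno is set */
            }
    }
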
@@ -170,7 +296,13 @@ __socket_rwv (rpc_transport_t *this, struct iovec *vector, int count,

         while (opcount) {
                 if (write) {
-                        ret = writev (sock, opvector, opcount);
+			if (priv->use_ssl) {
+				ret = ssl_write_one(priv,
+					opvector->iov_base, opvector->iov_len);
+			}
+			else {
+				ret = writev (sock, opvector, opcount);
+			}

                         if (ret == 0 || (ret == -1 && errno == EAGAIN)) {
                                 /* done for now */
@@ -178,7 +310,13 @@ __socket_rwv (rpc_transport_t *this, struct iovec *vector, int count,
                         }
                         this->total_bytes_write += ret;
                 } else {
-                        ret = readv (sock, opvector, opcount);
+			if (priv->use_ssl) {
+				ret = ssl_read_one(priv,
+					opvector->iov_base, opvector->iov_len);
+			}
+			else {
+				ret = readv (sock, opvector, opcount);
+			}
                         if (ret == -1 && errno == EAGAIN) {
                                 /* done for now */
                                 break;
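
Because OpenSSL has no scatter/gather equivalents of readv()/writev(), the SSL
branch above hands __socket_rwv() one iovec element per call and lets the
caller's existing partial-I/O handling advance through the vector.  A
standalone sketch of that pattern, under hypothetical names (read_one_fn,
read_iovec):

    #include <sys/types.h>
    #include <sys/uio.h>

    /* Read into an iovec array using a function that takes a single buffer,
     * advancing element by element. */
    typedef ssize_t (*read_one_fn) (void *cookie, void *buf, size_t len);

    static ssize_t
    read_iovec (void *cookie, read_one_fn read_one, struct iovec *vec, int count)
    {
            ssize_t total = 0;
            ssize_t n;

            while (count > 0) {
                    n = read_one (cookie, vec->iov_base, vec->iov_len);
                    if (n <= 0)
                            return total ? total : n;   /* EOF or error */
                    total += n;
                    if ((size_t)n < vec->iov_len) {
                            /* Short read: stay on this element, move its base. */
                            vec->iov_base = (char *)vec->iov_base + n;
                            vec->iov_len -= n;
                            continue;
                    }
                    ++vec;
                    --count;
            }
            return total;
    }
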
@@ -288,6 +426,15 @@ __socket_disconnect (rpc_transport_t *this)
                                 "shutdown() returned %d. %s",
                                 ret, strerror (errno));
                 }
+		if (priv->own_thread) {
+			/* TBD: SSL shutdown */
+			/*
+			 * Without this, reconnect (= disconnect + connect)
+			 * won't work except by accident.
+			 */
+			close(priv->sock);
+			priv->sock = -1;
+		}
         }

 out:
@@ -621,9 +768,11 @@ out:


 int
-__socket_ioq_churn_entry (rpc_transport_t *this, struct ioq *entry)
+__socket_ioq_churn_entry (rpc_transport_t *this, struct ioq *entry, int direct)
 {
-        int ret = -1;
+        int               ret = -1;
+	socket_private_t *priv = NULL;
+	char              a_byte = 0;

         ret = __socket_writev (this, entry->pending_vector,
                                entry->pending_count,
@@ -634,6 +783,18 @@ __socket_ioq_churn_entry (rpc_transport_t *this, struct ioq *entry)
                 /* current entry was completely written */
                 GF_ASSERT (entry->pending_count == 0);
                 __socket_ioq_entry_free (entry);
+		priv = this->private;
+		if (priv->own_thread) {
+			/*
+			 * The pipe should only remain readable if there are
+			 * more entries after this, so drain the byte
+			 * representing this entry.
+			 */
+			if (!direct && read(priv->pipe[0],&a_byte,1) < 1) {
+				gf_log(this->name,GF_LOG_WARNING,
+				       "read error on pipe");
+			}
+		}
         }

         return ret;
@@ -656,13 +817,13 @@ __socket_ioq_churn (rpc_transport_t *this)
                 /* pick next entry */
                 entry = priv->ioq_next;

-                ret = __socket_ioq_churn_entry (this, entry);
+                ret = __socket_ioq_churn_entry (this, entry, 0);

                 if (ret != 0)
                         break;
         }

-        if (list_empty (&priv->ioq)) {
+        if (!priv->own_thread && list_empty (&priv->ioq)) {
                 /* all pending writes done, not interested in POLLOUT */
                 priv->idx = event_select_on (this->ctx->event_pool,
                                              priv->sock, priv->idx, -1, 0);
@@ -1640,13 +1801,13 @@ socket_event_poll_in (rpc_transport_t *this)
 {
         int                     ret    = -1;
         rpc_transport_pollin_t *pollin = NULL;
+	socket_private_t       *priv   = this->private;

         ret = socket_proto_state_machine (this, &pollin);

         if (pollin != NULL) {
                 ret = rpc_transport_notify (this, RPC_TRANSPORT_MSG_RECEIVED,
                                             pollin);
-
                 rpc_transport_pollin_destroy (pollin);
         }

@@ -1731,9 +1892,9 @@ int
 socket_event_handler (int fd, int idx, void *data,
                       int poll_in, int poll_out, int poll_err)
 {
-        rpc_transport_t      *this = NULL;
+        rpc_transport_t  *this = NULL;
         socket_private_t *priv = NULL;
-        int               ret = 0;
+	int               ret = -1;

         this = data;
         GF_VALIDATE_OR_GOTO ("socket", this, out);
@@ -1743,38 +1904,127 @@ socket_event_handler (int fd, int idx, void *data,
         THIS = this->xl;
         priv = this->private;

-
         pthread_mutex_lock (&priv->lock);
         {
                 priv->idx = idx;
         }
         pthread_mutex_unlock (&priv->lock);

-        if (!priv->connected) {
-                ret = socket_connect_finish (this);
-        }
+	ret = priv->connected ? 0 : socket_connect_finish(this);

         if (!ret && poll_out) {
                 ret = socket_event_poll_out (this);
         }
-
+
         if (!ret && poll_in) {
                 ret = socket_event_poll_in (this);
         }
-
+
         if ((ret < 0) || poll_err) {
                 /* Logging has happened already in earlier cases */
                 gf_log ("transport", ((ret >= 0) ? GF_LOG_INFO : GF_LOG_DEBUG),
                         "disconnecting now");
                 socket_event_poll_err (this);
                 rpc_transport_unref (this);
-        }
+	}

 out:
-        return 0;
+	return ret;
+}
+
+
+void *
+socket_poller (void *ctx)
+{
+        rpc_transport_t  *this = ctx;
+        socket_private_t *priv = this->private;
+	struct pollfd     pfd[2] = {{0,},};
+	gf_boolean_t      to_write = _gf_false;
+	int               ret = 0;
+	int               orig_gen;
+
+	orig_gen = ++(priv->socket_gen);
+        if (!priv->connected) {
+		THIS = this->xl;
+                ret = socket_connect_finish (this);
+        }
+
+	for (;;) {
+		if (priv->socket_gen != orig_gen) {
+			gf_log(this->name,GF_LOG_DEBUG,
+			       "redundant poller exiting");
+			return NULL;
+		}
+		pthread_mutex_lock(&priv->lock);
+		to_write = !list_empty(&priv->ioq);
+		pthread_mutex_unlock(&priv->lock);
+		pfd[0].fd = priv->pipe[0];
+		pfd[0].events = POLL_MASK_ERROR;
+		pfd[0].revents = 0;
+		pfd[1].fd = priv->sock;
+		pfd[1].events = POLL_MASK_INPUT | POLL_MASK_ERROR;
+		pfd[1].revents = 0;
+		if (to_write) {
+			pfd[1].events |= POLL_MASK_OUTPUT;
+		}
+		else {
+			pfd[0].events |= POLL_MASK_INPUT;
+		}
+		if (poll(pfd,2,-1) < 0) {
+			gf_log(this->name,GF_LOG_ERROR,"poll failed");
+			return NULL;
+		}
+		if (pfd[0].revents & POLL_MASK_ERROR) {
+			gf_log(this->name,GF_LOG_ERROR,
+			       "poll error on pipe");
+			return NULL;
+		}
+		/* Only glusterd actually seems to need this. */
+		THIS = this->xl;
+		if (pfd[1].revents & POLL_MASK_INPUT) {
+			ret = socket_event_poll_in(this);
+			if (ret >= 0) {
+				/* Suppress errors while making progress. */
+				pfd[1].revents &= ~POLL_MASK_ERROR;
+			}
+			else if (errno == ENOTCONN) {
+				ret = 0;
+			}
+		}
+		else if (pfd[1].revents & POLL_MASK_OUTPUT) {
+			ret = socket_event_poll_out(this);
+			if (ret >= 0) {
+				/* Suppress errors while making progress. */
+				pfd[1].revents &= ~POLL_MASK_ERROR;
+			}
+			else if (errno == ENOTCONN) {
+				ret = 0;
+			}
+		}
+		else {
+			/*
+			 * This usually means that we left poll() because
+			 * somebody pushed a byte onto our pipe.  That wakeup
+			 * is why the pipe is there, but once awake we can do
+			 * all the checking we need on the next iteration.
+			 */
+			ret = 0;
+		}
+		if (pfd[1].revents & POLL_MASK_ERROR) {
+			gf_log(this->name,GF_LOG_ERROR,
+			       "poll error on socket");
+			return NULL;
+		}
+		if (ret < 0) {
+			gf_log(this->name,GF_LOG_ERROR,
+			       "unknown error in polling loop");
+			return NULL;
+		}
+	}
 }


+
 int
 socket_server_event_handler (int fd, int idx, void *data,
                              int poll_in, int poll_out, int poll_err)
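
socket_poller() above sleeps in poll() on the transport socket plus the read
end of a pipe; the submit paths later in the patch write one byte per queued
message, and __socket_ioq_churn_entry() drains one byte per message written
out, so readability of the pipe tracks whether the ioq is non-empty.  A minimal
standalone sketch of that self-pipe wakeup pattern (all names made up for
illustration):

    #include <poll.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static int pipe_fd[2];

    /* Consumer: wake up once per byte in the pipe, i.e. once per queued item. */
    static void *
    poller (void *arg)
    {
            struct pollfd pfd = { .fd = 0, .events = POLLIN, .revents = 0 };
            char          byte;
            int           served = 0;

            (void) arg;
            pfd.fd = pipe_fd[0];
            while (served < 3) {
                    if (poll (&pfd, 1, -1) < 0)
                            break;
                    if ((pfd.revents & POLLIN) &&
                        read (pipe_fd[0], &byte, 1) == 1) {
                            printf ("woke up for item %d\n", ++served);
                    }
            }
            return NULL;
    }

    int
    main (void)
    {
            pthread_t tid;
            char      byte = 'j';   /* same trick as a_byte in the patch */
            int       i;

            if (pipe (pipe_fd) < 0 ||
                pthread_create (&tid, NULL, poller, NULL) != 0)
                    return 1;
            for (i = 0; i < 3; i++) {
                    /* Producer: one byte written per item added to the queue. */
                    if (write (pipe_fd[1], &byte, 1) != 1)
                            return 1;
            }
            pthread_join (tid, NULL);
            return 0;
    }
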
@@ -1813,19 +2063,6 @@ socket_server_event_handler (int fd, int idx, void *data,
                                 goto unlock;
                         }

-                        if (!priv->bio) {
-                                ret = __socket_nonblock (new_sock);
-
-                                if (ret == -1) {
-                                        gf_log (this->name, GF_LOG_WARNING,
-                                                "NBIO on %d failed (%s)",
-                                                new_sock, strerror (errno));
-
-                                        close (new_sock);
-                                        goto unlock;
-                                }
-                        }
-
                         if (priv->nodelay) {
                                 ret = __socket_nodelay (new_sock);
                                 if (ret == -1) {
@@ -1883,20 +2120,61 @@ socket_server_event_handler (int fd, int idx, void *data,
                         new_trans->listener = this;
                         new_priv = new_trans->private;

+			new_priv->use_ssl = priv->use_ssl;
+			new_priv->sock = new_sock;
+			new_priv->own_thread = priv->own_thread;
+
+			if (priv->use_ssl) {
+				new_priv->ssl_ctx = priv->ssl_ctx;
+				if (ssl_setup_connection(new_priv,1) < 0) {
+					gf_log(this->name,GF_LOG_ERROR,
+					       "server setup failed");
+					close(new_sock);
+					goto unlock;
+				}
+			}
+
+                        if (!priv->bio) {
+                                ret = __socket_nonblock (new_sock);
+
+                                if (ret == -1) {
+                                        gf_log (this->name, GF_LOG_WARNING,
+                                                "NBIO on %d failed (%s)",
+                                                new_sock, strerror (errno));
+
+                                        close (new_sock);
+                                        goto unlock;
+                                }
+                        }
+
                         pthread_mutex_lock (&new_priv->lock);
                         {
-                                new_priv->sock = new_sock;
                                 new_priv->connected = 1;
                                 rpc_transport_ref (new_trans);

-                                new_priv->idx =
-                                        event_register (ctx->event_pool,
-                                                        new_sock,
-                                                        socket_event_handler,
-                                                        new_trans, 1, 0);
+				if (new_priv->own_thread) {
+					if (pipe(new_priv->pipe) < 0) {
+						gf_log(this->name,GF_LOG_ERROR,
+						       "could not create pipe");
+					}
+					if (pthread_create(&new_priv->thread,
+							NULL, socket_poller,
+							new_trans) != 0) {
+						gf_log(this->name,GF_LOG_ERROR,
+						       "could not create poll thread");
+					}
+				}
+				else {
+					new_priv->idx =
+						event_register (ctx->event_pool,
+								new_sock,
+								socket_event_handler,
+								new_trans,
+								1, 0);
+					if (new_priv->idx == -1)
+						ret = -1;
+				}

-                                if (new_priv->idx == -1)
-                                        ret = -1;
                         }
                         pthread_mutex_unlock (&new_priv->lock);
                         if (ret == -1) {
@@ -2036,19 +2314,6 @@ socket_connect (rpc_transport_t *this, int port)
                         }
                 }

-                if (!priv->bio) {
-                        ret = __socket_nonblock (priv->sock);
-
-                        if (ret == -1) {
-                                gf_log (this->name, GF_LOG_ERROR,
-                                        "NBIO on %d failed (%s)",
-                                        priv->sock, strerror (errno));
-                                close (priv->sock);
-                                priv->sock = -1;
-                                goto unlock;
-                        }
-                }
-
                 if (priv->keepalive) {
                         ret = __socket_keepalive (priv->sock,
                                                   priv->keepaliveintvl,
@@ -2084,17 +2349,55 @@ socket_connect (rpc_transport_t *this, int port)
                         goto unlock;
                 }

-                priv->connected = 0;
+		if (priv->use_ssl) {
+			ret = ssl_setup_connection(priv,0);
+			if (ret < 0) {
+				gf_log(this->name,GF_LOG_ERROR,
+					"client setup failed");
+				close(priv->sock);
+				priv->sock = -1;
+				goto unlock;
+			}
+		}

-                rpc_transport_ref (this);
+                if (!priv->bio) {
+                        ret = __socket_nonblock (priv->sock);

-                priv->idx = event_register (ctx->event_pool, priv->sock,
-                                            socket_event_handler, this, 1, 1);
-                if (priv->idx == -1) {
-                        gf_log ("", GF_LOG_WARNING,
-                                "failed to register the event");
-                        ret = -1;
+                        if (ret == -1) {
+                                gf_log (this->name, GF_LOG_ERROR,
+                                        "NBIO on %d failed (%s)",
+                                        priv->sock, strerror (errno));
+                                close (priv->sock);
+                                priv->sock = -1;
+                                goto unlock;
+                        }
                 }
+
+                priv->connected = 0;
+                rpc_transport_ref (this);
+
+		if (priv->own_thread) {
+			if (pipe(priv->pipe) < 0) {
+				gf_log(this->name,GF_LOG_ERROR,
+				       "could not create pipe");
+			}
+
+			if (pthread_create(&priv->thread,NULL,
+					socket_poller, this) != 0) {
+				gf_log(this->name,GF_LOG_ERROR,
+				       "could not create poll thread");
+			}
+		}
+		else {
+			priv->idx = event_register (ctx->event_pool, priv->sock,
+						    socket_event_handler,
+						    this, 1, 1);
+			if (priv->idx == -1) {
+				gf_log ("", GF_LOG_WARNING,
+					"failed to register the event");
+				ret = -1;
+			}
+		}
         }
 unlock:
         pthread_mutex_unlock (&priv->lock);
@@ -2260,6 +2563,7 @@ socket_submit_request (rpc_transport_t *this, rpc_transport_req_t *req)
         char              need_append = 1;
         struct ioq       *entry = NULL;
         glusterfs_ctx_t  *ctx = NULL;
+	char              a_byte = 'j';

         GF_VALIDATE_OR_GOTO ("socket", this, out);
         GF_VALIDATE_OR_GOTO ("socket", this->private, out);
@@ -2285,21 +2589,31 @@ socket_submit_request (rpc_transport_t *this, rpc_transport_req_t *req)
                         goto unlock;

                 if (list_empty (&priv->ioq)) {
-                        ret = __socket_ioq_churn_entry (this, entry);
+                        ret = __socket_ioq_churn_entry (this, entry, 1);

-                        if (ret == 0)
+                        if (ret == 0) {
                                 need_append = 0;
-
-                        if (ret > 0)
+			}
+                        if (ret > 0) {
                                 need_poll_out = 1;
+			}
                 }

                 if (need_append) {
                         list_add_tail (&entry->list, &priv->ioq);
+			if (priv->own_thread) {
+				/*
+				 * Make sure the polling thread wakes up, by
+				 * writing a byte to represent this entry.
+				 */
+				if (write(priv->pipe[1],&a_byte,1) < 1) {
+					gf_log(this->name,GF_LOG_WARNING,
+					       "write error on pipe");
+				}
+			}
                         ret = 0;
                 }
-
-                if (need_poll_out) {
+                if (!priv->own_thread && need_poll_out) {
                         /* first entry to wait. continue writing on POLLOUT */
                         priv->idx = event_select_on (ctx->event_pool,
                                                      priv->sock,
@@ -2323,6 +2637,7 @@ socket_submit_reply (rpc_transport_t *this, rpc_transport_reply_t *reply)
         char              need_append = 1;
         struct ioq       *entry = NULL;
         glusterfs_ctx_t  *ctx = NULL;
+	char              a_byte = 'd';

         GF_VALIDATE_OR_GOTO ("socket", this, out);
         GF_VALIDATE_OR_GOTO ("socket", this->private, out);
@@ -2341,33 +2656,44 @@ socket_submit_reply (rpc_transport_t *this, rpc_transport_reply_t *reply)
                         }
                         goto unlock;
                 }
+
                 priv->submit_log = 0;
                 entry = __socket_ioq_new (this, &reply->msg);
                 if (!entry)
                         goto unlock;
+
                 if (list_empty (&priv->ioq)) {
-                        ret = __socket_ioq_churn_entry (this, entry);
+                        ret = __socket_ioq_churn_entry (this, entry, 1);

-                        if (ret == 0)
+                        if (ret == 0) {
                                 need_append = 0;
-
-                        if (ret > 0)
+			}
+                        if (ret > 0) {
                                 need_poll_out = 1;
+			}
                 }

                 if (need_append) {
                         list_add_tail (&entry->list, &priv->ioq);
+			if (priv->own_thread) {
+				/*
+				 * Make sure the polling thread wakes up, by
+				 * writing a byte to represent this entry.
+				 */
+				if (write(priv->pipe[1],&a_byte,1) < 1) {
+					gf_log(this->name,GF_LOG_WARNING,
+					       "write error on pipe");
+				}
+			}
                         ret = 0;
                 }
-
-                if (need_poll_out) {
+                if (!priv->own_thread && need_poll_out) {
                         /* first entry to wait. continue writing on POLLOUT */
                         priv->idx = event_select_on (ctx->event_pool,
                                                      priv->sock,
                                                      priv->idx, -1, 1);
                 }
         }
-
 unlock:
         pthread_mutex_unlock (&priv->lock);

@@ -2515,6 +2841,8 @@ socket_init (rpc_transport_t *this)
         char             *optstr = NULL;
         uint32_t          keepalive = 0;
         uint32_t          backlog = 0;
+	static int        session_id = 0;
+	int               ssl_opts = 0;

         if (this->private) {
                 gf_log_callingfn (this->name, GF_LOG_ERROR,
@@ -2629,6 +2957,104 @@ socket_init (rpc_transport_t *this)
         }

         priv->windowsize = (int)windowsize;
+
+	if (dict_get_str(this->options,SSL_OWN_CERT_OPT,&optstr) == 0) {
+		priv->ssl_own_cert = gf_strdup(optstr);
+		++ssl_opts;
+	}
+	if (dict_get_str(this->options,SSL_PRIVATE_KEY_OPT,&optstr) == 0) {
+		priv->ssl_private_key = gf_strdup(optstr);
+		++ssl_opts;
+	}
+	if (dict_get_str(this->options,SSL_CA_LIST_OPT,&optstr) == 0) {
+		priv->ssl_ca_list = gf_strdup(optstr);
+		++ssl_opts;
+	}
+	switch (ssl_opts) {
+	case 0:
+		/* Not using SSL.  Boo hoo, but not an error. */
+		priv->use_ssl = _gf_false;
+		break;
+	case 3:
+		/* SSL is fully configured.  Yay. */
+		gf_log(this->name,GF_LOG_INFO,"SSL support enabled.");
+		priv->use_ssl = _gf_true;
+		break;
+	default:
+		/*
+		 * Tried to configure SSL, but something's missing.  If they
+		 * meant to secure the connection, continuing would violate
+		 * their trust.
+		 */
+		if (priv->ssl_own_cert) {
+			gf_log(this->name,GF_LOG_ERROR,
+			       "own-cert given without private-key/ca-list");
+			GF_FREE(priv->ssl_own_cert);
+		}
+		if (priv->ssl_private_key) {
+			gf_log(this->name,GF_LOG_ERROR,
+			       "private-key given without own-cert/ca-list");
+			GF_FREE(priv->ssl_private_key);
+		}
+		if (priv->ssl_ca_list) {
+			gf_log(this->name,GF_LOG_ERROR,
+			       "ca-list given without own-cert/private-key");
+			GF_FREE(priv->ssl_ca_list);
+		}
+		GF_FREE(priv);
+		goto out;
+	}
+
+	priv->own_thread = priv->use_ssl;
+	if (dict_get_str(this->options,OWN_THREAD_OPT,&optstr) == 0) {
+                if (gf_string2boolean (optstr, &priv->own_thread) != 0) {
+                        gf_log (this->name, GF_LOG_ERROR,
+				"invalid value given for own-thread boolean");
+		}
+	}
+	gf_log(this->name,GF_LOG_INFO,"using %s polling thread",
+	       priv->own_thread ? "private" : "system");
+
+	if (priv->use_ssl) {
+		SSL_library_init();
+		SSL_load_error_strings();
+		priv->ssl_meth = SSLv23_method();
+		priv->ssl_ctx = SSL_CTX_new(priv->ssl_meth);
+
+		if (!SSL_CTX_use_certificate_chain_file(priv->ssl_ctx,
+							priv->ssl_own_cert)) {
+			gf_log(this->name,GF_LOG_ERROR,
+			       "could not load our cert");
+			goto out;
+		}
+
+		if (!SSL_CTX_use_PrivateKey_file(priv->ssl_ctx,
+						 priv->ssl_private_key,
+						 SSL_FILETYPE_PEM)) {
+			gf_log(this->name,GF_LOG_ERROR,
+			       "could not load private key");
+			goto out;
+		}
+
+		if (!SSL_CTX_load_verify_locations(priv->ssl_ctx,
+						   priv->ssl_ca_list,0)) {
+			gf_log(this->name,GF_LOG_ERROR,
+			       "could not load CA list");
+			goto out;
+		}
+
+#if (OPENSSL_VERSION_NUMBER < 0x00905100L)
+		SSL_CTX_set_verify_depth(priv->ssl_ctx,1);
+#endif
+
+		priv->ssl_session_id = ++session_id;
+		SSL_CTX_set_session_id_context(priv->ssl_ctx,
+					       (void *)&priv->ssl_session_id,
+					       sizeof(priv->ssl_session_id));
+
+		SSL_CTX_set_verify(priv->ssl_ctx,SSL_VERIFY_PEER,0);
+	}
+
 out:
         this->private = priv;

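
For reference, a condensed standalone sketch of building a mutually
authenticating SSL_CTX from the same three files the code above loads.
SSL_CTX_check_private_key() and SSL_VERIFY_FAIL_IF_NO_PEER_CERT are extra
sanity measures shown here for illustration only; the patch itself stops at
SSL_VERIFY_PEER.

    #include <stdio.h>
    #include <openssl/err.h>
    #include <openssl/ssl.h>

    /* Build one SSL_CTX that both presents a certificate and demands one
     * from the peer, from the three files named by the new options. */
    static SSL_CTX *
    make_ssl_ctx (const char *own_cert, const char *priv_key, const char *ca_list)
    {
            SSL_CTX *ctx;

            SSL_library_init ();
            SSL_load_error_strings ();

            ctx = SSL_CTX_new (SSLv23_method ());
            if (!ctx)
                    return NULL;

            if (SSL_CTX_use_certificate_chain_file (ctx, own_cert) != 1 ||
                SSL_CTX_use_PrivateKey_file (ctx, priv_key, SSL_FILETYPE_PEM) != 1 ||
                SSL_CTX_check_private_key (ctx) != 1 ||
                SSL_CTX_load_verify_locations (ctx, ca_list, NULL) != 1) {
                    ERR_print_errors_fp (stderr);
                    SSL_CTX_free (ctx);
                    return NULL;
            }

            /* Require and verify a peer certificate on both ends. */
            SSL_CTX_set_verify (ctx,
                                SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT,
                                NULL);
            return ctx;
    }
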
@@ -2733,5 +3159,17 @@ struct volume_options options[] = {
         { .key   = {"transport.socket.listen-backlog"},
           .type  = GF_OPTION_TYPE_INT
         },
+	{ .key   = {SSL_OWN_CERT_OPT},
+	  .type  = GF_OPTION_TYPE_STR
+	},
+       	{ .key   = {SSL_PRIVATE_KEY_OPT},
+	  .type  = GF_OPTION_TYPE_STR
+	},
+	{ .key   = {SSL_CA_LIST_OPT},
+	  .type  = GF_OPTION_TYPE_STR
+	},
+	{ .key   = {OWN_THREAD_OPT},
+	  .type  = GF_OPTION_TYPE_BOOL
+	},
         { .key = {NULL} }
 };
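
To exercise the new options, a caller would set them in the transport's option
dict before the transport is loaded.  A sketch assuming the usual libglusterfs
dict helpers (dict_new()/dict_set_str()); the certificate paths and the "on"
value for own-thread are made-up examples, and how the dict reaches
rpc_transport_load() is out of scope here.

    #include "dict.h"

    /* Build an options dict that turns on SSL and the private polling
     * thread for a socket transport. */
    static dict_t *
    ssl_transport_options (void)
    {
            dict_t *opts = dict_new ();

            if (!opts)
                    return NULL;

            if (dict_set_str (opts, "transport-type", "socket") ||
                dict_set_str (opts, "transport.socket.ssl-own-cert",
                              "/etc/glusterfs/ssl/server.pem") ||
                dict_set_str (opts, "transport.socket.ssl-private-key",
                              "/etc/glusterfs/ssl/server.key") ||
                dict_set_str (opts, "transport.socket.ssl-ca-list",
                              "/etc/glusterfs/ssl/ca.pem") ||
                dict_set_str (opts, "transport.socket.own-thread", "on")) {
                    dict_unref (opts);
                    return NULL;
            }
            return opts;
    }
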
diff --git a/rpc/rpc-transport/socket/src/socket.h b/rpc/rpc-transport/socket/src/socket.h
index 4acecab..19e5930 100644
--- a/rpc/rpc-transport/socket/src/socket.h
+++ b/rpc/rpc-transport/socket/src/socket.h
@@ -20,6 +20,8 @@
 #ifndef _SOCKET_H
 #define _SOCKET_H

+#include <openssl/ssl.h>
+#include <openssl/err.h>

 #ifndef _CONFIG_H
 #define _CONFIG_H
@@ -193,6 +195,19 @@ typedef struct {
         int                    keepaliveidle;
         int                    keepaliveintvl;
         uint32_t               backlog;
+	gf_boolean_t           use_ssl;
+	const SSL_METHOD      *ssl_meth;
+	SSL_CTX               *ssl_ctx;
+	int                    ssl_session_id;
+	BIO                   *ssl_sbio;
+	SSL                   *ssl_ssl;
+	char                  *ssl_own_cert;
+	char                  *ssl_private_key;
+	char                  *ssl_ca_list;
+	pthread_t              thread;
+	int                    pipe[2];
+	gf_boolean_t           own_thread;
+	int                    socket_gen;
 } socket_private_t;


--
1.7.3.4