Hello devs,

Following the bundle release in 1.1.17, I've been working on a
similar implementation for rkt.  The resource agent, modelled on the
docker one, is here:

  https://github.com/vvidic/resource-agents/blob/rkt/heartbeat/rkt

That part should be fairly functional, but I'm having a few more
problems with the Pacemaker part.  The patch I'm attaching works for
me, but it is not a real pull request since it breaks the existing
docker implementation :)  Could someone help me get this into an
acceptable state?

My first question is whether the XML changes are OK for the new
container type.  Do I need to modify both resources-2.8.rng and
resources-2.9.rng, or should I perhaps create a new schema version?
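
To give some context, the intent is that the new element is used inside
a bundle exactly like the existing docker one; the network and storage
parts come from the unchanged bundle schema.  A rough sketch of the kind
of configuration I have in mind (all ids, images and addresses are
made-up examples):

  <!-- rough example only; ids, image and addresses are made up -->
  <bundle id="httpd-bundle">
    <!-- the new container type, with the same attributes as <docker> -->
    <rkt image="example.com/httpd" replicas="3" replicas-per-host="1"/>
    <!-- network and storage are the existing bundle elements -->
    <network ip-range-start="192.168.122.131" host-netmask="24">
      <port-mapping id="httpd-port" port="80"/>
    </network>
    <storage>
      <storage-mapping id="httpd-root" source-dir="/srv/www"
                       target-dir="/var/www/html" options="rw"/>
    </storage>
    <primitive class="ocf" id="httpd" provider="heartbeat" type="apache"/>
  </bundle>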

-- 
Valentin
--- a/xml/crm_mon.rng
+++ b/xml/crm_mon.rng
@@ -307,6 +307,7 @@
             <attribute name="type">
                 <choice>
                     <value>docker</value>
+                    <value>rkt</value>
                 </choice>
             </attribute>
             <attribute name="image"> <text/> </attribute>
--- a/xml/resources-2.8.rng
+++ b/xml/resources-2.8.rng
@@ -95,6 +95,27 @@
               <attribute name="options"><text/></attribute>
             </optional>
           </element>
+          <element name="rkt">
+            <attribute name="image"><text/></attribute>
+            <optional>
+              <attribute name="replicas"><data type="integer"/></attribute>
+            </optional>
+            <optional>
+              <attribute name="replicas-per-host"><data type="integer"/></attribute>
+            </optional>
+            <optional>
+              <attribute name="masters"><data type="integer"/></attribute>
+            </optional>
+            <optional>
+              <attribute name="run-command"> <text/></attribute>
+            </optional>
+            <optional>
+              <attribute name="network"><text/></attribute>
+            </optional>
+            <optional>
+              <attribute name="options"><text/></attribute>
+            </optional>
+          </element>
         </choice>
         <optional>
           <element name="network">
--- a/xml/resources-2.9.rng
+++ b/xml/resources-2.9.rng
@@ -95,6 +95,27 @@
               <attribute name="options"><text/></attribute>
             </optional>
           </element>
+          <element name="rkt">
+            <attribute name="image"><text/></attribute>
+            <optional>
+              <attribute name="replicas"><data type="integer"/></attribute>
+            </optional>
+            <optional>
+              <attribute name="replicas-per-host"><data type="integer"/></attribute>
+            </optional>
+            <optional>
+              <attribute name="masters"><data type="integer"/></attribute>
+            </optional>
+            <optional>
+              <attribute name="run-command"> <text/></attribute>
+            </optional>
+            <optional>
+              <attribute name="network"><text/></attribute>
+            </optional>
+            <optional>
+              <attribute name="options"><text/></attribute>
+            </optional>
+          </element>
         </choice>
         <optional>
           <element name="network">
--- a/lib/pengine/container.c
+++ b/lib/pengine/container.c
@@ -75,8 +75,8 @@
                     data->prefix, tuple->offset, tuple->ipaddr,
                     data->prefix, tuple->offset, data->prefix, tuple->offset);
 #else
-    return snprintf(buffer, max, " --add-host=%s-%d:%s",
-                    data->prefix, tuple->offset, tuple->ipaddr);
+    return snprintf(buffer, max, " --hosts-entry=%s=%s-%d",
+                    tuple->ipaddr, data->prefix, tuple->offset);
 #endif
 }
 
@@ -335,6 +335,161 @@
         return TRUE;
 }
 
+static bool
+create_rkt_resource(
+    resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
+    pe_working_set_t * data_set)
+{
+        int offset = 0, max = 4096;
+        char *buffer = calloc(1, max+1);
+
+        int doffset = 0, dmax = 1024;
+        char *dbuffer = calloc(1, dmax+1);
+
+        char *id = NULL;
+        xmlNode *xml_docker = NULL;
+        xmlNode *xml_obj = NULL;
+
+        int volid = 0;
+
+        id = crm_strdup_printf("%s-rkt-%d", data->prefix, tuple->offset);
+        crm_xml_sanitize_id(id);
+        xml_docker = create_resource(id, "heartbeat", "rkt");
+        free(id);
+
+        xml_obj = create_xml_node(xml_docker, XML_TAG_ATTR_SETS);
+        crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset);
+
+        create_nvp(xml_obj, "image", data->image);
+        create_nvp(xml_obj, "allow_pull", "true");
+        create_nvp(xml_obj, "force_kill", "false");
+        create_nvp(xml_obj, "reuse", "false");
+
+        /* Set a container hostname only if we have an IP to map it to.
+         * The user can set -h or --uts=host themselves if they want a nicer
+         * name for logs, but this makes applications happy who need their
+         * hostname to match the IP they bind to.
+         */
+        if (data->ip_range_start != NULL) {
+            offset += snprintf(buffer+offset, max-offset, " --hostname=%s-%d",
+                               data->prefix, tuple->offset);
+        }
+
+        if(data->docker_network) {
+//        offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s", tuple->ipaddr);
+            offset += snprintf(buffer+offset, max-offset, " --net=%s", data->docker_network);
+        }
+
+        if(data->control_port) {
+            offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%s", data->control_port);
+        } else {
+            offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
+        }
+
+        for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
+            container_mount_t *mount = pIter->data;
+
+            if(mount->flags) {
+                char *source = crm_strdup_printf(
+                    "%s/%s-%d", mount->source, data->prefix, tuple->offset);
+
+                if(doffset > 0) {
+                    doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
+                }
+                doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
+                offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, source);
+                if(mount->options) {
+                    offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
+                }
+                offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
+                free(source);
+
+            } else {
+                offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, mount->source);
+                if(mount->options) {
+                    offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
+                }
+                offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
+            }
+            volid++;
+        }
+
+        for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
+            container_port_t *port = pIter->data;
+
+            if(tuple->ipaddr) {
+                offset += snprintf(buffer+offset, max-offset, " --port=%s:%s:%s",
+                                   port->target, tuple->ipaddr, port->source);
+            } else {
+                offset += snprintf(buffer+offset, max-offset, " --port=%s:%s", port->target, port->source);
+            }
+        }
+
+        if(data->docker_run_options) {
+            offset += snprintf(buffer+offset, max-offset, " %s", data->docker_run_options);
+        }
+
+        if(data->docker_host_options) {
+            offset += snprintf(buffer+offset, max-offset, " %s", data->docker_host_options);
+        }
+
+        create_nvp(xml_obj, "run_opts", buffer);
+        free(buffer);
+
+        create_nvp(xml_obj, "mount_points", dbuffer);
+        free(dbuffer);
+
+        if(tuple->child) {
+            if(data->docker_run_command) {
+                create_nvp(xml_obj, "run_cmd", data->docker_run_command);
+            } else {
+                create_nvp(xml_obj, "run_cmd", SBIN_DIR"/pacemaker_remoted");
+            }
+
+            /* TODO: Allow users to specify their own?
+             *
+             * We just want to know if the container is alive, we'll
+             * monitor the child independently
+             */
+            create_nvp(xml_obj, "monitor_cmd", "/bin/true");
+        /* } else if(child && data->untrusted) {
+         * Support this use-case?
+         *
+         * The ability to have resources started/stopped by us, but
+         * unable to set attributes, etc.
+         *
+         * Arguably better to control API access this with ACLs like
+         * "normal" remote nodes
+         *
+         *     create_nvp(xml_obj, "run_cmd", "/usr/libexec/pacemaker/lrmd");
+         *     create_nvp(xml_obj, "monitor_cmd", "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
+         */
+        } else {
+            if(data->docker_run_command) {
+                create_nvp(xml_obj, "run_cmd", data->docker_run_command);
+            }
+
+            /* TODO: Allow users to specify their own?
+             *
+             * We don't know what's in the container, so we just want
+             * to know if it is alive
+             */
+            create_nvp(xml_obj, "monitor_cmd", "/bin/true");
+        }
+
+
+        xml_obj = create_xml_node(xml_docker, "operations");
+        create_op(xml_obj, ID(xml_docker), "monitor", "60s");
+
+        // TODO: Other ops? Timeouts and intervals from underlying resource?
+
+        if (common_unpack(xml_docker, &tuple->docker, parent, data_set) == FALSE) {
+            return FALSE;
+        }
+        parent->children = g_list_append(parent->children, tuple->docker);
+        return TRUE;
+}
+
 /*!
  * \brief Ban a node from a resource's (and its children's) allowed nodes list
  *
@@ -501,8 +656,14 @@
     pe_working_set_t * data_set)
 {
 
-    if(create_docker_resource(parent, data, tuple, data_set) == FALSE) {
-        return TRUE;
+    if (first_named_child(parent->xml, "docker") != NULL) {
+        if(create_docker_resource(parent, data, tuple, data_set) == FALSE) {
+            return TRUE;
+        }
+    } else if (first_named_child(parent->xml, "rkt") != NULL) {
+        if(create_rkt_resource(parent, data, tuple, data_set) == FALSE) {
+            return TRUE;
+        }
     }
     if(create_ip_resource(parent, data, tuple, data_set) == FALSE) {
         return TRUE;
@@ -562,7 +723,11 @@
 
     xml_obj = first_named_child(rsc->xml, "docker");
     if(xml_obj == NULL) {
-        return FALSE;
+        xml_obj = first_named_child(rsc->xml, "rkt");
+
+        if(xml_obj == NULL) {
+            return FALSE;
+        }
     }
 
     value = crm_element_value(xml_obj, "masters");