So far, hroller ignores the fact that non-redundant instances exist.
One option to deal with non-redundant instances is to not schedule those
nodes for reboot. This is supported by adding the option --skip-non-redundant.

Signed-off-by: Klaus Aehlig <[email protected]>
---
 src/Ganeti/HTools/CLI.hs             | 10 ++++++++++
 src/Ganeti/HTools/Program/Hroller.hs | 17 ++++++++++++++++-
 2 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/src/Ganeti/HTools/CLI.hs b/src/Ganeti/HTools/CLI.hs
index 33e241d..3a88c9d 100644
--- a/src/Ganeti/HTools/CLI.hs
+++ b/src/Ganeti/HTools/CLI.hs
@@ -84,6 +84,7 @@ module Ganeti.HTools.CLI
   , oShowHelp
   , oShowVer
   , oShowComp
+  , oSkipNonRedundant
   , oStdSpec
   , oTieredSpec
   , oVerbose
@@ -150,6 +151,7 @@ data Options = Options
   , optShowInsts   :: Bool           -- ^ Whether to show the instance map
   , optShowNodes   :: Maybe [String] -- ^ Whether to show node status
   , optShowVer     :: Bool           -- ^ Just show the program version
+  , optSkipNonRedundant :: Bool      -- ^ Skip nodes with non-redundant instance
   , optStdSpec     :: Maybe RSpec    -- ^ Requested standard specs
   , optTestCount   :: Maybe Int      -- ^ Optional test count override
   , optTieredSpec  :: Maybe RSpec    -- ^ Requested specs for tiered mode
@@ -189,6 +191,7 @@ defaultOptions  = Options
   , optNoSimulation = False
   , optNodeSim     = []
   , optNodeTags    = Nothing
+  , optSkipNonRedundant = False
   , optOffline     = []
   , optOfflineMaintenance = False
   , optOneStepOnly = False
@@ -549,6 +552,13 @@ oSaveCluster =
    "Save cluster state at the end of the processing to FILE",
    OptComplNone)
 
+oSkipNonRedundant :: OptType
+oSkipNonRedundant =
+  (Option "" ["skip-non-redundant"]
+   (NoArg (\ opts -> Ok opts { optSkipNonRedundant = True }))
+    "Skip nodes that host a non-redundant instance",
+    OptComplNone)
+
 oStdSpec :: OptType
 oStdSpec =
   (Option "" ["standard-alloc"]
diff --git a/src/Ganeti/HTools/Program/Hroller.hs b/src/Ganeti/HTools/Program/Hroller.hs
index 4e3dd3e..88b3c34 100644
--- a/src/Ganeti/HTools/Program/Hroller.hs
+++ b/src/Ganeti/HTools/Program/Hroller.hs
@@ -65,6 +65,7 @@ options = do
     , oNodeTags
     , oSaveCluster
     , oGroup
+    , oSkipNonRedundant
     , oForce
     , oOneStepOnly
     ]
@@ -101,6 +102,18 @@ hasTag :: Maybe [String] -> Node.Node -> Bool
 hasTag Nothing _ = True
 hasTag (Just tags) node = not . null $ Node.nTags node `intersect` tags
 
+-- | From a cluster configuration, get the list of non-redundant instances
+-- of a node.
+nonRedundant :: (Node.List, Instance.List) -> Ndx -> [Idx]
+nonRedundant (nl, il) ndx =
+  filter (not . Instance.hasSecondary . flip Container.find  il) $
+  Node.pList (Container.find ndx nl)
+
+-- | Within a cluster configuration, decide if the node hosts non-redundant
+-- Instances.
+noNonRedundant :: (Node.List, Instance.List) -> Node.Node -> Bool
+noNonRedundant conf = null . nonRedundant conf . Node.idx
+
 -- | Put the master node last.
 -- Reorder a list of lists of nodes such that the master node (if present)
 -- is the last node of the last group.
@@ -141,6 +154,9 @@ main opts args = do
 
   let nodes = IntMap.filter (foldl (liftA2 (&&)) (const True)
                              [ not . Node.offline
+                             , if optSkipNonRedundant opts
+                                  then noNonRedundant (nlf, ilf)
+                                  else const True
                              , hasTag $ optNodeTags opts
                              , hasGroup wantedGroup ])
               nlf
@@ -185,4 +201,3 @@ main opts args = do
        unless (optNoHeaders opts) $
               putStrLn "'Node Reboot Groups'"
        mapM_ (putStrLn . commaJoin) outputRebootNames
-
-- 
1.8.2.1

Reply via email to