Hi,

thanks for the patch! I'll test it and let you know soon.

  Best regards,
  Petr


On Mon, Apr 28, 2014 at 1:51 PM, 'BSRK Aditya <[email protected]>' via
ganeti-devel <[email protected]> wrote:

> This commit adds the feature requested in Issue 738.
> The reporter asked for the ability to create an instance
> in a specific group.
>
> Signed-off-by: BSRK Aditya <[email protected]>
> ---
>  lib/cli.py                          |  1 +
>  lib/client/gnt_instance.py          |  1 +
>  lib/cmdlib/instance.py              |  1 +
>  lib/cmdlib/test.py                  |  2 +
>  lib/masterd/iallocator.py           |  2 +
>  src/Ganeti/HTools/Backend/IAlloc.hs | 16 +++++---
>  src/Ganeti/HTools/Cluster.hs        | 75
> ++++++++++++++++++++++++++++++-------
>  src/Ganeti/HTools/Loader.hs         | 19 +++++++---
>  src/Ganeti/OpCodes.hs               |  2 +
>  test/hs/Test/Ganeti/OpCodes.hs      |  4 +-
>  test/py/cmdlib/test_unittest.py     |  2 +
>  11 files changed, 100 insertions(+), 25 deletions(-)
>
> diff --git a/lib/cli.py b/lib/cli.py
> index 04ee4e0..5c72e97 100644
> --- a/lib/cli.py
> +++ b/lib/cli.py
> @@ -2832,6 +2832,7 @@ def GenericInstanceCreate(mode, opts, args):
>                                  nics=nics,
>                                  conflicts_check=opts.conflicts_check,
>                                  pnode=pnode, snode=snode,
> +                                group_name = opts.nodegroup,
>                                  ip_check=opts.ip_check,
>                                  name_check=opts.name_check,
>                                  wait_for_sync=opts.wait_for_sync,
> diff --git a/lib/client/gnt_instance.py b/lib/client/gnt_instance.py
> index 8ded045..2198c9a 100644
> --- a/lib/client/gnt_instance.py
> +++ b/lib/client/gnt_instance.py
> @@ -1485,6 +1485,7 @@ add_opts = [
>    NO_INSTALL_OPT,
>    IGNORE_IPOLICY_OPT,
>    INSTANCE_COMMUNICATION_OPT,
> +  NODEGROUP_OPT,
>    ]
>
>  commands = {
> diff --git a/lib/cmdlib/instance.py b/lib/cmdlib/instance.py
> index 120b239..f86942a 100644
> --- a/lib/cmdlib/instance.py
> +++ b/lib/cmdlib/instance.py
> @@ -121,6 +121,7 @@ def _CreateInstanceAllocRequest(op, disks, nics,
> beparams, node_name_whitelist):
>    """
>    spindle_use = beparams[constants.BE_SPINDLE_USE]
>    return iallocator.IAReqInstanceAlloc(name=op.instance_name,
> +                                       group_name = op.group_name,
>                                         disk_template=op.disk_template,
>                                         tags=op.tags,
>                                         os=op.os_type,
> diff --git a/lib/cmdlib/test.py b/lib/cmdlib/test.py
> index 2b10740..d83ed67 100644
> --- a/lib/cmdlib/test.py
> +++ b/lib/cmdlib/test.py
> @@ -402,6 +402,7 @@ class LUTestAllocator(NoHooksLU):
>                                            vcpus=self.op.vcpus,
>                                            spindle_use=self.op.spindle_use,
>                                            hypervisor=self.op.hypervisor,
> +                                          group_name = self.op.group_name,
>                                            node_whitelist=None)
>      elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
>        req = iallocator.IAReqRelocate(
> @@ -425,6 +426,7 @@ class LUTestAllocator(NoHooksLU):
>                                               vcpus=self.op.vcpus,
>
> spindle_use=self.op.spindle_use,
>
> hypervisor=self.op.hypervisor,
> +                                             group_name =
> self.op.group_name,
>                                               node_whitelist=None)
>                 for idx in range(self.op.count)]
>        req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
> diff --git a/lib/masterd/iallocator.py b/lib/masterd/iallocator.py
> index 4ceae4a..8fd44cb 100644
> --- a/lib/masterd/iallocator.py
> +++ b/lib/masterd/iallocator.py
> @@ -164,6 +164,7 @@ class IAReqInstanceAlloc(IARequestBase):
>      ("vcpus", ht.TInt),
>      ("hypervisor", ht.TString),
>      ("node_whitelist", ht.TMaybeListOf(ht.TNonEmptyString)),
> +    ("group_name", ht.TMaybe(ht.TNonEmptyString)),
>      ]
>    REQ_RESULT = ht.TList
>
> @@ -198,6 +199,7 @@ class IAReqInstanceAlloc(IARequestBase):
>        "nics": self.nics,
>        "required_nodes": self.RequiredNodes(),
>        "hypervisor": self.hypervisor,
> +      "group_name": self.group_name,
>        }
>
>    def ValidateResult(self, ia, result):
> diff --git a/src/Ganeti/HTools/Backend/IAlloc.hs
> b/src/Ganeti/HTools/Backend/IAlloc.hs
> index 866efe7..c7d9efa 100644
> --- a/src/Ganeti/HTools/Backend/IAlloc.hs
> +++ b/src/Ganeti/HTools/Backend/IAlloc.hs
> @@ -211,10 +211,11 @@ parseData now body = do
>        _ | optype == C.iallocatorModeAlloc ->
>              do
>                rname     <- extrReq "name"
> +              rgn       <- maybeFromObj request "group_name"
>                req_nodes <- extrReq "required_nodes"
>                inew      <- parseBaseInstance rname request
>                let io = updateExclTags (extractExTags ctags) $ snd inew
> -              return $ Allocate io req_nodes
> +              return $ Allocate io (AllocDetails req_nodes rgn)
>          | optype == C.iallocatorModeReloc ->
>              do
>                rname     <- extrReq "name"
> @@ -243,14 +244,15 @@ parseData now body = do
>              do
>                arry <- extrReq "instances" :: Result [JSObject JSValue]
>                let inst_reqs = map fromJSObject arry
> -              prqs <- mapM (\r ->
> +              prqs <- forM inst_reqs (\r ->
>                                 do
>                                   rname     <- extrFromReq r "name"
> +                                 rgn       <- maybeFromObj request
> "group_name"
>                                   req_nodes <- extrFromReq r
> "required_nodes"
>                                   inew      <- parseBaseInstance rname r
>                                   let io = updateExclTags (extractExTags
> ctags)
>                                              $ snd inew
> -                                 return (io, req_nodes)) inst_reqs
> +                                 return (io, AllocDetails req_nodes rgn))
>                return $ MultiAllocate prqs
>          | otherwise -> fail ("Invalid request type '" ++ optype ++ "'")
>    return (msgs, Request rqtype cdata)
> @@ -399,8 +401,10 @@ processRequest :: Request -> Result IAllocResult
>  processRequest request =
>    let Request rqtype (ClusterData gl nl il _ _) = request
>    in case rqtype of
> -       Allocate xi reqn ->
> +       Allocate xi (AllocDetails reqn Nothing) ->
>           Cluster.tryMGAlloc gl nl il xi reqn >>= formatAllocate il
> +       Allocate xi (AllocDetails reqn (Just gn)) ->
> +         Cluster.tryGroupAlloc gl nl il gn xi reqn >>= formatAllocate il
>         Relocate idx reqn exnodes ->
>           processRelocate gl nl il idx reqn exnodes >>= formatRelocate
>         ChangeGroup gdxs idxs ->
> @@ -410,7 +414,9 @@ processRequest request =
>           Cluster.tryNodeEvac gl nl il mode xi >>=
>                  formatNodeEvac gl nl il
>         MultiAllocate xies ->
> -         Cluster.allocList gl nl il xies [] >>= formatMultiAlloc
> +         let flatten (inst, AllocDetails reqn mgn) = (inst, reqn, mgn)
> +         in Cluster.allocList gl nl il (map flatten xies) [] >>=
> +            formatMultiAlloc
>
>  -- | Reads the request from the data file(s).
>  readRequest :: FilePath -> IO Request
> diff --git a/src/Ganeti/HTools/Cluster.hs b/src/Ganeti/HTools/Cluster.hs
> index a95c613..3956a55 100644
> --- a/src/Ganeti/HTools/Cluster.hs
> +++ b/src/Ganeti/HTools/Cluster.hs
> @@ -63,6 +63,7 @@ module Ganeti.HTools.Cluster
>    -- * IAllocator functions
>    , genAllocNodes
>    , tryAlloc
> +  , tryGroupAlloc
>    , tryMGAlloc
>    , tryNodeEvac
>    , tryChangeGroup
> @@ -77,7 +78,7 @@ module Ganeti.HTools.Cluster
>    , splitCluster
>    ) where
>
> -import Control.Applicative (liftA2)
> +import Control.Applicative ((<$>), liftA2)
>  import Control.Arrow ((&&&))
>  import qualified Data.IntSet as IntSet
>  import Data.List
> @@ -821,6 +822,14 @@ sortMGResults sols =
>                               (extractScore . fromJust . asSolution) sol)
>    in sortBy (comparing solScore) sols
>
> +-- | Determines if a group is connected to the networks required by the
> +-- | instance.
> +hasRequiredNetworks :: Group.Group -> Instance.Instance -> Bool
> +hasRequiredNetworks ng inst = all hasNetwork (Instance.nics inst)
> +  where hasNetwork nic = case Nic.network nic of
> +          Just net -> net `elem` Group.networks ng
> +          Nothing -> True
> +
>  -- | Removes node groups which can't accommodate the instance
>  filterValidGroups :: [(Group.Group, (Node.List, Instance.List))]
>                    -> Instance.Instance
> @@ -828,17 +837,37 @@ filterValidGroups :: [(Group.Group, (Node.List,
> Instance.List))]
>  filterValidGroups [] _ = ([], [])
>  filterValidGroups (ng:ngs) inst =
>    let (valid_ngs, msgs) = filterValidGroups ngs inst
> -      hasNetwork nic = case Nic.network nic of
> -        Just net -> net `elem` Group.networks (fst ng)
> -        Nothing -> True
> -      hasRequiredNetworks = all hasNetwork (Instance.nics inst)
> -  in if hasRequiredNetworks
> +      hasRequiredNetworks' = hasRequiredNetworks (fst ng) inst
> +  in if hasRequiredNetworks'
>        then (ng:valid_ngs, msgs)
>        else (valid_ngs,
>              ("group " ++ Group.name (fst ng) ++
>               " is not connected to a network required by instance " ++
>               Instance.name inst):msgs)
>
> +-- | Finds an allocation solution for an instance on a group
> +findAllocation :: Group.List           -- ^ The group list
> +               -> Node.List            -- ^ The node list
> +               -> Instance.List        -- ^ The instance list
> +               -> Gdx                  -- ^ The group to allocate to
> +               -> Instance.Instance    -- ^ The instance to allocate
> +               -> Int                  -- ^ Required number of nodes
> +               -> Result (AllocSolution, [String])
> +findAllocation mggl mgnl mgil gdx inst cnt =
> +  let group' = Container.find gdx mggl
> +      hasRequiredNetworks' = hasRequiredNetworks group' inst
> +  in if hasRequiredNetworks'
> +    then let belongsTo nl' nidx = elem nidx (map Node.idx $
> Container.elems nl')
> +             nl = Container.filter ((== gdx) . Node.group) mgnl
> +             il = Container.filter (belongsTo nl . Instance.pNode) mgil
> +        in do
> +          solution <- genAllocNodes mggl nl cnt False >>=
> +                      tryAlloc nl il inst
> +          return (solution, solutionDescription (group', return solution))
> +    else Bad $ "The group " ++ Group.name group' ++
> +         "is not connected to a network required by instance " ++
> +        Instance.name inst
> +
>  -- | Finds the best group for an instance on a multi-group cluster.
>  --
>  -- Only solutions in @preferred@ and @last_resort@ groups will be
> @@ -888,6 +917,19 @@ tryMGAlloc mggl mgnl mgil inst cnt = do
>        selmsg = "Selected group: " ++ group_name
>    return $ solution { asLog = selmsg:all_msgs }
>
> +-- | Try to allocate an instance to a group.
> +tryGroupAlloc :: Group.List           -- ^ The group list
> +              -> Node.List            -- ^ The node list
> +              -> Instance.List        -- ^ The instance list
> +              -> String               -- ^ The allocation group (index)
> +              -> Instance.Instance    -- ^ The instance to allocate
> +              -> Int                  -- ^ Required number of nodes
> +              -> Result AllocSolution -- ^ Solution
> +tryGroupAlloc mggl mgnl ngil gn inst cnt = do
> +  gdx <- Group.idx <$> Container.findByName mggl gn
> +  (solution, msgs) <- findAllocation mggl mgnl ngil gdx inst cnt
> +  return $ solution { asLog = msgs }
> +
>  -- | Calculate the new instance list after allocation solution.
>  updateIl :: Instance.List           -- ^ The original instance list
>           -> Maybe Node.AllocElement -- ^ The result of the allocation
> attempt
> @@ -903,16 +945,21 @@ extractNl nl Nothing = nl
>  extractNl _ (Just (xnl, _, _, _)) = xnl
>
>  -- | Try to allocate a list of instances on a multi-group cluster.
> -allocList :: Group.List                  -- ^ The group list
> -          -> Node.List                   -- ^ The node list
> -          -> Instance.List               -- ^ The instance list
> -          -> [(Instance.Instance, Int)]  -- ^ The instance to allocate
> -          -> AllocSolutionList           -- ^ Possible solution list
> +allocList :: Group.List                                -- ^ The group list
> +          -> Node.List                                 -- ^ The node list
> +          -> Instance.List                             -- ^ The instance
> list
> +          -> [(Instance.Instance, Int, Maybe String)]  -- ^ The instance
> to
> +                                                       --   allocate
> +          -> AllocSolutionList                         -- ^ Possible
> solution
> +                                                       --   list
>            -> Result (Node.List, Instance.List,
> -                     AllocSolutionList)  -- ^ The final solution list
> +                     AllocSolutionList)                -- ^ The final
> solution
> +                                                       --   list
>  allocList _  nl il [] result = Ok (nl, il, result)
> -allocList gl nl il ((xi, xicnt):xies) result = do
> -  ares <- tryMGAlloc gl nl il xi xicnt
> +allocList gl nl il ((xi, xicnt, mgn):xies) result = do
> +  ares <- case mgn of
> +    Nothing -> tryMGAlloc gl nl il xi xicnt
> +    Just gn -> tryGroupAlloc gl nl il gn xi xicnt
>    let sol = asSolution ares
>        nl' = extractNl nl sol
>        il' = updateIl il sol
> diff --git a/src/Ganeti/HTools/Loader.hs b/src/Ganeti/HTools/Loader.hs
> index 7440ba3..69c10ec 100644
> --- a/src/Ganeti/HTools/Loader.hs
> +++ b/src/Ganeti/HTools/Loader.hs
> @@ -40,6 +40,7 @@ module Ganeti.HTools.Loader
>    , extractExTags
>    , updateExclTags
>    , RqType(..)
> +  , AllocDetails(..)
>    , Request(..)
>    , ClusterData(..)
>    , emptyCluster
> @@ -79,13 +80,21 @@ request-specific fields.
>
>  -}
>  data RqType
> -  = Allocate Instance.Instance Int           -- ^ A new instance
> allocation
> -  | Relocate Idx Int [Ndx]                   -- ^ Choose a new secondary
> node
> -  | NodeEvacuate [Idx] EvacMode              -- ^ node-evacuate mode
> -  | ChangeGroup [Gdx] [Idx]                  -- ^ Multi-relocate mode
> -  | MultiAllocate [(Instance.Instance, Int)] -- ^ Multi-allocate mode
> +  = Allocate Instance.Instance AllocDetails           -- ^ A new instance
> +                                                      --   allocation
> +  | Relocate Idx Int [Ndx]                            -- ^ Choose a new
> +                                                      --   secondary node
> +  | NodeEvacuate [Idx] EvacMode                       -- ^ node-evacuate
> mode
> +  | ChangeGroup [Gdx] [Idx]                           -- ^ Multi-relocate
> mode
> +  | MultiAllocate [(Instance.Instance, AllocDetails)] -- ^ Multi-allocate
> mode
>      deriving (Show)
>
> +-- | Allocation details for an instance, specifying
> +-- | required number of nodes, and
> +-- | an optional group (name) to allocate to
> +data AllocDetails = AllocDetails Int (Maybe String)
> +                    deriving (Show)
> +
>  -- | A complete request, as received from Ganeti.
>  data Request = Request RqType ClusterData
>                 deriving (Show)
> diff --git a/src/Ganeti/OpCodes.hs b/src/Ganeti/OpCodes.hs
> index 0cb840c..672e597 100644
> --- a/src/Ganeti/OpCodes.hs
> +++ b/src/Ganeti/OpCodes.hs
> @@ -428,6 +428,7 @@ $(genOpCode "OpCode"
>       , pInstBeParams
>       , pInstDisks
>       , pOptDiskTemplate
> +     , pOptGroupName
>       , pFileDriver
>       , pFileStorageDir
>       , pInstHvParams
> @@ -850,6 +851,7 @@ $(genOpCode "OpCode"
>       , pTargetGroups
>       , pIAllocatorSpindleUse
>       , pIAllocatorCount
> +     , pOptGroupName
>       ],
>       "iallocator")
>    , ("OpTestJqueue",
> diff --git a/test/hs/Test/Ganeti/OpCodes.hs
> b/test/hs/Test/Ganeti/OpCodes.hs
> index da791ad..70bfc63 100644
> --- a/test/hs/Test/Ganeti/OpCodes.hs
> +++ b/test/hs/Test/Ganeti/OpCodes.hs
> @@ -268,6 +268,7 @@ instance Arbitrary OpCodes.OpCode where
>            <*> pure emptyJSObject              -- beparams
>            <*> arbitrary                       -- disks
>            <*> arbitrary                       -- disk_template
> +          <*> genMaybe genNameNE              -- group_name
>            <*> arbitrary                       -- file_driver
>            <*> genMaybe genNameNE              -- file_storage_dir
>            <*> pure emptyJSObject              -- hvparams
> @@ -412,7 +413,8 @@ instance Arbitrary OpCodes.OpCode where
>            (genTags >>= mapM mkNonEmpty) <*>
>            arbitrary <*> arbitrary <*> genMaybe genNameNE <*>
>            arbitrary <*> genMaybe genNodeNamesNE <*> arbitrary <*>
> -          genMaybe genNamesNE <*> arbitrary <*> arbitrary
> +          genMaybe genNamesNE <*> arbitrary <*> arbitrary <*>
> +          genMaybe genNameNE
>        "OP_TEST_JQUEUE" ->
>          OpCodes.OpTestJqueue <$> arbitrary <*> arbitrary <*>
>            resize 20 (listOf genFQDN) <*> arbitrary
> diff --git a/test/py/cmdlib/test_unittest.py
> b/test/py/cmdlib/test_unittest.py
> index 47fc761..d4d68e0 100644
> --- a/test/py/cmdlib/test_unittest.py
> +++ b/test/py/cmdlib/test_unittest.py
> @@ -126,6 +126,7 @@ class TestLUTestAllocator(CmdlibTestCase):
>                        memory=0,
>                        disk_template=constants.DT_DISKLESS,
>                        os="mock_os",
> +                      group_name = "default",
>                        vcpus=1)
>      self.valid_multi_alloc_op = \
>        self.CopyOpCode(self.base_op,
> @@ -134,6 +135,7 @@ class TestLUTestAllocator(CmdlibTestCase):
>                        memory=0,
>                        disk_template=constants.DT_DISKLESS,
>                        os="mock_os",
> +                      group_name = "default",
>                        vcpus=1)
>      self.valid_reloc_op = \
>        self.CopyOpCode(self.base_op,
> --
> 1.9.1.423.g4596e3a
>
>

Reply via email to