Hi, 
This patch has already been through a couple of rounds of offline review, and I 
find this latest version good. I'd like it if someone with long-time Haskell 
experience would take a peek though, just to make sure we didn't miss 
anything important coding-style/readability-wise.
Thanks,
Viktor

On Friday, September 2, 2016 at 3:04:45 PM UTC+1, Federico Pareschi wrote:
>
> This is just a partial implementation of the static lock prediction 
> system. 
> With this, the Ganeti queue system tries to predict the locks of the running 
> jobs and schedules new jobs that are less likely to block. 
> It is a partial implementation that might cause starvation for jobs stuck 
> in the queue; following patches will address this issue. 
>
> Signed-off-by: Federico Morg Pareschi <[email protected]> 
> --- 
>  src/Ganeti/Config.hs                    |   1 + 
>  src/Ganeti/Constants.hs                 |  27 ++ 
>  src/Ganeti/JQScheduler.hs               |  26 +- 
>  src/Ganeti/JQueue.hs                    |   1 + 
>  src/Ganeti/JQueue/LockDecls.hs          | 590 ++++++++++++++++++++++++ 
>  src/Ganeti/Objects.hs                   |   1 - 
>  test/hs/Test/Ganeti/JQueue/LockDecls.hs | 150 ++++++ 
>  test/hs/Test/Ganeti/Objects.hs          |  31 +- 
>  test/hs/Test/Ganeti/OpCodes.hs          | 777 
> ++++++++++++++++---------------- 
>  test/hs/Test/Ganeti/TestCommon.hs       |  13 +- 
>  test/hs/htest.hs                        |   2 + 
>  11 files changed, 1221 insertions(+), 398 deletions(-) 
>  create mode 100644 src/Ganeti/JQueue/LockDecls.hs 
>  create mode 100644 test/hs/Test/Ganeti/JQueue/LockDecls.hs 
>
> diff --git a/src/Ganeti/Config.hs b/src/Ganeti/Config.hs 
> index 5386787..8b02c6a 100644 
> --- a/src/Ganeti/Config.hs 
> +++ b/src/Ganeti/Config.hs 
> @@ -81,6 +81,7 @@ module Ganeti.Config 
>      , getAllLVs 
>      , buildLinkIpInstnameMap 
>      , instNodes 
> +    , instName 
>      ) where 
>   
>  import Prelude () 
> diff --git a/src/Ganeti/Constants.hs b/src/Ganeti/Constants.hs 
> index e20311e..1e0b877 100644 
> --- a/src/Ganeti/Constants.hs 
> +++ b/src/Ganeti/Constants.hs 
> @@ -5607,3 +5607,30 @@ cliWfjcFrequency = 20 
>  -- | Default 'WaitForJobChange' timeout in seconds 
>  defaultWfjcTimeout :: Int 
>  defaultWfjcTimeout = 60 
> + 
> +-- | Base value for static lock declaration weight. 
> +staticLockBaseWeight :: Double 
> +staticLockBaseWeight = 1.0 
> + 
> +-- | Weight assigned to two locks that do not conflict with each other. 
> +staticLockNoWeight :: Double 
> +staticLockNoWeight = 0 
> + 
> +-- | Weight assigned to two locks that do not conflict but whose 
> introduction 
> +-- in the system raises the chance of contention. 
> +staticLockDegradeWeight :: Double 
> +staticLockDegradeWeight = 0.3 
> + 
> +-- | Weight assigned to two locks that do not conflict but whose 
> introduction 
> +-- in the system significantly raises the chance of contention. 
> +staticLockHeavyDegradeWeight :: Double 
> +staticLockHeavyDegradeWeight = 0.5 
> + 
> +-- | Weight assigned to two locks that might conflict but there is not 
> enough 
> +-- available data to know with certainty. 
> +staticLockMaybeBlockWeight :: Double 
> +staticLockMaybeBlockWeight = 1.5 
> + 
> +-- | Weight assigned to two locks that will surely conflict. 
> +staticLockSureBlockWeight :: Double 
> +staticLockSureBlockWeight = 3 
> diff --git a/src/Ganeti/JQScheduler.hs b/src/Ganeti/JQScheduler.hs 
> index 1069d06..14680e6 100644 
> --- a/src/Ganeti/JQScheduler.hs 
> +++ b/src/Ganeti/JQScheduler.hs 
> @@ -86,10 +86,12 @@ import Ganeti.JQScheduler.Filtering (applyingFilter, 
> jobFiltering) 
>  import Ganeti.JQScheduler.Types 
>  import Ganeti.JQScheduler.ReasonRateLimiting (reasonRateLimit) 
>  import Ganeti.JQueue as JQ 
> +import Ganeti.JQueue.LockDecls 
>  import Ganeti.JSON (fromContainer) 
>  import Ganeti.Lens hiding (chosen) 
>  import Ganeti.Logging 
>  import Ganeti.Objects 
> +import Ganeti.OpCodes 
>  import Ganeti.Path 
>  import Ganeti.Query.Exec (forkPostHooksProcess) 
>  import Ganeti.Types 
> @@ -322,20 +324,36 @@ jobEligible queue jWS = 
>        blocks = flip elem jdeps . qjId . jJob 
>    in not . any blocks . liftA2 (++) qRunning qEnqueued $ queue 
>   
> +extractOpCode :: JobWithStat -> Maybe OpCode 
> +extractOpCode job = 
> +  let qop = listToMaybe . qjOps . jJob $ job 
> +      metaOps = maybe [] (JQ.toMetaOpCode . qoInput) qop 
> +  in (fmap metaOpCode) . listToMaybe $ metaOps 
> + 
> +-- | Sort the given job queue by its static lock weight in relation to 
> the 
> +-- currently running jobs. 
> +sortByStaticLocks :: ConfigData -> Queue -> [JobWithStat] -> 
> [JobWithStat] 
> +sortByStaticLocks cfg queue = sortBy (compare `on` opWeight) 
> +  where opWeight :: JobWithStat -> Double 
> +        opWeight job = staticWeight cfg (extractOpCode job) runningOps 
> +        runningOps = catMaybes . (fmap extractOpCode) . qRunning $ queue 
> + 
>  -- | Decide on which jobs to schedule next for execution. This is the 
>  -- pure function doing the scheduling. 
> -selectJobsToRun :: Int  -- ^ How many jobs are allowed to run at the 
> +selectJobsToRun :: ConfigData 
> +                -> Int  -- ^ How many jobs are allowed to run at the 
>                          -- same time. 
>                  -> Set FilterRule -- ^ Filter rules to respect for 
> scheduling 
>                  -> Queue 
>                  -> (Queue, [JobWithStat]) 
> -selectJobsToRun count filters queue = 
> +selectJobsToRun cfg count filters queue = 
>    let n = count - length (qRunning queue) - length (qManipulated queue) 
>        chosen = take n 
>                 . jobFiltering queue filters 
>                 . reasonRateLimit queue 
>                 . sortBy (comparing (calcJobPriority . jJob)) 
>                 . filter (jobEligible queue) 
> +               . sortByStaticLocks cfg queue 
>                 $ qEnqueued queue 
>        remain = deleteFirstsBy ((==) `on` (qjId . jJob)) (qEnqueued queue) 
> chosen 
>    in (queue {qEnqueued=remain, qRunning=qRunning queue ++ chosen}, 
> chosen) 
> @@ -431,7 +449,7 @@ scheduleSomeJobs qstate = do 
>        -- Select the jobs to run. 
>        count <- getMaxRunningJobs qstate 
>        chosen <- atomicModifyIORef (jqJobs qstate) 
> -                                  (selectJobsToRun count filters) 
> +                                  (selectJobsToRun cfg count filters) 
>        let jobs = map jJob chosen 
>        unless (null chosen) . logInfo . (++) "Starting jobs: " . commaJoin 
>          $ map (show . fromJobId . qjId) jobs 
> @@ -450,7 +468,7 @@ scheduleSomeJobs qstate = do 
>  showQueue :: Queue -> String 
>  showQueue (Queue {qEnqueued=waiting, qRunning=running}) = 
>    let showids = show . map (fromJobId . qjId . jJob) 
> -  in "Waiting jobs: " ++ showids waiting 
> +  in "Waiting jobs: " ++ showids waiting 
>         ++ "; running jobs: " ++ showids running 
>   
>  -- | Check if a job died, and clean up if so. Return True, if 
> diff --git a/src/Ganeti/JQueue.hs b/src/Ganeti/JQueue.hs 
> index a56cb52..58e8ccf 100644 
> --- a/src/Ganeti/JQueue.hs 
> +++ b/src/Ganeti/JQueue.hs 
> @@ -75,6 +75,7 @@ module Ganeti.JQueue 
>      , notifyJob 
>      , queueDirPermissions 
>      , archiveJobs 
> +    , toMetaOpCode 
>      -- re-export 
>      , Timestamp 
>      , InputOpCode(..) 
> diff --git a/src/Ganeti/JQueue/LockDecls.hs 
> b/src/Ganeti/JQueue/LockDecls.hs 
> new file mode 100644 
> index 0000000..2fab615 
> --- /dev/null 
> +++ b/src/Ganeti/JQueue/LockDecls.hs 
> @@ -0,0 +1,590 @@ 
> +{-| Job-specific static lock declarations. 
> + 
> +-} 
> + 
> +{- 
> + 
> +Copyright (C) 2016 Google Inc. 
> +All rights reserved. 
> + 
> +Redistribution and use in source and binary forms, with or without 
> +modification, are permitted provided that the following conditions are 
> +met: 
> + 
> +1. Redistributions of source code must retain the above copyright notice, 
> +this list of conditions and the following disclaimer. 
> + 
> +2. Redistributions in binary form must reproduce the above copyright 
> +notice, this list of conditions and the following disclaimer in the 
> +documentation and/or other materials provided with the distribution. 
> + 
> +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
> +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
> +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 
> PARTICULAR 
> +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
> +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
> +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
> +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
> +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
> +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
> +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
> +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
> + 
> +-} 
> + 
> +module Ganeti.JQueue.LockDecls 
> +    (staticWeight) where 
> + 
> +import Data.List (foldl') 
> +import Data.Maybe (isNothing, fromMaybe, catMaybes, isJust, fromJust) 
> +import Data.Map (Map) 
> +import qualified Data.Map as Map 
> +import Data.Set (Set) 
> +import qualified Data.Set as Set 
> + 
> +import Ganeti.BasicTypes 
> +import Ganeti.Config (getInstance, getNode, getGroup, instNodes) 
> +import qualified Ganeti.Constants as C 
> +import Ganeti.Errors 
> +import Ganeti.Objects (ConfigData, nodeName, instName, groupName) 
> +import qualified Ganeti.Objects.Instance as ObjInst 
> +import Ganeti.OpCodes 
> +import Ganeti.Types 
> + 
> +-- | The different levels for locks to be acquired at. 
> +data StaticLockLevel = Cluster 
> +                     | NodeGroup 
> +                     | Instance 
> +                     | Node 
> +                     | NodeRes 
> +                     | Network 
> +                     deriving (Show, Ord, Eq) 
> + 
> +-- | The different types of locks that can be acquired. 
> +data StaticLockType = None 
> +                    | Shared (Set String) 
> +                    | UnknownShared 
> +                    | AllShared 
> +                    | Exclusive (Set String) 
> +                    | UnknownExclusive 
> +                    | AllExclusive 
> +                    deriving (Show, Eq, Ord) 
> + 
> +-- | Record containing the state of locks for each level, when possible. 
> +data StaticLocks = StaticLocks { sNodeGroupLock :: Maybe StaticLockType 
> +                               , sInstanceLock :: Maybe StaticLockType 
> +                               , sNodeLock :: Maybe StaticLockType 
> +                               , sNodeResLock :: Maybe StaticLockType 
> +                               , sNetworkLock :: Maybe StaticLockType 
> +                               } 
> + 
> +emptyLocks :: StaticLocks 
> +emptyLocks = StaticLocks { sNodeGroupLock = Just None 
> +                         , sInstanceLock = Just None 
> +                         , sNodeLock = Just None 
> +                         , sNodeResLock = Just None 
> +                         , sNetworkLock = Just None 
> +                         } 
> + 
> +-- | Merge two lock types together in ascending order, in case of 
> Exclusive or 
> +-- Shared lock conflict, merge the two resources. 
> +mergeLocks :: StaticLockType -> StaticLockType -> StaticLockType 
> +mergeLocks x y 
> +  | Exclusive ex1 <- x, 
> +    Exclusive ex2 <- y = Exclusive (Set.union ex1 ex2) 
> +  | Shared sh1 <- x, 
> +    Shared sh2 <- y = Shared (Set.union sh1 sh2) 
> +  | x > y = x 
> +  | otherwise = y 
> + 
> +-- | Assign a severity arbitrary weight to each lock type union. 
> +computeLocks :: StaticLockType -> StaticLockType -> Double 
> +computeLocks None _ = C.staticLockNoWeight 
> +computeLocks (Shared _) None = C.staticLockDegradeWeight 
> +computeLocks (Shared xs) (Exclusive ys) 
> +  | Set.null $ Set.intersection xs ys = C.staticLockDegradeWeight 
> +  | otherwise = C.staticLockSureBlockWeight 
> +computeLocks (Shared _) UnknownExclusive = C.staticLockMaybeBlockWeight 
> +computeLocks (Shared _) AllExclusive = C.staticLockSureBlockWeight 
> +computeLocks (Shared _) _ = C.staticLockNoWeight 
> +computeLocks UnknownShared (Exclusive _) = C.staticLockMaybeBlockWeight 
> +computeLocks UnknownShared UnknownExclusive = 
> C.staticLockMaybeBlockWeight 
> +computeLocks UnknownShared AllExclusive = C.staticLockSureBlockWeight 
> +computeLocks UnknownShared _ = C.staticLockDegradeWeight 
> +computeLocks AllShared (Exclusive _) = C.staticLockSureBlockWeight 
> +computeLocks AllShared UnknownExclusive = C.staticLockSureBlockWeight 
> +computeLocks AllShared AllExclusive = C.staticLockSureBlockWeight 
> +computeLocks AllShared _ = C.staticLockDegradeWeight 
> +computeLocks (Exclusive _) None = C.staticLockHeavyDegradeWeight 
> +computeLocks (Exclusive _) UnknownShared = C.staticLockMaybeBlockWeight 
> +computeLocks (Exclusive _) UnknownExclusive = 
> C.staticLockMaybeBlockWeight 
> +computeLocks (Exclusive _) AllShared = C.staticLockSureBlockWeight 
> +computeLocks (Exclusive _) AllExclusive = C.staticLockSureBlockWeight 
> +computeLocks (Exclusive xs) (Shared ys) 
> +  | Set.null $ Set.intersection xs ys = C.staticLockHeavyDegradeWeight 
> +  | otherwise = C.staticLockSureBlockWeight 
> +computeLocks (Exclusive xs) (Exclusive ys) 
> +  | Set.null $ Set.intersection xs ys = C.staticLockHeavyDegradeWeight 
> +  | otherwise = C.staticLockSureBlockWeight 
> +computeLocks UnknownExclusive None = C.staticLockHeavyDegradeWeight 
> +computeLocks UnknownExclusive AllShared = C.staticLockSureBlockWeight 
> +computeLocks UnknownExclusive AllExclusive = C.staticLockSureBlockWeight 
> +computeLocks UnknownExclusive _ = C.staticLockMaybeBlockWeight 
> +computeLocks AllExclusive None = C.staticLockHeavyDegradeWeight 
> +computeLocks AllExclusive _ = C.staticLockSureBlockWeight 
> + 
> +-- | Check if the opcode type requires the Big Ganeti Lock or not. 
> +hasBGL :: OpCode -> Bool 
> +hasBGL OpNodeAdd{} = True 
> +hasBGL OpNodeRemove{} = True 
> +hasBGL OpClusterActivateMasterIp{} = True 
> +hasBGL OpClusterDeactivateMasterIp{} = True 
> +hasBGL OpClusterDestroy{} = True 
> +hasBGL OpClusterPostInit{} = True 
> +hasBGL OpClusterRename{} = True 
> +hasBGL _ = False 
> + 
> +-- | Convert a TagKind to a StaticLockLevel type. 
> +tagKindToStaticLockLevel :: TagKind -> StaticLockLevel 
> +tagKindToStaticLockLevel TagKindCluster = Cluster 
> +tagKindToStaticLockLevel TagKindGroup = NodeGroup 
> +tagKindToStaticLockLevel TagKindInstance = Instance 
> +tagKindToStaticLockLevel TagKindNode = Node 
> +tagKindToStaticLockLevel TagKindNetwork = Network 
> + 
> + 
> +-- | The three to<resource>Lock functions are wrappers used to acquire a 
> +-- common resource parameter (instance, node, or group name) and convert 
> it to 
> +-- the appropriate LockType using maybeKnownLock as a common utility 
> function. 
> +-- The function is called with either UnknownShared or UnknownExclusive 
> +-- LockTypes, which are converted respectively to their Shared or 
> Exclusive 
> +-- lock declaration. If the resource data is missing due to config 
> errors, 
> +-- then we assume the worst case scenario of an Unknown lock being 
> requested 
> +-- instead. 
> + 
> +-- | Convert an ErrorResult to the appropriate StaticLockType if 
> possible. 
> +maybeKnownLock :: StaticLockType -> ErrorResult String -> StaticLockType 
> +maybeKnownLock UnknownShared (Ok x) = Shared (Set.singleton x) 
> +maybeKnownLock UnknownExclusive (Ok x) = Exclusive (Set.singleton x) 
> +maybeKnownLock lockType _ = lockType 
> + 
> +-- | Convert an instance resource name to its appropriate lock. 
> +toInstanceLock :: ConfigData -> StaticLockType -> String -> 
> StaticLockType 
> +toInstanceLock cfg lock = maybeKnownLock lock 
> +                          . instNameWrapper 
> +                          . getInstance cfg 
> +  where instNameWrapper :: ErrorResult ObjInst.Instance -> ErrorResult 
> String 
> +        instNameWrapper (Ok inst) = maybeToError "Instance name not 
> found" 
> +                                    . instName $ inst 
> +        instNameWrapper (Bad x) = Bad x 
> + 
> +-- | Convert a node resource name to its appropriate lock. 
> +toNodeLock :: ConfigData -> StaticLockType -> NonEmptyString -> 
> StaticLockType 
> +toNodeLock cfg lock = maybeKnownLock lock 
> +                      . fmap nodeName 
> +                      . getNode cfg 
> +                      . fromNonEmpty 
> + 
> +-- | Convert a node group resource name to its appropriate lock. 
> +toGroupLock :: ConfigData -> StaticLockType -> NonEmptyString -> 
> StaticLockType 
> +toGroupLock cfg lock = maybeKnownLock lock 
> +                       . fmap groupName 
> +                       . getGroup cfg 
> +                       . fromNonEmpty 
> + 
> +-- | Obtain the node set (primary and secondaries) from an opcode's 
> instance. 
> +toInstanceNodeSet :: ConfigData -> OpCode -> Set String 
> +toInstanceNodeSet cfg = toNodes 
> +                        . fmap (instNodes cfg) 
> +                        . getInstance cfg 
> +                        . opInstanceName 
> +  where toNodes :: ErrorResult (Set String) -> Set String 
> +        toNodes (Ok nodes) = nodes 
> +        toNodes (Bad _) = Set.empty 
> + 
> +-- | Wrapper function for Tag opcode lock declarations. 
> +tagLocks :: ConfigData 
> +         -> OpCode 
> +         -> StaticLockType 
> +         -> StaticLocks 
> +         -> StaticLocks 
> +tagLocks cfg op lock currLocks = 
> +  let resource = tagKindToStaticLockLevel $ opKind op 
> +      tagName = fromMaybe "" $ opTagsGetName op 
> +      groupLock = (toGroupLock cfg lock) . fromJust . mkNonEmpty $ 
> tagName 
> +      instanceLock = toInstanceLock cfg lock tagName 
> +      nodeLock = (toNodeLock cfg lock) . fromJust . mkNonEmpty $ tagName 
> +      locks 
> +        | resource == NodeGroup = currLocks { sNodeGroupLock = Just 
> groupLock 
> +                                            , sInstanceLock = Just None 
> +                                            , sNodeLock = Just None 
> +                                            , sNetworkLock = Just None 
> +                                            } 
> +        | resource == Instance = currLocks { sNodeGroupLock = Just None 
> +                                           , sInstanceLock = Just 
> instanceLock 
> +                                           , sNodeLock = Just None 
> +                                           , sNetworkLock = Just None 
> +                                           } 
> +        | resource == Node = currLocks { sNodeGroupLock = Just None 
> +                                       , sInstanceLock = Just None 
> +                                       , sNodeLock = Just nodeLock 
> +                                       , sNetworkLock = Just None 
> +                                       } 
> +        | resource == Network = currLocks { sNodeGroupLock = Just None 
> +                                          , sInstanceLock = Just None 
> +                                          , sNodeLock = Just None 
> +                                          , sNetworkLock = Just lock 
> +                                          } 
> +        | otherwise = emptyLocks 
> +      in locks 
> + 
> +-- | Check if the given opcode has the iallocator parameter enabled. 
> +hasIAllocator :: OpCode -> Bool 
> +hasIAllocator = isJust . opIallocator 
> + 
> +-- | Declare the instance level locks for the given opcode. 
> +instanceLocks :: ConfigData -> OpCode -> StaticLocks -> StaticLocks 
> +instanceLocks cfg op s = 
> +  let instanceLock = toInstanceLock cfg 
> +      iName = opInstanceName op 
> +      mkLock st l = st { sInstanceLock = Just l } 
> +      defaultExclusive = instanceLock UnknownExclusive iName 
> +  in case op of 
> +       OpInstanceCreate{} -> mkLock s defaultExclusive 
> +       OpInstanceRecreateDisks{} -> mkLock s defaultExclusive 
> +       OpInstanceGrowDisk{} -> mkLock s defaultExclusive 
> +       OpInstanceActivateDisks{} -> mkLock s defaultExclusive 
> +       OpInstanceRemove{} -> mkLock s defaultExclusive 
> +       OpInstanceMove{} -> mkLock s defaultExclusive 
> +       OpInstanceChangeGroup{} -> mkLock s defaultExclusive 
> +       OpInstanceSetParams{} -> mkLock s defaultExclusive 
> +       OpBackupPrepare{} -> mkLock s defaultExclusive 
> +       OpBackupExport{} -> mkLock s defaultExclusive 
> +       OpInstanceStartup{} -> mkLock s defaultExclusive 
> +       OpInstanceShutdown{} -> mkLock s defaultExclusive 
> +       OpInstanceReboot{} -> mkLock s defaultExclusive 
> +       OpInstanceFailover{} -> mkLock s defaultExclusive 
> +       OpInstanceMigrate{} -> mkLock s defaultExclusive 
> +       OpInstanceReplaceDisks{} -> mkLock s defaultExclusive 
> +       OpInstanceRename{} -> let newName = fromNonEmpty $ opNewName op 
> +                                 newLock = Exclusive $ Set.singleton 
> newName 
> +                             in mkLock s $ mergeLocks newLock 
> defaultExclusive 
> +       OpNodeSetParams{} -> if isNothing $ opSecondaryIp op 
> +                               then mkLock s None 
> +                               else mkLock s UnknownShared 
> +       OpClusterRepairDiskSizes{} -> 
> +         let instanceList = map fromNonEmpty $ opInstances op 
> +             lock 
> +               | null instanceList = AllExclusive 
> +               | otherwise = foldl' mergeLocks None 
> +                           . map (instanceLock UnknownExclusive) $ 
> instanceList 
> +         in mkLock s lock 
> +       OpInstanceConsole{} -> mkLock s . instanceLock UnknownShared $ 
> iName 
> +       _ -> s 
> + 
> + 
> +-- | Declare the node and node-res level locks for the given opcode. 
> +nodeLocks :: ConfigData -> OpCode -> StaticLocks -> StaticLocks 
> +nodeLocks cfg op s = 
> +  let nName = opNodeName op 
> +      nodeLock = toNodeLock cfg 
> +      nodeLockList lockType = foldl' mergeLocks None . map (nodeLock 
> lockType) 
> +      mkNodeLock st l = st { sNodeLock = Just l } 
> +      mkNodeResLock st l = st { sNodeResLock = Just l } 
> +      mkBothLocks st l = mkNodeResLock (mkNodeLock st l) l 
> +      defaultExclusive = nodeLock UnknownExclusive nName 
> +      isOpportunistic = opOpportunisticLocking op 
> +      instNodeSet = toInstanceNodeSet cfg op 
> +      migrateLock 
> +        | hasIAllocator op = UnknownExclusive 
> +        | Set.null instNodeSet = AllExclusive 
> +        | otherwise = Exclusive (fromMaybe instNodeSet 
> +                                 . fmap ((flip Set.insert) instNodeSet 
> +                                         . fromNonEmpty) 
> +                                 . opTargetNode $ op) 
> +      mergeLockList x y 
> +        | null y = AllShared 
> +        | otherwise = foldl' mergeLocks None $ map x y 
> +      mergeNodeLocks = mergeLockList (nodeLock UnknownShared) $ opNodes 
> op 
> +  in case op of 
> +       OpGroupAssignNodes{} -> mkNodeLock s . nodeLockList 
> UnknownExclusive $ 
> +                               opReqNodes op 
> +       OpRestrictedCommand{} -> mkNodeLock s . nodeLockList UnknownShared 
> $ 
> +                                opReqNodes op 
> +       OpRepairCommand{} -> mkNodeLock s defaultExclusive 
> +       OpNodeModifyStorage{} -> mkNodeLock s defaultExclusive 
> +       OpRepairNodeStorage{} -> mkNodeLock s defaultExclusive 
> +       OpInstanceMigrate{} -> mkBothLocks s migrateLock 
> +       OpInstanceFailover{} -> mkBothLocks s migrateLock 
> +       OpInstanceMultiAlloc{} -> 
> +         let lock 
> +               | hasIAllocator op && (not isOpportunistic) = AllShared 
> +               | otherwise = UnknownShared 
> +         in mkBothLocks s lock 
> +       OpInstanceCreate{} -> 
> +         let nodeList = catMaybes [opPnode op, opSnode op] 
> +             lock 
> +               | not . null $ nodeList = nodeLockList UnknownExclusive 
> nodeList 
> +               | hasIAllocator op && (not isOpportunistic) = AllExclusive 
> +               | otherwise = UnknownExclusive 
> +         in mkBothLocks s lock 
> +       OpNodeSetParams{} -> 
> +         let mightDemote = 
> +               Just True `elem` [(fmap not) . opMasterCandidate $ op, 
> +                                 opOffline op, opDrained op, 
> +                                 (fmap not) . opMasterCapable $ op] 
> +             lock 
> +               | opAutoPromote op && mightDemote = AllExclusive 
> +               | otherwise = defaultExclusive 
> +         in mkBothLocks s lock 
> +       OpNodeMigrate{} -> mkNodeLock s . nodeLock UnknownShared $ nName 
> +       OpNodeQueryvols{} -> mkNodeLock s mergeNodeLocks 
> +       OpNodeQueryStorage{} -> mkNodeLock s mergeNodeLocks 
> +       OpClusterRepairDiskSizes{} -> 
> +         let instanceList = map fromNonEmpty $ opInstances op 
> +             lock 
> +               | null instanceList = AllShared 
> +               | otherwise = UnknownShared 
> +         in mkNodeResLock s lock 
> +       _ -> s 
> + 
> +-- | Declare the group level locks for the given opcode. 
> +groupLocks :: ConfigData -> OpCode -> StaticLocks -> StaticLocks 
> +groupLocks cfg op s = 
> +  let gName = opGroupName op 
> +      groupLock = toGroupLock cfg 
> +      mkLock st l = st { sNodeGroupLock = Just l } 
> +      defaultExclusive = groupLock UnknownExclusive gName 
> +      defaultShared = groupLock UnknownShared gName 
> +  in case op of 
> +       OpGroupAdd{} -> mkLock s defaultExclusive 
> +       OpGroupSetParams{} -> mkLock s defaultExclusive 
> +       OpGroupRemove{} -> mkLock s defaultExclusive 
> +       OpGroupRename{} -> mkLock s defaultExclusive 
> +       OpNetworkDisconnect{} -> mkLock s defaultExclusive 
> +       OpNetworkConnect{} -> mkLock s defaultShared 
> +       OpClusterVerifyGroup{} -> mkLock s defaultShared 
> +       OpInstanceReplaceDisks{} -> if hasIAllocator op 
> +                                      then mkLock s UnknownShared 
> +                                      else mkLock s None 
> +       OpInstanceCreate{} -> 
> +         let gLock 
> +              | sNodeLock s == Just AllExclusive = AllShared 
> +              | otherwise = UnknownShared 
> +         in mkLock s gLock 
> +       OpClusterVerifyDisks{} -> 
> +         let maybeGroupLock = 
> +               maybe AllShared (groupLock UnknownShared) (opOptGroupName 
> op) 
> +         in mkLock s maybeGroupLock 
> +       _ -> s 
> + 
> +-- | Declare the network level locks for the given opcode. 
> +networkLocks :: OpCode -> StaticLocks -> StaticLocks 
> +networkLocks op s = 
> +  let exclusiveLock = 
> +        Exclusive (Set.singleton . fromNonEmpty . opNetworkName $ op) 
> +      sharedLock = 
> +        Shared (Set.singleton . fromNonEmpty . opNetworkName $ op) 
> +      mkLock st l = st { sNetworkLock = Just l } 
> +  in case op of 
> +       OpNetworkAdd{} -> mkLock s exclusiveLock 
> +       OpNetworkRemove{} -> mkLock s exclusiveLock 
> +       OpNetworkSetParams{} -> mkLock s exclusiveLock 
> +       OpNetworkConnect{} -> mkLock s sharedLock 
> +       _ -> s 
> + 
> + 
> +-- | Function calculating the complete lock declaration for each opcode 
> type. 
> +staticLocks :: ConfigData -> OpCode -> Map StaticLockLevel StaticLockType 
> +staticLocks cfg op = 
> +  let lmap = lockDecl op 
> +      locks = 
> +        case op of 
> +          OpTagsGet{ opUseLocking = True } -> tagLocks cfg op 
> UnknownShared lmap 
> +          OpTagsSet{} -> tagLocks cfg op UnknownExclusive lmap 
> +          OpTagsDel{} -> tagLocks cfg op UnknownExclusive lmap 
> +          OpInstanceQueryData{ opUseLocking = False 
> +                             , opStatic = True 
> +                             } -> emptyLocks 
> +          _ -> groupLocks cfg op . nodeLocks cfg op . instanceLocks cfg 
> op 
> +               . networkLocks op $ lmap 
> +  in staticLocksToMap locks 
> + 
> +staticLocksToMap :: StaticLocks -> Map StaticLockLevel StaticLockType 
> +staticLocksToMap locks = 
> +  Map.fromList [(NodeGroup, getLock . sNodeGroupLock $ locks) 
> +               ,(Instance, getLock . sInstanceLock $ locks) 
> +               ,(Node, getLock . sNodeLock $ locks) 
> +               ,(NodeRes, getLock . sNodeResLock $ locks) 
> +               ,(Network, getLock . sNetworkLock $ locks) 
> +               ] 
> +  where getLock :: Maybe StaticLockType -> StaticLockType 
> +        getLock (Just x) = x 
> +        getLock Nothing = AllExclusive 
> + 
> + 
> +-- | Static declaration of locks for each opcode. In case of an empty 
> value 
> +-- that needs to be filled dynamically at runtime by the staticLocks 
> function, 
> +-- we use a Nothing. 
> +lockDecl :: OpCode -> StaticLocks 
> +lockDecl op = 
> +  let noLockExcept = emptyLocks 
> +      lockA = noLockExcept { sNodeGroupLock = Nothing 
> +                           , sInstanceLock = Just UnknownExclusive 
> +                           } 
> +      lockB = noLockExcept { sNodeGroupLock = Just AllShared 
> +                           , sInstanceLock = Just UnknownShared 
> +                           , sNodeLock = Just UnknownShared 
> +                           } 
> +      lockC = noLockExcept { sNodeGroupLock = Nothing 
> +                           , sInstanceLock = Nothing 
> +                           , sNodeLock = Nothing 
> +                           , sNetworkLock = Nothing 
> +                           } 
> +      lockD = noLockExcept { sNodeGroupLock = Just UnknownShared 
> +                           , sInstanceLock = Nothing 
> +                           , sNodeLock = Just UnknownExclusive 
> +                           , sNodeResLock = Just UnknownExclusive 
> +                           } 
> +      lockE = noLockExcept { sInstanceLock = Nothing 
> +                           , sNodeLock = Just UnknownExclusive 
> +                           , sNodeResLock = Just UnknownExclusive 
> +                           } 
> +      lockF = noLockExcept { sInstanceLock = Nothing 
> +                           , sNodeLock = Just UnknownExclusive 
> +                           } 
> +      lockG = noLockExcept { sNodeGroupLock = Just AllShared 
> +                           , sNetworkLock = Nothing 
> +                           } 
> +      lockH = noLockExcept { sInstanceLock = Nothing 
> +                           , sNodeLock = Nothing 
> +                           , sNodeResLock = Nothing 
> +                           } 
> +  in case op of 
> +       OpGroupAdd{} -> noLockExcept { sNodeGroupLock = Nothing } 
> +       OpGroupRename{} -> noLockExcept { sNodeGroupLock = Nothing } 
> +       OpGroupRemove{} -> noLockExcept { sNodeGroupLock = Nothing } 
> +       OpClusterVerifyDisks{} -> noLockExcept { sNodeGroupLock = Nothing 
> } 
> +       OpGroupAssignNodes{} -> 
> +         noLockExcept { sNodeGroupLock = Just UnknownExclusive 
> +                      , sNodeLock = Nothing 
> +                      } 
> +       OpGroupSetParams{} -> lockA 
> +       OpNetworkDisconnect{} -> lockA 
> +       OpGroupEvacuate{} -> lockB 
> +       OpGroupVerifyDisks{} -> lockB 
> +       OpInstanceCreate{} -> noLockExcept { sNodeGroupLock = Nothing 
> +                                          , sInstanceLock = Nothing 
> +                                          , sNodeLock = Nothing 
> +                                          , sNodeResLock = Nothing 
> +                                          } 
> +       OpTagsGet{} -> lockC 
> +       OpTagsSet{} -> lockC 
> +       OpTagsDel{} -> lockC 
> +       OpInstanceRecreateDisks{} -> lockD 
> +       OpInstanceSetParams{} -> lockD 
> +       OpInstanceGrowDisk{} -> lockE 
> +       OpInstanceRemove{} -> lockE 
> +       OpInstanceMultiAlloc{} -> noLockExcept { sNodeLock = Nothing 
> +                                              , sNodeResLock = Nothing 
> +                                              } 
> +       OpInstanceReplaceDisks{} -> 
> +         noLockExcept { sNodeGroupLock = Nothing 
> +                      , sInstanceLock = Nothing 
> +                      , sNodeLock = Just UnknownExclusive 
> +                      , sNodeResLock = Just UnknownExclusive 
> +                      } 
> +       OpInstanceActivateDisks{} -> lockF 
> +       OpInstanceDeactivateDisks{} -> lockF 
> +       OpInstanceMove{} -> noLockExcept { sInstanceLock = Nothing 
> +                                        , sNodeLock = Just AllShared 
> +                                        , sNodeResLock = Just AllShared 
> +                                        } 
> +       OpInstanceChangeGroup{} -> 
> +         noLockExcept { sNodeGroupLock = Just AllExclusive 
> +                      , sInstanceLock = Nothing 
> +                      , sNodeLock = Just AllExclusive 
> +                      } 
> +       OpNetworkAdd{} -> lockG 
> +       OpNetworkRemove{} -> lockG 
> +       OpNetworkSetParams{} -> noLockExcept { sNetworkLock = Nothing } 
> +       OpNetworkConnect{} -> 
> +         noLockExcept { sNodeGroupLock = Nothing 
> +                      , sInstanceLock = Just UnknownShared 
> +                      , sNetworkLock = Nothing 
> +                      } 
> +       OpInstanceQueryData{} -> 
> +         noLockExcept { sNodeGroupLock = Just UnknownShared 
> +                      , sInstanceLock = Just AllShared 
> +                      , sNodeLock = Just UnknownShared 
> +                      , sNodeResLock = Just UnknownShared 
> +                      } 
> +       OpRestrictedCommand{} -> noLockExcept { sNodeLock = Nothing } 
> +       OpRepairCommand{} -> noLockExcept { sNodeLock = Nothing } 
> +       OpNodeModifyStorage{} -> noLockExcept { sNodeLock = Nothing } 
> +       OpNodeQueryvols{} -> noLockExcept { sNodeLock = Nothing } 
> +       OpNodeQueryStorage{} -> noLockExcept { sNodeLock = Nothing } 
> +       OpRepairNodeStorage{} -> noLockExcept { sNodeLock = Nothing } 
> +       OpNodeMigrate{} -> noLockExcept { sNodeLock = Nothing } 
> +       OpInstanceFailover{} -> lockH 
> +       OpInstanceMigrate{} -> lockH 
> +       OpNodeSetParams{} -> lockH 
> +       OpNodeEvacuate{} -> noLockExcept { sNodeGroupLock = Just UnknownShared 
> +                                        , sInstanceLock = Just UnknownShared 
> +                                        , sNodeLock = Just UnknownShared 
> +                                        } 
> +       OpBackupPrepare{} -> noLockExcept { sInstanceLock = Nothing } 
> +       OpBackupExport{} -> noLockExcept { sInstanceLock = Nothing 
> +                                        , sNodeLock = Just AllExclusive 
> +                                        } 
> +       OpBackupRemove{} -> noLockExcept { sNodeLock = Just AllExclusive } 
> +       OpClusterRenewCrypto{} -> noLockExcept { sNodeLock = Just AllExclusive } 
> +       OpOobCommand{} -> noLockExcept { sNodeLock = Just AllExclusive } 
> +       OpTestDelay{} -> noLockExcept { sNodeLock = Just UnknownExclusive } 
> +       OpTestJqueue{} -> noLockExcept { sNodeLock = Just UnknownExclusive } 
> +       OpClusterRedistConf{} -> noLockExcept { sNodeLock = Just AllShared } 
> +       OpClusterRepairDiskSizes{} -> noLockExcept { sInstanceLock = Nothing 
> +                                                  , sNodeResLock = Nothing 
> +                                                  } 
> +       OpClusterSetParams{} -> noLockExcept { sNodeGroupLock = Just AllShared 
> +                                            , sInstanceLock = Just AllShared 
> +                                            , sNodeLock = Just AllShared 
> +                                            } 
> +       OpClusterVerifyConfig{} -> noLockExcept { sNodeGroupLock = Just AllShared 
> +                                               , sInstanceLock = Just AllShared 
> +                                               , sNodeLock = Just AllShared 
> +                                               , sNodeResLock = Just AllShared 
> +                                               , sNetworkLock = Just AllShared 
> +                                               } 
> +       OpClusterVerifyGroup{} -> 
> +         noLockExcept { sNodeGroupLock = Nothing 
> +                      , sInstanceLock = Just UnknownShared 
> +                      , sNodeLock = Just UnknownShared 
> +                      } 
> +       OpInstanceStartup{} -> 
> +         noLockExcept { sInstanceLock = Nothing 
> +                      , sNodeResLock = Just UnknownExclusive 
> +                      } 
> +       OpInstanceShutdown{} -> noLockExcept { sInstanceLock = Nothing } 
> +       OpInstanceReboot{} -> noLockExcept { sInstanceLock = Nothing } 
> +       OpInstanceConsole{} -> noLockExcept { sInstanceLock = Nothing } 
> +       OpInstanceRename{} -> noLockExcept { sInstanceLock = Nothing } 
> +       _ -> emptyLocks 
> + 
> +-- | Calculate the static weight of a job's locks in relation to the runtime 
> +-- list of jobs. 
> +staticWeight :: ConfigData -> Maybe OpCode -> [OpCode] -> Double 
> +staticWeight cfg op runningOps 
> +  | isNothing op || (hasBGL . fromJust $ op) = maxValue + base 
> +  | otherwise = -- For each level in the StaticLocks dictionary, we fold 
> +                -- and sum over the greatest lock values with the given opcode. 
> +      Map.foldlWithKey' (\acc k v -> acc + sumLocks k v) base opWeight 
> +  where sumLocks x y = foldl' max C.staticLockNoWeight $ 
> +                       map (computeLocks y . mLookup x . staticLocks cfg) 
> +                       runningOps 
> +        mLookup k v = fromMaybe None (Map.lookup k v) 
> +        opWeight = staticLocks cfg (fromJust op) 
> +        base = C.staticLockBaseWeight 
> +        maxValue = C.staticLockSureBlockWeight * 5 -- Worst case scenario 
> +                                                   -- multiplied by all 5 lock 
> +                                                   -- levels 
> diff --git a/src/Ganeti/Objects.hs b/src/Ganeti/Objects.hs 
> index 1741d08..5be8adf 100644 
> --- a/src/Ganeti/Objects.hs 
> +++ b/src/Ganeti/Objects.hs 
> @@ -720,7 +720,6 @@ instance TagsObject Cluster where 
>  -- * ConfigData definitions 
>   
>  $(buildObject "ConfigData" "config" $ 
> ---  timeStampFields ++ 
>    [ simpleField "version"    [t| Int                 |] 
>    , simpleField "cluster"    [t| Cluster             |] 
>    , simpleField "nodes"      [t| Container Node      |] 
> diff --git a/test/hs/Test/Ganeti/JQueue/LockDecls.hs b/test/hs/Test/Ganeti/JQueue/LockDecls.hs 
> new file mode 100644 
> index 0000000..fb3d8e5 
> --- /dev/null 
> +++ b/test/hs/Test/Ganeti/JQueue/LockDecls.hs 
> @@ -0,0 +1,150 @@ 
> +{-# LANGUAGE TemplateHaskell #-} 
> + 
> +{-| Unittests for the static lock declaration. 
> + 
> +-} 
> + 
> +{- 
> + 
> +Copyright (C) 2016 Google Inc. 
> +All rights reserved. 
> + 
> +Redistribution and use in source and binary forms, with or without 
> +modification, are permitted provided that the following conditions are 
> +met: 
> + 
> +1. Redistributions of source code must retain the above copyright notice, 
> +this list of conditions and the following disclaimer. 
> + 
> +2. Redistributions in binary form must reproduce the above copyright 
> +notice, this list of conditions and the following disclaimer in the 
> +documentation and/or other materials provided with the distribution. 
> + 
> +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
> +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
> +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
> +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
> +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
> +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
> +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
> +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
> +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
> +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
> +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
> + 
> +-} 
> + 
> +module Test.Ganeti.JQueue.LockDecls (testLockDecls) where 
> + 
> +import Test.QuickCheck 
> +import Test.HUnit 
> +import qualified Data.Foldable as F 
> +import Data.List 
> +import qualified Data.Map as Map 
> +import Data.Maybe 
> + 
> +import Prelude () 
> +import Ganeti.Prelude 
> + 
> +import Test.Ganeti.TestHelper 
> +import Test.Ganeti.Objects 
> +import Test.Ganeti.OpCodes (genOpCodeFromId) 
> + 
> +import qualified Ganeti.Constants as C 
> +import Ganeti.Config 
> +import Ganeti.JQueue.LockDecls 
> +import Ganeti.JSON 
> +import Ganeti.OpCodes 
> +import Ganeti.Objects 
> +import Ganeti.Types 
> + 
> + 
> +prop_staticWeight :: ConfigData -> Maybe OpCode -> [OpCode] -> Property 
> +prop_staticWeight cfg op ops = 
> +  let weight = staticWeight cfg op ops 
> +      maxWeight = C.staticLockSureBlockWeight * 5 
> +  in (weight >= 0 && weight <= (maxWeight+ C.staticLockBaseWeight)) === True 
> + 
> +genExclusiveInstanceOp :: ConfigData -> Gen OpCode 
> +genExclusiveInstanceOp cfg = do 
> +  let list = [ "OP_INSTANCE_STARTUP" 
> +             , "OP_INSTANCE_SHUTDOWN" 
> +             , "OP_INSTANCE_REBOOT" 
> +             , "OP_INSTANCE_RENAME" 
> +             ] 
> +      insts = map instName . Map.elems . fromContainer . configInstances $ cfg 
> +  op_id <- elements list 
> +  op <- genOpCodeFromId op_id 
> +  name <- elements insts 
> +  return $ op { opInstanceName = fromMaybe "" name } 
> + 
> +prop_instNameConflictCheck :: Property 
> +prop_instNameConflictCheck = do 
> +  forAll (genConfigDataWithValues 10 50) $ \ cfg -> 
> +    forAll (genExclusiveInstanceOp cfg) $ \ op1 -> 
> +      forAll (genExclusiveInstanceOp cfg) $ \ op2 -> 
> +        forAll (genExclusiveInstanceOp cfg) $ \ op3 -> 
> +          let w1 = staticWeight cfg (Just op1) [op3] 
> +              w2 = staticWeight cfg (Just op2) [op3] 
> +              iName1 = opInstanceName op1 
> +              iName2 = opInstanceName op2 
> +              iName3 = opInstanceName op3 
> +              testResult 
> +                | iName1 == iName2 = True 
> +                | iName1 == iName3 = (w2 <= w1) 
> +                | iName2 == iName3 = (w1 <= w2) 
> +                | otherwise = True 
> +          in testResult 
> + 
> +genExclusiveNodeOp :: ConfigData -> Gen OpCode 
> +genExclusiveNodeOp cfg = do 
> +  let list = [ "OP_REPAIR_COMMAND" 
> +             , "OP_NODE_MODIFY_STORAGE" 
> +             , "OP_REPAIR_NODE_STORAGE" 
> +             ] 
> +      nodes = map nodeName . F.toList . configNodes $ cfg 
> +  op_id <- elements list 
> +  op <- genOpCodeFromId op_id 
> +  name <- elements nodes 
> +  return $ op { opNodeName = fromJust $ mkNonEmpty name } 
> + 
> +prop_nodeNameConflictCheck :: Property 
> +prop_nodeNameConflictCheck = do 
> +  forAll (genConfigDataWithValues 10 50) $ \ cfg -> 
> +    forAll (genExclusiveNodeOp cfg) $ \ op1 -> 
> +      forAll (genExclusiveNodeOp cfg) $ \ op2 -> 
> +        forAll (genExclusiveNodeOp cfg) $ \ op3 -> 
> +          let w1 = staticWeight cfg (Just op1) [op3] 
> +              w2 = staticWeight cfg (Just op2) [op3] 
> +              nName1 = opNodeName op1 
> +              nName2 = opNodeName op2 
> +              nName3 = opNodeName op3 
> +              testResult 
> +                | nName1 == nName2 = True 
> +                | nName1 == nName3 = (w2 <= w1) 
> +                | nName2 == nName3 = (w1 <= w2) 
> +                | otherwise = True 
> +          in testResult 
> + 
> +case_queueLockOpOrder :: Assertion 
> +case_queueLockOpOrder = do 
> +  cfg <- generate $ genConfigDataWithValues 10 50 
> +  diagnoseOp <- generate . genOpCodeFromId $ "OP_OS_DIAGNOSE" 
> +  networkAddOp <- generate . genOpCodeFromId $ "OP_NETWORK_ADD" 
> +  groupVerifyOp <- generate . genOpCodeFromId $ "OP_GROUP_VERIFY_DISKS" 
> +  nodeAddOp <- generate . genOpCodeFromId $ "OP_NODE_ADD" 
> +  currentOp <- generate . genExclusiveInstanceOp $ cfg 
> +  let w1 = staticWeight cfg (Just diagnoseOp) [currentOp] 
> +      w2 = staticWeight cfg (Just networkAddOp) [currentOp] 
> +      w3 = staticWeight cfg (Just groupVerifyOp) [currentOp] 
> +      w4 = staticWeight cfg (Just nodeAddOp) [currentOp] 
> +      weights = [w1, w2, w3, w4] 
> +  assertEqual "weights should be sorted" 
> +    weights 
> +    (sort weights) 
> + 
> + 
> +testSuite "LockDecls" [ 'prop_staticWeight 
> +                      , 'prop_instNameConflictCheck 
> +                      , 'prop_nodeNameConflictCheck 
> +                      , 'case_queueLockOpOrder ] 
> diff --git a/test/hs/Test/Ganeti/Objects.hs b/test/hs/Test/Ganeti/Objects.hs 
> index 49cfb64..4fe2ae5 100644 
> --- a/test/hs/Test/Ganeti/Objects.hs 
> +++ b/test/hs/Test/Ganeti/Objects.hs 
> @@ -40,6 +40,7 @@ module Test.Ganeti.Objects 
>    ( testObjects 
>    , Node(..) 
>    , genConfigDataWithNetworks 
> +  , genConfigDataWithValues 
>    , genDisk 
>    , genDiskWithChildren 
>    , genEmptyCluster 
> @@ -122,7 +123,7 @@ instance Arbitrary BS.ByteString where 
>  $(genArbitrary ''PartialNDParams) 
>   
>  instance Arbitrary Node where 
> -  arbitrary = Node <$> genFQDN <*> genFQDN <*> genFQDN 
> +  arbitrary = Node <$> genFQDN <*> genIp6Addr <*> genIp6Addr 
>                <*> arbitrary <*> arbitrary <*> arbitrary <*> genFQDN 
>                <*> arbitrary <*> arbitrary <*> arbitrary <*> arbitrary 
>                <*> arbitrary <*> arbitrary <*> arbitrary <*> arbitrary 
> @@ -528,6 +529,33 @@ genConfigDataWithNetworks old_cfg = do 
>        new_cfg = old_cfg { configNetworks = net_map } 
>    return new_cfg 
>   
> +genConfigDataWithValues :: Int -> Int -> Gen ConfigData 
> +genConfigDataWithValues nNodes nInsts = do 
> +  emptyData <- genEmptyCluster nNodes 
> +  insts <- vectorOf nInsts (genInstanceFromConfigData emptyData) 
> +  let getInstName i 
> +        | RealInstance rinst <- i = UTF8.fromString $ realInstName rinst 
> +        | ForthcomingInstance finst <- i = 
> +            UTF8.fromString . fromMaybe "" $ forthcomingInstName finst 
> +        | otherwise = error ("Inconsistent instance type: " ++ show i) 
> +  let instmap = Map.fromList . map (\x -> (getInstName x, x)) $ insts 
> +      continsts = GenericContainer instmap 
> +  return $ emptyData { configInstances = continsts } 
> + 
> +genInstanceFromConfigData :: ConfigData -> Gen Instance 
> +genInstanceFromConfigData cfg = do 
> +  inst <- RealInstance <$> arbitrary :: Gen Instance 
> +  let nodes = getKeysFromContainer . configNodes $ cfg 
> +      new_inst = case inst of 
> +                      RealInstance rinst -> 
> +                        RealInstance rinst { realInstPrimaryNode = head nodes } 
> +                      ForthcomingInstance finst -> 
> +                        ForthcomingInstance finst 
> +                          { forthcomingInstPrimaryNode = Just $ head nodes } 
> +  -- FIXME: generate instance's secondary nodes using drbd/disk info 
> +  return new_inst 
> + 
> + 
>  -- * Test properties 
>   
>  -- | Tests that fillDict behaves correctly 
> @@ -712,6 +740,7 @@ genNodeGroup = do 
>                serial tags 
>    return group 
>   
> + 
>  instance Arbitrary NodeGroup where 
>    arbitrary = genNodeGroup 
>   
> diff --git a/test/hs/Test/Ganeti/OpCodes.hs b/test/hs/Test/Ganeti/OpCodes.hs 
> index bc16c9e..405d7fe 100644 
> --- a/test/hs/Test/Ganeti/OpCodes.hs 
> +++ b/test/hs/Test/Ganeti/OpCodes.hs 
> @@ -37,6 +37,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
>   
>  module Test.Ganeti.OpCodes 
>    ( testOpCodes 
> +  , genOpCodeFromId 
>    , OpCodes.OpCode(..) 
>    ) where 
>   
> @@ -140,395 +141,399 @@ arbitraryDataCollectorInterval = do 
>    intervals <- vector $ length els 
>    genMaybe . return . containerFromList $ zip els intervals 
>   
> +genOpCodeFromId :: String -> Gen OpCodes.OpCode 
> +genOpCodeFromId op_id = 
> +  case op_id of 
> +    "OP_TEST_DELAY" -> 
> +      OpCodes.OpTestDelay <$> arbitrary <*> arbitrary <*> 
> +        genNodeNamesNE <*> return Nothing <*> arbitrary <*> arbitrary <*> 
> +        arbitrary 
> +    "OP_INSTANCE_REPLACE_DISKS" -> 
> +      OpCodes.OpInstanceReplaceDisks <$> genFQDN <*> return Nothing <*> 
> +        arbitrary <*> arbitrary <*> arbitrary <*> genDiskIndices <*> 
> +        genMaybe genNodeNameNE <*> return Nothing <*> genMaybe genNameNE 
> +    "OP_INSTANCE_FAILOVER" -> 
> +      OpCodes.OpInstanceFailover <$> genFQDN <*> return Nothing <*> 
> +      arbitrary <*> arbitrary <*> genMaybe genNodeNameNE <*> 
> +      return Nothing <*> arbitrary <*> arbitrary <*> genMaybe genNameNE 
> +    "OP_INSTANCE_MIGRATE" -> 
> +      OpCodes.OpInstanceMigrate <$> genFQDN <*> return Nothing <*> 
> +        arbitrary <*> arbitrary <*> genMaybe genNodeNameNE <*> 
> +        return Nothing <*> arbitrary <*> arbitrary <*> arbitrary <*> 
> +        genMaybe genNameNE <*> arbitrary <*> arbitrary 
> +    "OP_TAGS_GET" -> 
> +      arbitraryOpTagsGet 
> +    "OP_TAGS_SEARCH" -> 
> +      OpCodes.OpTagsSearch <$> genNameNE 
> +    "OP_TAGS_SET" -> 
> +      arbitraryOpTagsSet 
> +    "OP_TAGS_DEL" -> 
> +      arbitraryOpTagsDel 
> +    "OP_CLUSTER_POST_INIT" -> pure OpCodes.OpClusterPostInit 
> +    "OP_CLUSTER_RENEW_CRYPTO" -> OpCodes.OpClusterRenewCrypto 
> +       <$> arbitrary -- Node SSL certificates 
> +       <*> arbitrary -- renew_ssh_keys 
> +       <*> arbitrary -- ssh_key_type 
> +       <*> arbitrary -- ssh_key_bits 
> +       <*> arbitrary -- verbose 
> +       <*> arbitrary -- debug 
> +    "OP_CLUSTER_DESTROY" -> pure OpCodes.OpClusterDestroy 
> +    "OP_CLUSTER_QUERY" -> pure OpCodes.OpClusterQuery 
> +    "OP_CLUSTER_VERIFY" -> 
> +      OpCodes.OpClusterVerify <$> arbitrary <*> arbitrary <*> 
> +        genListSet Nothing <*> genListSet Nothing <*> arbitrary <*> 
> +        genMaybe genNameNE <*> arbitrary 
> +    "OP_CLUSTER_VERIFY_CONFIG" -> 
> +      OpCodes.OpClusterVerifyConfig <$> arbitrary <*> arbitrary <*> 
> +        genListSet Nothing <*> arbitrary 
> +    "OP_CLUSTER_VERIFY_GROUP" -> 
> +      OpCodes.OpClusterVerifyGroup <$> genNameNE <*> arbitrary <*> 
> +        arbitrary <*> genListSet Nothing <*> genListSet Nothing <*> 
> +        arbitrary <*> arbitrary 
> +    "OP_CLUSTER_VERIFY_DISKS" -> 
> +      OpCodes.OpClusterVerifyDisks <$> genMaybe genNameNE <*> arbitrary 
> +    "OP_GROUP_VERIFY_DISKS" -> 
> +      OpCodes.OpGroupVerifyDisks <$> genNameNE <*> arbitrary 
> +    "OP_CLUSTER_REPAIR_DISK_SIZES" -> 
> +      OpCodes.OpClusterRepairDiskSizes <$> genNodeNamesNE 
> +    "OP_CLUSTER_CONFIG_QUERY" -> 
> +      OpCodes.OpClusterConfigQuery <$> genFieldsNE 
> +    "OP_CLUSTER_RENAME" -> 
> +      OpCodes.OpClusterRename <$> genNameNE 
> +    "OP_CLUSTER_SET_PARAMS" -> 
> +      OpCodes.OpClusterSetParams 
> +        <$> arbitrary                    -- force 
> +        <*> emptyMUD                     -- hv_state 
> +        <*> emptyMUD                     -- disk_state 
> +        <*> genMaybe genName             -- vg_name 
> +        <*> genMaybe arbitrary           -- enabled_hypervisors 
> +        <*> genMaybe genEmptyContainer   -- hvparams 
> +        <*> emptyMUD                     -- beparams 
> +        <*> genMaybe genEmptyContainer   -- os_hvp 
> +        <*> genMaybe genEmptyContainer   -- osparams 
> +        <*> genMaybe genEmptyContainer   -- osparams_private_cluster 
> +        <*> genMaybe genEmptyContainer   -- diskparams 
> +        <*> genMaybe arbitrary           -- candidate_pool_size 
> +        <*> genMaybe arbitrary           -- max_running_jobs 
> +        <*> genMaybe arbitrary           -- max_tracked_jobs 
> +        <*> arbitrary                    -- uid_pool 
> +        <*> arbitrary                    -- add_uids 
> +        <*> arbitrary                    -- remove_uids 
> +        <*> arbitrary                    -- maintain_node_health 
> +        <*> arbitrary                    -- prealloc_wipe_disks 
> +        <*> arbitrary                    -- nicparams 
> +        <*> emptyMUD                     -- ndparams 
> +        <*> emptyMUD                     -- ipolicy 
> +        <*> genMaybe genPrintableAsciiString 
> +                                         -- drbd_helper 
> +        <*> genMaybe genPrintableAsciiString 
> +                                         -- default_iallocator 
> +        <*> emptyMUD                     -- default_iallocator_params 
> +        <*> genMaybe genMacPrefix        -- mac_prefix 
> +        <*> genMaybe genPrintableAsciiString 
> +                                         -- m...

Reply via email to