commit 5394497ccf8ebb6dea00fa07fba5a6eb50ae58f5
Merge: c578370 6f7fcb8
Author: Klaus Aehlig <[email protected]>
Date: Thu Mar 5 15:55:24 2015 +0100
Merge branch 'stable-2.12' into stable-2.13
* stable-2.12
Only read config if necessary
Always OutDate() the lu's config
Outdate the config when waiting for locks
Support outdating a config
Allow unlocked reading of the config
Also in tests, open fake config before editing
Fix a few haddock comments
* stable-2.11
Skip offline nodes in RENEW_CRYPTO jobs
* stable-2.10
Remove unused import
Use an old way to instance NFData CollectorData
MonD: force computation of state in stateful collectors
Instance NFData CollectorData
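
The stable-2.12 entries above all revolve around one mechanism: the
logic unit's config object can be marked stale with OutDate(), reads
reload the data only when necessary, and the object is outdated
whenever the job has to wait for locks. Below is a minimal sketch of
that pattern in isolation; apart from OutDate(), all names are
hypothetical stand-ins, not the real lib/config.py interface.

# Illustrative sketch only, not Ganeti code.

class FakeConfig(object):
  """Caches cluster data and can be marked stale with OutDate()."""

  def __init__(self):
    self._data = None

  def OutDate(self):
    # Drop the cached view; the next read has to fetch fresh data.
    self._data = None

  def GetData(self):
    # Only (re)read the configuration if the cache was outdated.
    if self._data is None:
      self._data = self._Load()
    return self._data

  def _Load(self):
    # Placeholder for reading the real configuration.
    return {"version": 1}


def WaitForLocks(cfg, request_locks):
  """While we sleep on locks, other jobs may change the cluster."""
  cfg.OutDate()         # do not trust the cached view afterwards
  request_locks()       # blocks until the locks are granted
  return cfg.GetData()  # triggers a fresh read


if __name__ == "__main__":
  cfg = FakeConfig()
  print(WaitForLocks(cfg, lambda: None))
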
Semantic conflicts:
+ Re-add the OutDate() calls from 8303156e9d35317be0fbf68fe147b3148874408c
  that could not be part of the
  cherry-pick 4a26f3bbbb672647f68dca6b34c2325be8073e2a.
+ As the type of the state of stateful data collectors has
  changed, also add an NFData instance for ClockTime.
+ The code added in 011924bd75bc27dec07ba9547ab9510a5724a8f8 is now
  outside the scope of feedback_fn; report via the logging module
  instead.
Signed-off-by: Klaus Aehlig <[email protected]>
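
To illustrate the third conflict: the node loop added in 011924bd now
sits where feedback_fn is not in scope, so the "skipping offline node"
message goes through the logging module, as in the cluster.py hunk
below. A small self-contained sketch of that pattern (the node objects
here are hypothetical stand-ins, not Ganeti's):

# Illustrative sketch only, not Ganeti code.

import collections
import logging

FakeNode = collections.namedtuple("FakeNode", ["name", "offline"])


def RenewNodeCerts(nodes):
  """Walk the nodes, skipping offline ones the way the merged code does."""
  renewed = []
  for node in nodes:
    if node.offline:
      # feedback_fn is out of scope here, hence logging instead of a callback.
      logging.info("* Skipping offline node %s", node.name)
      continue
    renewed.append(node.name)
  return renewed


if __name__ == "__main__":
  logging.basicConfig(level=logging.INFO)
  print(RenewNodeCerts([FakeNode("node1", False), FakeNode("node2", True)]))
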
diff --cc lib/cmdlib/cluster.py
index 414127a,193f306..5035918
--- a/lib/cmdlib/cluster.py
+++ b/lib/cmdlib/cluster.py
@@@ -156,6 -130,9 +156,9 @@@ class LUClusterRenewCrypto(NoHooksLU)
self.cfg.AddNodeToCandidateCerts(master_uuid, new_master_digest)
nodes = self.cfg.GetAllNodesInfo()
for (node_uuid, node_info) in nodes.items():
+ if node_info.offline:
- feedback_fn("* Skipping offline node %s" % node_info.name)
++ logging.info("* Skipping offline node %s", node_info.name)
+ continue
if node_uuid != master_uuid:
new_digest = CreateNewClientCert(self, node_uuid)
if node_info.master_candidate:
diff --cc lib/mcpu.py
index 609e240,7446bc1..c83bb35
--- a/lib/mcpu.py
+++ b/lib/mcpu.py
@@@ -476,10 -412,8 +476,12 @@@ class Processor(object)
else:
request = [[lock, "exclusive"] for lock in locks]
+ if request_only:
+ logging.debug("Lock request for level %s is %s", level, request)
+ return request
+
+ self.cfg.OutDate()
+
if timeout is None:
## Note: once we are so desperate for locks to request them
## unconditionally, we no longer care about an original plan
@@@ -568,13 -535,6 +571,14 @@@
acquiring_locks = level in lu.needed_locks
if level not in locking.LEVELS:
+ if pending:
+ self._RequestAndWait(pending, calc_timeout())
++ lu.cfg.OutDate()
+ lu.wconfdlocks = self.wconfd.Client().ListLocks(self._wconfdcontext)
+ pending = []
+
+ logging.debug("Finished acquiring locks")
+
_VerifyLocks(lu)
if self._cbs:
@@@ -611,13 -558,6 +615,14 @@@
# Determine if the acquiring is opportunistic up front
opportunistic = lu.opportunistic_locks[level]
+ dont_collate = lu.dont_collate_locks[level]
+
+ if dont_collate and pending:
+ self._RequestAndWait(pending, calc_timeout())
++ lu.cfg.OutDate()
+ lu.wconfdlocks = self.wconfd.Client().ListLocks(self._wconfdcontext)
+ pending = []
+
if adding_locks and opportunistic:
# We could simultaneously acquire locks opportunistically and add new
# ones, but that would require altering the API, and no use cases are
@@@ -648,23 -588,12 +653,24 @@@
if adding_locks:
needed_locks.extend(_LockList(lu.add_locks[level]))
- self._AcquireLocks(level, needed_locks, share, opportunistic,
- calc_timeout(),
- opportunistic_count=opportunistic_count)
- lu.wconfdlocks = self.wconfd.Client().ListLocks(self._wconfdcontext)
-
- result = self._LockAndExecLU(lu, level + 1, calc_timeout)
+ timeout = calc_timeout()
+ if timeout is not None and not opportunistic:
+ pending = pending + self._AcquireLocks(level, needed_locks, share,
+ opportunistic, timeout,
+ request_only=True)
+ else:
+ if pending:
+ self._RequestAndWait(pending, calc_timeout())
++ lu.cfg.OutDate()
+ lu.wconfdlocks = self.wconfd.Client().ListLocks(self._wconfdcontext)
+ pending = []
+ self._AcquireLocks(level, needed_locks, share, opportunistic,
+ timeout,
+ opportunistic_count=opportunistic_count)
+ lu.wconfdlocks = self.wconfd.Client().ListLocks(self._wconfdcontext)
+
+ result = self._LockAndExecLU(lu, level + 1, calc_timeout,
+ pending=pending)
finally:
levelname = locking.LEVEL_NAMES[level]
logging.debug("Freeing locks at level %s for %s",
diff --cc src/Ganeti/DataCollectors/Types.hs
index e4db7b8,c2ab6ad..b1414c3
--- a/src/Ganeti/DataCollectors/Types.hs
+++ b/src/Ganeti/DataCollectors/Types.hs
@@@ -1,4 -1,4 +1,5 @@@
{-# LANGUAGE TemplateHaskell #-}
++{-# OPTIONS_GHC -fno-warn-orphans #-}
{-| Implementation of the Ganeti data collector types.
@@@ -47,10 -47,10 +48,12 @@@ module Ganeti.DataCollectors.Type
, buildReport
, mergeStatuses
, getCategoryName
+ , ReportBuilder(..)
+ , DataCollector(..)
) where
+ import Control.DeepSeq (NFData, rnf)
+ import Control.Seq (using, seqFoldable, rdeepseq)
import Data.Char
import Data.Ratio
import qualified Data.Map as Map
@@@ -139,8 -138,27 +142,30 @@@ instance JSON DCVersion wher
readJSON v = fail $ "Invalid JSON value " ++ show v ++ " for type DCVersion"
-- | Type for the value field of the `CollectorMap` below.
-data CollectorData = CPULoadData (Seq.Seq (Integer, [Int]))
+data CollectorData = CPULoadData (Seq.Seq (ClockTime, [Int]))
+ {-
+
+ Naturally, we want to make CollectorData an instance of NFData as
+ follows.
+
+ instance NFData CollectorData where
+ rnf (CPULoadData x) = rnf x
+
+ However, Seq.Seq only became an instance of NFData in version 0.5.0.0
+ of containers (Released 2012). So, for the moment, we use a generic
+ way to reduce to normal form. In later versions of Ganeti, where we
+ have the infrastructure to do so, we will choose the implementation
+ depending on the version of the containers library available.
+
+ -}
+
++instance NFData ClockTime where
++ rnf (TOD x y) = rnf x `seq` rnf y
++
+ instance NFData CollectorData where
+ rnf (CPULoadData x) = (x `using` seqFoldable rdeepseq) `seq` ()
+
-- | Type for the map storing the data of the statefull DataCollectors.
type CollectorMap = Map.Map String CollectorData
diff --cc src/Ganeti/Monitoring/Server.hs
index bd98d35,62074b3..0c3cb0f
--- a/src/Ganeti/Monitoring/Server.hs
+++ b/src/Ganeti/Monitoring/Server.hs
@@@ -42,15 -41,13 +42,17 @@@ module Ganeti.Monitoring.Serve
) where
import Control.Applicative
+ import Control.DeepSeq (force)
+ import Control.Exception.Base (evaluate)
import Control.Monad
import Control.Monad.IO.Class
-import Data.ByteString.Char8 hiding (map, filter, find)
-import Data.List
+import Data.ByteString.Char8 (pack, unpack)
+import Data.Maybe (fromMaybe)
+import Data.List (find)
+import Data.Monoid (mempty)
import qualified Data.Map as Map
+import qualified Data.PSQueue as Queue
+import Network.BSD (getServicePortNumber)
import Snap.Core
import Snap.Http.Server
import qualified Text.JSON as J