Hi!

I have a case where Slurm allocates fewer cores than a job requires. The job below asks for 16 tasks with 12 cores each (192 cores in total), but only 180 cores were allocated.

It doesn't happen every time, but right now about 1 in 10 submissions fails this way.

Probably it depends on the layout of the jobs already running on the nodes.

Here is some information I collected. I have also attached our slurm.conf (inlined below).

We are running Slurm 2.6.1.

Best regards,
Magnus

==> submit <==
#!/bin/bash
#SBATCH -J 84212
#SBATCH --error=err.%J
#SBATCH --output=out.%J
#SBATCH -n 16
#SBATCH -c 12
#SBATCH -t 00:05:00

echo -----------
env | grep ^SLURM
echo -----------
scontrol show job -d -d $SLURM_JOBID
echo -----------

srun echo ""
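
This is not part of the failing script above, just a sketch of a guard I am thinking about adding before the srun call: it compares what -n times -c requires against what the allocation actually contains (my own untested parsing of the "(xN)" repeat syntax):

# Sketch only: bail out early if the allocation is short.
# SLURM_JOB_CPUS_PER_NODE looks like "24,48(x2),36,24".
need=$((SLURM_NTASKS * SLURM_CPUS_PER_TASK))
got=0
for spec in ${SLURM_JOB_CPUS_PER_NODE//,/ }; do
    cpus=${spec%%(*}                # CPUs on this node
    mult=1                          # "(xN)" repeat count, if any
    [[ $spec == *x* ]] && { mult=${spec##*x}; mult=${mult%)}; }
    got=$((got + cpus * mult))
done
if (( got < need )); then
    echo "short allocation: got $got of $need CPUs" >&2
    exit 1
fi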

==> out.1313514 <==
-----------
SLURM_CHECKPOINT_IMAGE_DIR=/pfs/nobackup/home/m/magnus/84212
SLURM_NODELIST=t-cn[0113,0423-0424,0433-0434]
SLURM_JOB_NAME=84212
SLURMD_NODENAME=t-cn0113
SLURM_TOPOLOGY_ADDR=t-isw0501.t-isw0101.t-cn0113
SLURM_PRIO_PROCESS=0
SLURM_NODE_ALIASES=(null)
SLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.node
SLURM_MEM_PER_CPU=2500
SLURM_NNODES=5
SLURM_JOBID=1313514
SLURM_NTASKS=16
SLURM_TASKS_PER_NODE=3,4(x2),3,2
SLURM_JOB_ID=1313514
SLURM_CPUS_PER_TASK=12
SLURM_NODEID=0
SLURM_SUBMIT_DIR=/pfs/nobackup/home/m/magnus/84212
SLURM_TASK_PID=19827
SLURM_NPROCS=16
SLURM_CPUS_ON_NODE=24
SLURM_PROCID=0
SLURM_JOB_NODELIST=t-cn[0113,0423-0424,0433-0434]
SLURM_LOCALID=0
SLURM_JOB_CPUS_PER_NODE=24,48(x2),36,24
SLURM_GTIDS=0
SLURM_SUBMIT_HOST=t-mn01.hpc2n.umu.se
SLURM_JOB_NUM_NODES=5
-----------
JobId=1313514 Name=84212
   UserId=magnus(2066) GroupId=folk(3001)
   Priority=10 Account=default QOS=normal
   JobState=RUNNING Reason=None Dependency=(null)
   Requeue=1 Restarts=0 BatchFlag=1 ExitCode=0:0
   DerivedExitCode=0:0
   RunTime=00:00:01 TimeLimit=00:05:00 TimeMin=N/A
   SubmitTime=2013-10-04T15:16:43 EligibleTime=2013-10-04T15:16:43
   StartTime=2013-10-04T15:23:02 EndTime=2013-10-04T15:28:02
   PreemptTime=None SuspendTime=None SecsPreSuspend=0
   Partition=batch AllocNode:Sid=t-mn01:8853
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=t-cn[0113,0423-0424,0433-0434]
   BatchHost=t-cn0113
   NumNodes=5 NumCPUs=180 CPUs/Task=12 ReqS:C:T=*:*:*
     Nodes=t-cn0113 CPU_IDs=12-35 Mem=60000
     Nodes=t-cn[0423-0424] CPU_IDs=0-47 Mem=120000
     Nodes=t-cn0433 CPU_IDs=6-41 Mem=90000
     Nodes=t-cn0434 CPU_IDs=6-11,24-41 Mem=60000
   MinCPUsNode=12 MinMemoryCPU=2500M MinTmpDiskNode=0
   Features=(null) Gres=(null) Reservation=(null)
   Shared=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/pfs/nobackup/home/m/magnus/84212/submit
   WorkDir=/pfs/nobackup/home/m/magnus/84212
   BatchScript=
#!/bin/bash
#SBATCH -J 84212
#SBATCH --error=err.%J
#SBATCH --output=out.%J
#SBATCH -n 16
#SBATCH -c 12
#SBATCH -t 00:05:00

echo -----------
env | grep ^SLURM
echo -----------
scontrol show job -d -d $SLURM_JOBID
echo -----------

srun echo ""

-----------

==> err.1313514 <==
srun: error: Unable to create job step: More processors requested than permitted


==> slurm.log <==
Oct  4 15:23:02 t-mn02 slurmctld[28426]: backfill test for job 1313514
Oct  4 15:23:02 t-mn02 slurmctld[28426]: error: cons_res: _compute_c_b_task_dist oversubscribe for job 1313514
Oct  4 15:23:02 t-mn02 slurmctld[28426]: backfill: Started JobId=1313514 on t-cn[0113,0423-0424,0433-0434]
Oct  4 15:23:03 t-mn02 slurmctld[28426]: _slurm_rpc_job_step_create for job 1313514: More processors requested than permitted
Oct  4 15:23:03 t-mn02 slurmctld[28426]: completing job 1313514
Oct  4 15:23:03 t-mn02 slurmctld[28426]: sched: job_complete for JobId=1313514 successful, exit code=256
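
If I pair up SLURM_TASKS_PER_NODE with SLURM_JOB_CPUS_PER_NODE from the output above, the first node is the one that comes up short, which matches the 192 vs 180 total. A quick cross-check (values expanded by hand from the environment above):

tasks=(3 4 4 3 2)      # SLURM_TASKS_PER_NODE=3,4(x2),3,2
cpus=(24 48 48 36 24)  # SLURM_JOB_CPUS_PER_NODE=24,48(x2),36,24
for i in "${!tasks[@]}"; do
    need=$(( tasks[i] * 12 ))      # -c 12 cores per task
    status=ok
    (( need > cpus[i] )) && status=SHORT
    echo "node $i: need=$need got=${cpus[i]} $status"
done

This prints "node 0: need=36 got=24 SHORT", i.e. t-cn0113 (CPU_IDs=12-35 is only 24 CPUs) got 3 tasks but cores for only 2, so the step creation fails there.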

--
Magnus Jonsson, Developer, HPC2N, UmeƄ Universitet

==> slurm.conf <==
#
# See the slurm.conf man page for more information.
#
ControlMachine=t-mn02
#ControlAddr=
#BackupController=
#BackupAddr=
# 
AuthType=auth/munge
CacheGroups=0
#CheckpointType=checkpoint/none 
CryptoType=crypto/munge
DisableRootJobs=YES
EnforcePartLimits=YES
RebootProgram=/sbin/reboot
# our cleanup epilog
Epilog=/var/conf/slurm/hpc2n-epilog
#PrologSlurmctld= 
#FirstJobId=1 
#GresTypes= 
#GroupUpdateForce=0 
#GroupUpdateTime=600 
#JobCheckpointDir=/var/slurm/checkpoint 
#JobCredentialPrivateKey=
#JobCredentialPublicCertificate=
#JobFileAppend=0 
#JobRequeue=1 
#JobSubmitPlugins=1 
#KillOnBadExit=0 
###################
#Optimization Toolbox                   2
#Partial Differential Equation Toolbox  2
#Statistics Toolbox                     2
#Image Processing Toolbox               2
#Curve Fitting Toolbox                  5
#Signal Processing Toolbox              5
#Communications Toolbox                 5
#Parallel Computing Toolbox             15
# One more than we actually have!
Licenses=matlab*21,matlab-pct*16,matlab-ct*6,matlab-spt*6,matlab-cft*6,matlab-ipt*3,matlab-st*3,matlab-pdet*3,matlab-ot*3
MailProg=/usr/bin/mail 
MaxJobCount=20000
#MaxTasksPerNode=128 
#MpiDefault=none
MpiDefault=openmpi
#MpiParams=ports=#-# 
## needs openmpi-1.5+
MpiParams=ports=12000-12999
#PluginDir= 
#PlugStackConfig= 
#PrivateData=jobs 
ProctrackType=proctrack/cgroup
#Prolog=
#PrologSlurmctld= 
#PropagatePrioProcess=0 
#PropagateResourceLimits= 
PropagateResourceLimitsExcept=CPU,MEMLOCK 
ReturnToService=1
#SallocDefaultCommand= 
#SlurmctldPidFile=
SlurmctldPort=6800-6817
#SlurmdPidFile=
SlurmdPort=6818
SlurmdSpoolDir=/var/spool/slurmd
SlurmUser=slurm
#SrunEpilog=
#SrunProlog=
StateSaveLocation=/var/spool/slurm
SwitchType=switch/none
#TaskEpilog=
TaskPlugin=task/cgroup,task/affinity
#TaskPlugin=task/affinity
#TaskPluginParam=Sched
#TaskPluginParam=Cpusets
#TaskPluginParam=Verbose
#TaskProlog=
TopologyPlugin=topology/tree 
#TopologyPlugin=topology/none
#TmpFs=/tmp 
TmpFs=/scratch
#TrackWCKey=no 
#TreeWidth= 
#UnkillableStepProgram= 
UsePAM=1
# 
# 
# TIMERS 
#BatchStartTimeout=10 
#CompleteWait=0 
#EpilogMsgTime=2000 
#GetEnvTimeout=2 
HealthCheckInterval=3600
HealthCheckProgram=/var/conf/slurm/hpc2n-healthcheck
InactiveLimit=0
KillWait=30
#MessageTimeout=10 
#ResvOverRun=0 
MinJobAge=300
#OverTimeLimit=0 
SlurmctldTimeout=120
SlurmdTimeout=300
#UnkillableStepTimeout=60 
#VSizeFactor=0 
WaitTime=60
# 
# 
# SCHEDULING 
#DefMemPerCPU=4000
#DefMemPerCPU=1950
FastSchedule=1
#MaxMemPerCPU=0 
#MaxMemPerCPU=2000
#MaxMemPerCPU=8000
#MaxMemPerCPU=4000
# (a CPU in this context means a socket) # hpc2n
#SchedulerRootFilter=1 
#SchedulerTimeSlice=30 
SchedulerType=sched/backfill
SchedulerParameters=max_job_bf=1000,bf_window=1440,default_queue_depth=500,bf_resolution=1800,bf_max_job_user=5,bf_continue
SelectType=select/cons_res
SelectTypeParameters=CR_Socket_Memory,CR_CORE_DEFAULT_DIST_BLOCK,CR_ALLOCATE_FULL_SOCKET
# 
# 
# JOB PRIORITY 
PriorityType=priority/multifactor
#PriorityType=priority/basic 
#PriorityDecayHalfLife= 
## 14 days and 0 hours
PriorityDecayHalfLife=14-0
#PriorityCalcPeriod= 
#PriorityFavorSmall= 
#PriorityMaxAge= 
#PriorityUsageResetPeriod= 
#PriorityWeightAge= 
PriorityWeightFairshare=1000000 
#PriorityWeightJobSize= 
PriorityWeightPartition=10000
#PriorityWeightQOS= 
# 
# 
# LOGGING AND ACCOUNTING 
AccountingStorageEnforce=associations,limits,qos
AccountingStorageHost=t-mn02
#AccountingStorageLoc=
#AccountingStoragePass=
#AccountingStoragePort=
AccountingStorageType=accounting_storage/slurmdbd
#AccountingStorageUser=
ClusterName=abisko
#DebugFlags= 
#JobCompHost=
#JobCompLoc=/var/log/slurm/jobcomp.log
#JobCompPass=
#JobCompPort=
#JobCompType=jobcomp/filetxt
#JobCompUser=
JobAcctGatherFrequency=30
JobAcctGatherType=jobacct_gather/linux
SlurmctldDebug=3
#SlurmctldLogFile=/var/log/slurm/slurmctld.log
SlurmdDebug=3
#SlurmdLogFile=/var/log/slurm/slurmd.log
#SlurmSchedLogFile=/var/log/slurm/slurmsched.log
#SlurmSchedLogLevel=0

# logs extra information about jobs.
EpilogSlurmctld=/var/conf/slurm/hpc2n-ctldepilog

# 
# 
# POWER SAVE SUPPORT FOR IDLE NODES (optional) 
#SuspendProgram= 
#ResumeProgram= 
#SuspendTimeout= 
#ResumeTimeout=
#ResumeRate= 
#SuspendExcNodes= 
#SuspendExcParts= 
#SuspendRate= 
#SuspendTime= 
# 
# 
# COMPUTE NODES 
NodeName=t-cn[0101-0136] RealMemory=129000 Sockets=8 CoresPerSocket=6
NodeName=t-cn[0201-0236] RealMemory=129000 Sockets=8 CoresPerSocket=6
NodeName=t-cn[0301-0336] RealMemory=129000 Sockets=8 CoresPerSocket=6
NodeName=t-cn[0401-0436] RealMemory=129000 Sockets=8 CoresPerSocket=6
NodeName=t-cn[0601-0636] RealMemory=129000 Sockets=8 CoresPerSocket=6
NodeName=t-cn[0701-0736] RealMemory=129000 Sockets=8 CoresPerSocket=6
NodeName=t-cn[0801-0836] RealMemory=129000 Sockets=8 CoresPerSocket=6
NodeName=t-cn[0901-0936] RealMemory=129000 Sockets=8 CoresPerSocket=6
NodeName=t-cn[1001-1020] RealMemory=129000 Sockets=8 CoresPerSocket=6
NodeName=t-cn[1021-1030] RealMemory=516000 Sockets=8 CoresPerSocket=6
NodeName=t-cn[1031-1034] RealMemory=128000 Sockets=8 CoresPerSocket=6

# Partition Configurations
PartitionName=batch Nodes=t-cn[0101-0136,0201-0236,0301-0336,0401-0436,0601-0636,0701-0736,0801-0836,0901-0936,1001-1020,1021-1030] Default=YES DefaultTime=30:00 MaxTime=5-0 DefMemPerCPU=2500
PartitionName=bigmem Nodes=t-cn[1021-1030] Default=no DefaultTime=30:00 MaxTime=5-0 DefMemPerCPU=10000 Priority=1000

PartitionName=devel Nodes=t-cn[1031-1034] AllowGroups=sysop Default=no DefaultTime=30:00 MaxTime=5-0 hidden=yes DefMemPerCPU=2500

# Only for grid (core jobs)
PartitionName=grid Nodes=t-cn[0101-0136,0201-0236,0301-0336,0401-0436,0601-0636,0701-0736,0801-0836,0901-0936,1001-1020,1021-1030] AllowGroups=aatlsm,aatlpd,arcatlas,icecube,biogrid,swegrid,sweops,ndgfops,cernops,sysop Default=no DefaultTime=30:00 MaxTime=5-0 hidden=yes DefMemPerCPU=2500 SelectTypeParameters=CR_Core
